hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
90a06aea55a56a0e469fdbb4673be33895cde43c
| 241,390
|
py
|
Python
|
src/hcb/codes/honeycomb/circuit_maker_test.py
|
Strilanc/honeycomb-boundaries
|
cc33baac44c7831bd643db81d0053f8ec6eae9d8
|
[
"Apache-2.0"
] | null | null | null |
src/hcb/codes/honeycomb/circuit_maker_test.py
|
Strilanc/honeycomb-boundaries
|
cc33baac44c7831bd643db81d0053f8ec6eae9d8
|
[
"Apache-2.0"
] | 2
|
2022-02-25T22:28:24.000Z
|
2022-03-23T21:09:04.000Z
|
src/hcb/codes/honeycomb/circuit_maker_test.py
|
Strilanc/honeycomb-boundaries
|
cc33baac44c7831bd643db81d0053f8ec6eae9d8
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import stim
from hcb.codes.honeycomb.layout import HoneycombLayout
@pytest.mark.parametrize("data_width,data_height,rounds,gate_set,tested_observable,decomposed_graphlike_code_distance,ignored_graphlike_code_distance", [
    (8, 12, 10, 'SI1000', 'H', 5, 6),
    (10, 15, 10, 'SI1000', 'H', 7, 7),
    (12, 18, 10, 'SI1000', 'H', 8, 9),
    (14, 21, 10, 'SI1000', 'H', 10, 10),
    (16, 24, 10, 'SI1000', 'H', 11, 12),
    (8, 12, 10, 'SI1000', 'V', 5, 7),
    (10, 15, 10, 'SI1000', 'V', 7, 9),
    (12, 18, 10, 'SI1000', 'V', 9, 11),
    (14, 21, 10, 'SI1000', 'V', 11, 13),
    (16, 24, 10, 'SI1000', 'V', 13, 15),
    (7, 12, 10, 'SI1000', 'EPR', 6, 6),
    (8, 15, 10, 'SI1000', 'EPR', 6, 7),
    (10, 18, 10, 'SI1000', 'EPR', 8, 9),
    (11, 21, 10, 'SI1000', 'EPR', 10, 10),
    (13, 24, 10, 'SI1000', 'EPR', 12, 12),
    (25, 48, 10, 'SI1000', 'EPR', 24, 24),
    (8, 12, 10, 'SD6', 'H', 5, 6),
    (10, 15, 10, 'SD6', 'H', 7, 7),
    (12, 18, 10, 'SD6', 'H', 8, 9),
    (14, 21, 10, 'SD6', 'H', 10, 10),
    (16, 24, 10, 'SD6', 'H', 11, 12),
    (8, 12, 10, 'SD6', 'V', 5, 7),
    (10, 15, 10, 'SD6', 'V', 7, 9),
    (12, 18, 10, 'SD6', 'V', 9, 11),
    (14, 21, 10, 'SD6', 'V', 11, 13),
    (16, 24, 10, 'SD6', 'V', 13, 15),
    (7, 12, 10, 'SD6', 'EPR', 6, 6),
    (8, 15, 10, 'SD6', 'EPR', 6, 7),
    (10, 18, 10, 'SD6', 'EPR', 8, 9),
    (11, 21, 10, 'SD6', 'EPR', 10, 10),
    (13, 24, 10, 'SD6', 'EPR', 12, 12),
    (25, 48, 10, 'SD6', 'EPR', 24, 24),
    (8, 12, 10, 'EM3_v1', 'H', 4, 4),
    (8, 15, 10, 'EM3_v1', 'H', 5, 5),
    (10, 12, 10, 'EM3_v1', 'H', 4, 4),
    (8, 12, 10, 'EM3_v1', 'V', 4, 4),
    (8, 15, 10, 'EM3_v1', 'V', 4, 4),
    (10, 12, 10, 'EM3_v1', 'V', 5, 5),
    (8, 12, 10, 'EM3_v1', 'EPR', 4, 4),
    (8, 15, 10, 'EM3_v1', 'EPR', 4, 4),
    (10, 12, 10, 'EM3_v1', 'EPR', 4, 4),
    (10, 15, 10, 'EM3_v1', 'EPR', 5, 5),
    (8, 12, 10, 'EM3_v2', 'EPR', 4, 4),
])
def test_graphlike_code_distances(*,
                                  data_width: int,
                                  data_height: int,
                                  rounds: int,
                                  gate_set: str,
                                  tested_observable: str,
                                  ignored_graphlike_code_distance: int,
                                  decomposed_graphlike_code_distance: int):
    """Checks the graphlike code distance of noisy honeycomb circuits.

    Two distances are verified for every layout/gate-set/observable combo:
    one computed while simply skipping ungraphlike errors, and one computed
    after decomposing all errors into graphlike components.
    """
    noisy_circuit = HoneycombLayout(data_width=data_width,
                                    data_height=data_height,
                                    rounds=rounds,
                                    noise_level=0.001,
                                    noisy_gate_set=gate_set,
                                    tested_observable=tested_observable).noisy_circuit()

    # Distance when ungraphlike errors are ignored outright.
    skipped_search = noisy_circuit.shortest_graphlike_error(ignore_ungraphlike_errors=True)
    assert len(skipped_search) == ignored_graphlike_code_distance, "ignored"

    # Distance after forcing errors to decompose into graphlike pieces.
    decomposed_model = noisy_circuit.detector_error_model(decompose_errors=True)
    decomposed_search = decomposed_model.shortest_graphlike_error(ignore_ungraphlike_errors=False)
    assert len(decomposed_search) == decomposed_graphlike_code_distance, "decomposed"
def test_exact_circuit_EM3_v1_H():
    """Golden test: pins the exact noisy stim circuit for a small EM3_v1 layout.

    Builds a tiny sheared honeycomb patch (2x6, horizontal observable) and
    asserts the generated noisy circuit matches, token for token, the expected
    stim circuit below. Any change to circuit generation that alters output —
    even re-ordering equivalent instructions — will fail this test, which is
    the point: it guards against unintended codegen changes.
    """
    # Smallest interesting patch so the expected circuit stays reviewable.
    layout = HoneycombLayout(data_width=2,
                             data_height=6,
                             rounds=100,
                             noise_level=0.125,
                             noisy_gate_set='EM3_v1',
                             tested_observable='H',
                             sheared=True)
    # Index [1] picks the noisy variant out of ideal_and_noisy_circuit.
    # NOTE(review): presumably the property returns an (ideal, noisy) pair —
    # confirm against HoneycombLayout.
    assert layout.ideal_and_noisy_circuit[1] == stim.Circuit("""
QUBIT_COORDS(0, 0) 0
QUBIT_COORDS(1, 0) 1
QUBIT_COORDS(1, 1) 2
QUBIT_COORDS(1, 2) 3
QUBIT_COORDS(1, 3) 4
QUBIT_COORDS(2, 1) 5
QUBIT_COORDS(2, 2) 6
QUBIT_COORDS(2, 3) 7
QUBIT_COORDS(2, 4) 8
QUBIT_COORDS(2, 5) 9
QUBIT_COORDS(3, 4) 10
QUBIT_COORDS(3, 5) 11
R 0 1 2 3 4 5 6 7 8 9 10 11
X_ERROR(0.125) 0 1 2 3 4 5 6 7 8 9 10 11
TICK
H_YZ 0 1 2 3 4 5 6 7 8 9 10 11
DEPOLARIZE1(0.125) 0 1 2 3 4 5 6 7 8 9 10 11
TICK
DEPOLARIZE2(0.125) 2 3 6 5 8 9 11 10 0 1 4 7
MPP(0.125) X2*X3 X6*X5 X8*X9 X11*X10 X0*X1 X4*X7
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 4 5 10 3 9 6 11
DEPOLARIZE2(0.125) 2 1 8 7
MPP(0.125) Y0 Y2*Y1 Y4 Y5 Y8*Y7 Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(1.5, 4, 0) rec[-8] rec[-6] rec[-3]
DETECTOR(2.5, 1, 0) rec[-7] rec[-2]
DETECTOR(0.5, 1, 0) rec[-10] rec[-9] rec[-4]
DETECTOR(3.5, 4, 0) rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 1 9 11
DEPOLARIZE2(0.125) 4 3 6 7 2 5 8 10
MPP(0.125) Z0 Z1 Z4*Z3 Z6*Z7 Z9 Z11 Z2*Z5 Z8*Z10
OBSERVABLE_INCLUDE(1) rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE2(0.125) 2 3 6 5 8 9 11 10 0 1 4 7
MPP(0.125) X2*X3 X6*X5 X8*X9 X11*X10 X0*X1 X4*X7
DETECTOR(1.5, 2, 0) rec[-12] rec[-11] rec[-8] rec[-6] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 1 9 11
DEPOLARIZE2(0.125) 4 3 6 7 2 5 8 10
MPP(0.125) Z0 Z1 Z4*Z3 Z6*Z7 Z9 Z11 Z2*Z5 Z8*Z10
OBSERVABLE_INCLUDE(1) rec[-1]
DETECTOR(1.5, 2, 0) rec[-20] rec[-19] rec[-16] rec[-6] rec[-5] rec[-2]
DETECTOR(2.5, 5, 0) rec[-18] rec[-17] rec[-15] rec[-4] rec[-3] rec[-1]
DETECTOR(0.5, -1, 0) rec[-22] rec[-21] rec[-8] rec[-7]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 4 5 10 3 9 6 11
DEPOLARIZE2(0.125) 2 1 8 7
MPP(0.125) Y0 Y2*Y1 Y4 Y5 Y8*Y7 Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(0.5, 3, 0) rec[-40] rec[-36] rec[-30] rec[-16] rec[-8] rec[-4]
DETECTOR(2.5, 3, 0) rec[-38] rec[-37] rec[-34] rec[-29] rec[-25] rec[-15] rec[-11] rec[-6] rec[-5] rec[-2]
SHIFT_COORDS(0, 0, 1)
TICK
REPEAT 48 {
DEPOLARIZE2(0.125) 2 3 6 5 8 9 11 10 0 1 4 7
MPP(0.125) X2*X3 X6*X5 X8*X9 X11*X10 X0*X1 X4*X7
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 4 5 10 3 9 6 11
DEPOLARIZE2(0.125) 2 1 8 7
MPP(0.125) Y0 Y2*Y1 Y4 Y5 Y8*Y7 Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(1.5, 4, 0) rec[-24] rec[-22] rec[-19] rec[-8] rec[-6] rec[-3]
DETECTOR(2.5, 1, 0) rec[-23] rec[-18] rec[-7] rec[-2]
DETECTOR(0.5, 1, 0) rec[-26] rec[-25] rec[-20] rec[-10] rec[-9] rec[-4]
DETECTOR(3.5, 4, 0) rec[-21] rec[-17] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 1 9 11
DEPOLARIZE2(0.125) 4 3 6 7 2 5 8 10
MPP(0.125) Z0 Z1 Z4*Z3 Z6*Z7 Z9 Z11 Z2*Z5 Z8*Z10
OBSERVABLE_INCLUDE(1) rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE2(0.125) 2 3 6 5 8 9 11 10 0 1 4 7
MPP(0.125) X2*X3 X6*X5 X8*X9 X11*X10 X0*X1 X4*X7
DETECTOR(1.5, 2, 0) rec[-54] rec[-53] rec[-49] rec[-46] rec[-45] rec[-42] rec[-12] rec[-11] rec[-8] rec[-6] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 1 9 11
DEPOLARIZE2(0.125) 4 3 6 7 2 5 8 10
MPP(0.125) Z0 Z1 Z4*Z3 Z6*Z7 Z9 Z11 Z2*Z5 Z8*Z10
OBSERVABLE_INCLUDE(1) rec[-1]
DETECTOR(1.5, 2, 0) rec[-20] rec[-19] rec[-16] rec[-6] rec[-5] rec[-2]
DETECTOR(2.5, 5, 0) rec[-18] rec[-17] rec[-15] rec[-4] rec[-3] rec[-1]
DETECTOR(0.5, -1, 0) rec[-22] rec[-21] rec[-8] rec[-7]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 4 5 10 3 9 6 11
DEPOLARIZE2(0.125) 2 1 8 7
MPP(0.125) Y0 Y2*Y1 Y4 Y5 Y8*Y7 Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(0.5, 3, 0) rec[-40] rec[-36] rec[-30] rec[-16] rec[-8] rec[-4]
DETECTOR(2.5, 3, 0) rec[-38] rec[-37] rec[-34] rec[-29] rec[-25] rec[-15] rec[-11] rec[-6] rec[-5] rec[-2]
SHIFT_COORDS(0, 0, 1)
TICK
}
DEPOLARIZE2(0.125) 2 3 6 5 8 9 11 10 0 1 4 7
MPP(0.125) X2*X3 X6*X5 X8*X9 X11*X10 X0*X1 X4*X7
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 4 5 10 3 9 6 11
DEPOLARIZE2(0.125) 2 1 8 7
MPP(0.125) Y0 Y2*Y1 Y4 Y5 Y8*Y7 Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(1.5, 4, 0) rec[-24] rec[-22] rec[-19] rec[-8] rec[-6] rec[-3]
DETECTOR(2.5, 1, 0) rec[-23] rec[-18] rec[-7] rec[-2]
DETECTOR(0.5, 1, 0) rec[-26] rec[-25] rec[-20] rec[-10] rec[-9] rec[-4]
DETECTOR(3.5, 4, 0) rec[-21] rec[-17] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 1 9 11
DEPOLARIZE2(0.125) 4 3 6 7 2 5 8 10
MPP(0.125) Z0 Z1 Z4*Z3 Z6*Z7 Z9 Z11 Z2*Z5 Z8*Z10
OBSERVABLE_INCLUDE(1) rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE2(0.125) 2 3 6 5 8 9 11 10 0 1 4 7
MPP(0.125) X2*X3 X6*X5 X8*X9 X11*X10 X0*X1 X4*X7
DETECTOR(1.5, 2, 0) rec[-54] rec[-53] rec[-49] rec[-46] rec[-45] rec[-42] rec[-12] rec[-11] rec[-8] rec[-6] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 1 9 11
DEPOLARIZE2(0.125) 4 3 6 7 2 5 8 10
MPP(0.125) Z0 Z1 Z4*Z3 Z6*Z7 Z9 Z11 Z2*Z5 Z8*Z10
OBSERVABLE_INCLUDE(1) rec[-1]
DETECTOR(1.5, 2, 0) rec[-20] rec[-19] rec[-16] rec[-6] rec[-5] rec[-2]
DETECTOR(2.5, 5, 0) rec[-18] rec[-17] rec[-15] rec[-4] rec[-3] rec[-1]
DETECTOR(0.5, -1, 0) rec[-22] rec[-21] rec[-8] rec[-7]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 4 5 10 3 9 6 11
DEPOLARIZE2(0.125) 2 1 8 7
MPP(0.125) Y0 Y2*Y1 Y4 Y5 Y8*Y7 Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(0.5, 3, 0) rec[-40] rec[-36] rec[-30] rec[-16] rec[-8] rec[-4]
DETECTOR(2.5, 3, 0) rec[-38] rec[-37] rec[-34] rec[-29] rec[-25] rec[-15] rec[-11] rec[-6] rec[-5] rec[-2]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 1 2 3 4 5 6 7 8 9 10 11
MPP(0.125) Y0 Y1 Y2 Y3 Y4 Y5 Y6 Y7 Y8 Y9 Y10 Y11
DETECTOR(0, 0.5, 0) rec[-22] rec[-12]
DETECTOR(1, 0.5, 0) rec[-21] rec[-11] rec[-10]
DETECTOR(1, 3.5, 0) rec[-20] rec[-8]
DETECTOR(2, 0.5, 0) rec[-19] rec[-7]
DETECTOR(2, 3.5, 0) rec[-18] rec[-5] rec[-4]
DETECTOR(3, 3.5, 0) rec[-17] rec[-2]
DETECTOR(0.5, 2, 0) rec[-16] rec[-9]
DETECTOR(1.5, 5, 0) rec[-15] rec[-3]
DETECTOR(2.5, 2, 0) rec[-14] rec[-6]
DETECTOR(3.5, 5, 0) rec[-13] rec[-1]
DETECTOR(1.5, 2, 0) rec[-36] rec[-35] rec[-31] rec[-28] rec[-27] rec[-24] rec[-10] rec[-9] rec[-8] rec[-7] rec[-6] rec[-5]
OBSERVABLE_INCLUDE(1) rec[-4] rec[-3] rec[-2] rec[-1]
TICK
""")
def test_exact_circuit_EM3_v2_H():
layout = HoneycombLayout(data_width=2,
data_height=6,
rounds=100,
noise_level=0.125,
noisy_gate_set='EM3_v2',
tested_observable='H',
sheared=True)
assert layout.ideal_and_noisy_circuit[1] == stim.Circuit("""
QUBIT_COORDS(0, 0) 0
QUBIT_COORDS(1, 0) 1
QUBIT_COORDS(1, 1) 2
QUBIT_COORDS(1, 2) 3
QUBIT_COORDS(1, 3) 4
QUBIT_COORDS(2, 1) 5
QUBIT_COORDS(2, 2) 6
QUBIT_COORDS(2, 3) 7
QUBIT_COORDS(2, 4) 8
QUBIT_COORDS(2, 5) 9
QUBIT_COORDS(3, 4) 10
QUBIT_COORDS(3, 5) 11
R 0 1 2 3 4 5 6 7 8 9 10 11
X_ERROR(0.0625) 0 1 2 3 4 5 6 7 8 9 10 11
TICK
H_YZ 0 1 2 3 4 5 6 7 8 9 10 11
TICK
R 22
XCX 2 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X3
E(0.00415549) X2 X3 X22
E(0.00415549) X2 Y3
E(0.00415549) X2 Y3 X22
E(0.00415549) X2 Z3
E(0.00415549) X2 Z3 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X3
E(0.00415549) Y2 X3 X22
E(0.00415549) Y2 Y3
E(0.00415549) Y2 Y3 X22
E(0.00415549) Y2 Z3
E(0.00415549) Y2 Z3 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X3
E(0.00415549) Z2 X3 X22
E(0.00415549) Z2 Y3
E(0.00415549) Z2 Y3 X22
E(0.00415549) Z2 Z3
E(0.00415549) Z2 Z3 X22
M 22
R 22
XCX 6 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X5
E(0.00415549) X6 X5 X22
E(0.00415549) X6 Y5
E(0.00415549) X6 Y5 X22
E(0.00415549) X6 Z5
E(0.00415549) X6 Z5 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X5
E(0.00415549) Y6 X5 X22
E(0.00415549) Y6 Y5
E(0.00415549) Y6 Y5 X22
E(0.00415549) Y6 Z5
E(0.00415549) Y6 Z5 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X5
E(0.00415549) Z6 X5 X22
E(0.00415549) Z6 Y5
E(0.00415549) Z6 Y5 X22
E(0.00415549) Z6 Z5
E(0.00415549) Z6 Z5 X22
M 22
R 22
XCX 8 22 9 22
E(0.00415549) X22
E(0.00415549) X9
E(0.00415549) X9 X22
E(0.00415549) Y9
E(0.00415549) Y9 X22
E(0.00415549) Z9
E(0.00415549) Z9 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X9
E(0.00415549) X8 X9 X22
E(0.00415549) X8 Y9
E(0.00415549) X8 Y9 X22
E(0.00415549) X8 Z9
E(0.00415549) X8 Z9 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X9
E(0.00415549) Y8 X9 X22
E(0.00415549) Y8 Y9
E(0.00415549) Y8 Y9 X22
E(0.00415549) Y8 Z9
E(0.00415549) Y8 Z9 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X9
E(0.00415549) Z8 X9 X22
E(0.00415549) Z8 Y9
E(0.00415549) Z8 Y9 X22
E(0.00415549) Z8 Z9
E(0.00415549) Z8 Z9 X22
M 22
R 22
XCX 11 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X11
E(0.00415549) X11 X22
E(0.00415549) X11 X10
E(0.00415549) X11 X10 X22
E(0.00415549) X11 Y10
E(0.00415549) X11 Y10 X22
E(0.00415549) X11 Z10
E(0.00415549) X11 Z10 X22
E(0.00415549) Y11
E(0.00415549) Y11 X22
E(0.00415549) Y11 X10
E(0.00415549) Y11 X10 X22
E(0.00415549) Y11 Y10
E(0.00415549) Y11 Y10 X22
E(0.00415549) Y11 Z10
E(0.00415549) Y11 Z10 X22
E(0.00415549) Z11
E(0.00415549) Z11 X22
E(0.00415549) Z11 X10
E(0.00415549) Z11 X10 X22
E(0.00415549) Z11 Y10
E(0.00415549) Z11 Y10 X22
E(0.00415549) Z11 Z10
E(0.00415549) Z11 Z10 X22
M 22
R 22
XCX 0 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X0
E(0.00415549) X0 X22
E(0.00415549) X0 X1
E(0.00415549) X0 X1 X22
E(0.00415549) X0 Y1
E(0.00415549) X0 Y1 X22
E(0.00415549) X0 Z1
E(0.00415549) X0 Z1 X22
E(0.00415549) Y0
E(0.00415549) Y0 X22
E(0.00415549) Y0 X1
E(0.00415549) Y0 X1 X22
E(0.00415549) Y0 Y1
E(0.00415549) Y0 Y1 X22
E(0.00415549) Y0 Z1
E(0.00415549) Y0 Z1 X22
E(0.00415549) Z0
E(0.00415549) Z0 X22
E(0.00415549) Z0 X1
E(0.00415549) Z0 X1 X22
E(0.00415549) Z0 Y1
E(0.00415549) Z0 Y1 X22
E(0.00415549) Z0 Z1
E(0.00415549) Z0 Z1 X22
M 22
R 22
XCX 4 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X7
E(0.00415549) X4 X7 X22
E(0.00415549) X4 Y7
E(0.00415549) X4 Y7 X22
E(0.00415549) X4 Z7
E(0.00415549) X4 Z7 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X7
E(0.00415549) Y4 X7 X22
E(0.00415549) Y4 Y7
E(0.00415549) Y4 Y7 X22
E(0.00415549) Y4 Z7
E(0.00415549) Y4 Z7 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X7
E(0.00415549) Z4 X7 X22
E(0.00415549) Z4 Y7
E(0.00415549) Z4 Y7 X22
E(0.00415549) Z4 Z7
E(0.00415549) Z4 Z7 X22
M 22
SHIFT_COORDS(0, 0, 1)
TICK
X_ERROR(0.125) 0 4 5 10 3 9 6 11
MPP(0.125) Y0
R 22
YCX 2 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X1
E(0.00415549) X2 X1 X22
E(0.00415549) X2 Y1
E(0.00415549) X2 Y1 X22
E(0.00415549) X2 Z1
E(0.00415549) X2 Z1 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X1
E(0.00415549) Y2 X1 X22
E(0.00415549) Y2 Y1
E(0.00415549) Y2 Y1 X22
E(0.00415549) Y2 Z1
E(0.00415549) Y2 Z1 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X1
E(0.00415549) Z2 X1 X22
E(0.00415549) Z2 Y1
E(0.00415549) Z2 Y1 X22
E(0.00415549) Z2 Z1
E(0.00415549) Z2 Z1 X22
M 22
MPP(0.125) Y4 Y5
R 22
YCX 8 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X7
E(0.00415549) X8 X7 X22
E(0.00415549) X8 Y7
E(0.00415549) X8 Y7 X22
E(0.00415549) X8 Z7
E(0.00415549) X8 Z7 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X7
E(0.00415549) Y8 X7 X22
E(0.00415549) Y8 Y7
E(0.00415549) Y8 Y7 X22
E(0.00415549) Y8 Z7
E(0.00415549) Y8 Z7 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X7
E(0.00415549) Z8 X7 X22
E(0.00415549) Z8 Y7
E(0.00415549) Z8 Y7 X22
E(0.00415549) Z8 Z7
E(0.00415549) Z8 Z7 X22
M 22
MPP(0.125) Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(1.5, 4, 0) rec[-8] rec[-6] rec[-3]
DETECTOR(2.5, 1, 0) rec[-7] rec[-2]
DETECTOR(0.5, 1, 0) rec[-10] rec[-9] rec[-4]
DETECTOR(3.5, 4, 0) rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
X_ERROR(0.125) 0 1 9 11
MPP(0.125) Z0 Z1
R 22
CX 4 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X3
E(0.00415549) X4 X3 X22
E(0.00415549) X4 Y3
E(0.00415549) X4 Y3 X22
E(0.00415549) X4 Z3
E(0.00415549) X4 Z3 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X3
E(0.00415549) Y4 X3 X22
E(0.00415549) Y4 Y3
E(0.00415549) Y4 Y3 X22
E(0.00415549) Y4 Z3
E(0.00415549) Y4 Z3 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X3
E(0.00415549) Z4 X3 X22
E(0.00415549) Z4 Y3
E(0.00415549) Z4 Y3 X22
E(0.00415549) Z4 Z3
E(0.00415549) Z4 Z3 X22
M 22
R 22
CX 6 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X7
E(0.00415549) X6 X7 X22
E(0.00415549) X6 Y7
E(0.00415549) X6 Y7 X22
E(0.00415549) X6 Z7
E(0.00415549) X6 Z7 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X7
E(0.00415549) Y6 X7 X22
E(0.00415549) Y6 Y7
E(0.00415549) Y6 Y7 X22
E(0.00415549) Y6 Z7
E(0.00415549) Y6 Z7 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X7
E(0.00415549) Z6 X7 X22
E(0.00415549) Z6 Y7
E(0.00415549) Z6 Y7 X22
E(0.00415549) Z6 Z7
E(0.00415549) Z6 Z7 X22
M 22
MPP(0.125) Z9 Z11
R 22
CX 2 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X5
E(0.00415549) X2 X5 X22
E(0.00415549) X2 Y5
E(0.00415549) X2 Y5 X22
E(0.00415549) X2 Z5
E(0.00415549) X2 Z5 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X5
E(0.00415549) Y2 X5 X22
E(0.00415549) Y2 Y5
E(0.00415549) Y2 Y5 X22
E(0.00415549) Y2 Z5
E(0.00415549) Y2 Z5 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X5
E(0.00415549) Z2 X5 X22
E(0.00415549) Z2 Y5
E(0.00415549) Z2 Y5 X22
E(0.00415549) Z2 Z5
E(0.00415549) Z2 Z5 X22
M 22
R 22
CX 8 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X10
E(0.00415549) X8 X10 X22
E(0.00415549) X8 Y10
E(0.00415549) X8 Y10 X22
E(0.00415549) X8 Z10
E(0.00415549) X8 Z10 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X10
E(0.00415549) Y8 X10 X22
E(0.00415549) Y8 Y10
E(0.00415549) Y8 Y10 X22
E(0.00415549) Y8 Z10
E(0.00415549) Y8 Z10 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X10
E(0.00415549) Z8 X10 X22
E(0.00415549) Z8 Y10
E(0.00415549) Z8 Y10 X22
E(0.00415549) Z8 Z10
E(0.00415549) Z8 Z10 X22
M 22
OBSERVABLE_INCLUDE(1) rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
R 22
XCX 2 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X3
E(0.00415549) X2 X3 X22
E(0.00415549) X2 Y3
E(0.00415549) X2 Y3 X22
E(0.00415549) X2 Z3
E(0.00415549) X2 Z3 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X3
E(0.00415549) Y2 X3 X22
E(0.00415549) Y2 Y3
E(0.00415549) Y2 Y3 X22
E(0.00415549) Y2 Z3
E(0.00415549) Y2 Z3 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X3
E(0.00415549) Z2 X3 X22
E(0.00415549) Z2 Y3
E(0.00415549) Z2 Y3 X22
E(0.00415549) Z2 Z3
E(0.00415549) Z2 Z3 X22
M 22
R 22
XCX 6 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X5
E(0.00415549) X6 X5 X22
E(0.00415549) X6 Y5
E(0.00415549) X6 Y5 X22
E(0.00415549) X6 Z5
E(0.00415549) X6 Z5 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X5
E(0.00415549) Y6 X5 X22
E(0.00415549) Y6 Y5
E(0.00415549) Y6 Y5 X22
E(0.00415549) Y6 Z5
E(0.00415549) Y6 Z5 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X5
E(0.00415549) Z6 X5 X22
E(0.00415549) Z6 Y5
E(0.00415549) Z6 Y5 X22
E(0.00415549) Z6 Z5
E(0.00415549) Z6 Z5 X22
M 22
R 22
XCX 8 22 9 22
E(0.00415549) X22
E(0.00415549) X9
E(0.00415549) X9 X22
E(0.00415549) Y9
E(0.00415549) Y9 X22
E(0.00415549) Z9
E(0.00415549) Z9 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X9
E(0.00415549) X8 X9 X22
E(0.00415549) X8 Y9
E(0.00415549) X8 Y9 X22
E(0.00415549) X8 Z9
E(0.00415549) X8 Z9 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X9
E(0.00415549) Y8 X9 X22
E(0.00415549) Y8 Y9
E(0.00415549) Y8 Y9 X22
E(0.00415549) Y8 Z9
E(0.00415549) Y8 Z9 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X9
E(0.00415549) Z8 X9 X22
E(0.00415549) Z8 Y9
E(0.00415549) Z8 Y9 X22
E(0.00415549) Z8 Z9
E(0.00415549) Z8 Z9 X22
M 22
R 22
XCX 11 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X11
E(0.00415549) X11 X22
E(0.00415549) X11 X10
E(0.00415549) X11 X10 X22
E(0.00415549) X11 Y10
E(0.00415549) X11 Y10 X22
E(0.00415549) X11 Z10
E(0.00415549) X11 Z10 X22
E(0.00415549) Y11
E(0.00415549) Y11 X22
E(0.00415549) Y11 X10
E(0.00415549) Y11 X10 X22
E(0.00415549) Y11 Y10
E(0.00415549) Y11 Y10 X22
E(0.00415549) Y11 Z10
E(0.00415549) Y11 Z10 X22
E(0.00415549) Z11
E(0.00415549) Z11 X22
E(0.00415549) Z11 X10
E(0.00415549) Z11 X10 X22
E(0.00415549) Z11 Y10
E(0.00415549) Z11 Y10 X22
E(0.00415549) Z11 Z10
E(0.00415549) Z11 Z10 X22
M 22
R 22
XCX 0 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X0
E(0.00415549) X0 X22
E(0.00415549) X0 X1
E(0.00415549) X0 X1 X22
E(0.00415549) X0 Y1
E(0.00415549) X0 Y1 X22
E(0.00415549) X0 Z1
E(0.00415549) X0 Z1 X22
E(0.00415549) Y0
E(0.00415549) Y0 X22
E(0.00415549) Y0 X1
E(0.00415549) Y0 X1 X22
E(0.00415549) Y0 Y1
E(0.00415549) Y0 Y1 X22
E(0.00415549) Y0 Z1
E(0.00415549) Y0 Z1 X22
E(0.00415549) Z0
E(0.00415549) Z0 X22
E(0.00415549) Z0 X1
E(0.00415549) Z0 X1 X22
E(0.00415549) Z0 Y1
E(0.00415549) Z0 Y1 X22
E(0.00415549) Z0 Z1
E(0.00415549) Z0 Z1 X22
M 22
R 22
XCX 4 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X7
E(0.00415549) X4 X7 X22
E(0.00415549) X4 Y7
E(0.00415549) X4 Y7 X22
E(0.00415549) X4 Z7
E(0.00415549) X4 Z7 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X7
E(0.00415549) Y4 X7 X22
E(0.00415549) Y4 Y7
E(0.00415549) Y4 Y7 X22
E(0.00415549) Y4 Z7
E(0.00415549) Y4 Z7 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X7
E(0.00415549) Z4 X7 X22
E(0.00415549) Z4 Y7
E(0.00415549) Z4 Y7 X22
E(0.00415549) Z4 Z7
E(0.00415549) Z4 Z7 X22
M 22
DETECTOR(1.5, 2, 0) rec[-12] rec[-11] rec[-8] rec[-6] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
X_ERROR(0.125) 0 1 9 11
MPP(0.125) Z0 Z1
R 22
CX 4 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X3
E(0.00415549) X4 X3 X22
E(0.00415549) X4 Y3
E(0.00415549) X4 Y3 X22
E(0.00415549) X4 Z3
E(0.00415549) X4 Z3 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X3
E(0.00415549) Y4 X3 X22
E(0.00415549) Y4 Y3
E(0.00415549) Y4 Y3 X22
E(0.00415549) Y4 Z3
E(0.00415549) Y4 Z3 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X3
E(0.00415549) Z4 X3 X22
E(0.00415549) Z4 Y3
E(0.00415549) Z4 Y3 X22
E(0.00415549) Z4 Z3
E(0.00415549) Z4 Z3 X22
M 22
R 22
CX 6 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X7
E(0.00415549) X6 X7 X22
E(0.00415549) X6 Y7
E(0.00415549) X6 Y7 X22
E(0.00415549) X6 Z7
E(0.00415549) X6 Z7 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X7
E(0.00415549) Y6 X7 X22
E(0.00415549) Y6 Y7
E(0.00415549) Y6 Y7 X22
E(0.00415549) Y6 Z7
E(0.00415549) Y6 Z7 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X7
E(0.00415549) Z6 X7 X22
E(0.00415549) Z6 Y7
E(0.00415549) Z6 Y7 X22
E(0.00415549) Z6 Z7
E(0.00415549) Z6 Z7 X22
M 22
MPP(0.125) Z9 Z11
R 22
CX 2 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X5
E(0.00415549) X2 X5 X22
E(0.00415549) X2 Y5
E(0.00415549) X2 Y5 X22
E(0.00415549) X2 Z5
E(0.00415549) X2 Z5 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X5
E(0.00415549) Y2 X5 X22
E(0.00415549) Y2 Y5
E(0.00415549) Y2 Y5 X22
E(0.00415549) Y2 Z5
E(0.00415549) Y2 Z5 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X5
E(0.00415549) Z2 X5 X22
E(0.00415549) Z2 Y5
E(0.00415549) Z2 Y5 X22
E(0.00415549) Z2 Z5
E(0.00415549) Z2 Z5 X22
M 22
R 22
CX 8 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X10
E(0.00415549) X8 X10 X22
E(0.00415549) X8 Y10
E(0.00415549) X8 Y10 X22
E(0.00415549) X8 Z10
E(0.00415549) X8 Z10 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X10
E(0.00415549) Y8 X10 X22
E(0.00415549) Y8 Y10
E(0.00415549) Y8 Y10 X22
E(0.00415549) Y8 Z10
E(0.00415549) Y8 Z10 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X10
E(0.00415549) Z8 X10 X22
E(0.00415549) Z8 Y10
E(0.00415549) Z8 Y10 X22
E(0.00415549) Z8 Z10
E(0.00415549) Z8 Z10 X22
M 22
OBSERVABLE_INCLUDE(1) rec[-1]
DETECTOR(1.5, 2, 0) rec[-20] rec[-19] rec[-16] rec[-6] rec[-5] rec[-2]
DETECTOR(2.5, 5, 0) rec[-18] rec[-17] rec[-15] rec[-4] rec[-3] rec[-1]
DETECTOR(0.5, -1, 0) rec[-22] rec[-21] rec[-8] rec[-7]
SHIFT_COORDS(0, 0, 1)
TICK
X_ERROR(0.125) 0 4 5 10 3 9 6 11
MPP(0.125) Y0
R 22
YCX 2 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X1
E(0.00415549) X2 X1 X22
E(0.00415549) X2 Y1
E(0.00415549) X2 Y1 X22
E(0.00415549) X2 Z1
E(0.00415549) X2 Z1 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X1
E(0.00415549) Y2 X1 X22
E(0.00415549) Y2 Y1
E(0.00415549) Y2 Y1 X22
E(0.00415549) Y2 Z1
E(0.00415549) Y2 Z1 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X1
E(0.00415549) Z2 X1 X22
E(0.00415549) Z2 Y1
E(0.00415549) Z2 Y1 X22
E(0.00415549) Z2 Z1
E(0.00415549) Z2 Z1 X22
M 22
MPP(0.125) Y4 Y5
R 22
YCX 8 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X7
E(0.00415549) X8 X7 X22
E(0.00415549) X8 Y7
E(0.00415549) X8 Y7 X22
E(0.00415549) X8 Z7
E(0.00415549) X8 Z7 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X7
E(0.00415549) Y8 X7 X22
E(0.00415549) Y8 Y7
E(0.00415549) Y8 Y7 X22
E(0.00415549) Y8 Z7
E(0.00415549) Y8 Z7 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X7
E(0.00415549) Z8 X7 X22
E(0.00415549) Z8 Y7
E(0.00415549) Z8 Y7 X22
E(0.00415549) Z8 Z7
E(0.00415549) Z8 Z7 X22
M 22
MPP(0.125) Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(0.5, 3, 0) rec[-40] rec[-36] rec[-30] rec[-16] rec[-8] rec[-4]
DETECTOR(2.5, 3, 0) rec[-38] rec[-37] rec[-34] rec[-29] rec[-25] rec[-15] rec[-11] rec[-6] rec[-5] rec[-2]
SHIFT_COORDS(0, 0, 1)
TICK
REPEAT 48 {
R 22
XCX 2 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X3
E(0.00415549) X2 X3 X22
E(0.00415549) X2 Y3
E(0.00415549) X2 Y3 X22
E(0.00415549) X2 Z3
E(0.00415549) X2 Z3 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X3
E(0.00415549) Y2 X3 X22
E(0.00415549) Y2 Y3
E(0.00415549) Y2 Y3 X22
E(0.00415549) Y2 Z3
E(0.00415549) Y2 Z3 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X3
E(0.00415549) Z2 X3 X22
E(0.00415549) Z2 Y3
E(0.00415549) Z2 Y3 X22
E(0.00415549) Z2 Z3
E(0.00415549) Z2 Z3 X22
M 22
R 22
XCX 6 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X5
E(0.00415549) X6 X5 X22
E(0.00415549) X6 Y5
E(0.00415549) X6 Y5 X22
E(0.00415549) X6 Z5
E(0.00415549) X6 Z5 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X5
E(0.00415549) Y6 X5 X22
E(0.00415549) Y6 Y5
E(0.00415549) Y6 Y5 X22
E(0.00415549) Y6 Z5
E(0.00415549) Y6 Z5 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X5
E(0.00415549) Z6 X5 X22
E(0.00415549) Z6 Y5
E(0.00415549) Z6 Y5 X22
E(0.00415549) Z6 Z5
E(0.00415549) Z6 Z5 X22
M 22
R 22
XCX 8 22 9 22
E(0.00415549) X22
E(0.00415549) X9
E(0.00415549) X9 X22
E(0.00415549) Y9
E(0.00415549) Y9 X22
E(0.00415549) Z9
E(0.00415549) Z9 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X9
E(0.00415549) X8 X9 X22
E(0.00415549) X8 Y9
E(0.00415549) X8 Y9 X22
E(0.00415549) X8 Z9
E(0.00415549) X8 Z9 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X9
E(0.00415549) Y8 X9 X22
E(0.00415549) Y8 Y9
E(0.00415549) Y8 Y9 X22
E(0.00415549) Y8 Z9
E(0.00415549) Y8 Z9 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X9
E(0.00415549) Z8 X9 X22
E(0.00415549) Z8 Y9
E(0.00415549) Z8 Y9 X22
E(0.00415549) Z8 Z9
E(0.00415549) Z8 Z9 X22
M 22
R 22
XCX 11 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X11
E(0.00415549) X11 X22
E(0.00415549) X11 X10
E(0.00415549) X11 X10 X22
E(0.00415549) X11 Y10
E(0.00415549) X11 Y10 X22
E(0.00415549) X11 Z10
E(0.00415549) X11 Z10 X22
E(0.00415549) Y11
E(0.00415549) Y11 X22
E(0.00415549) Y11 X10
E(0.00415549) Y11 X10 X22
E(0.00415549) Y11 Y10
E(0.00415549) Y11 Y10 X22
E(0.00415549) Y11 Z10
E(0.00415549) Y11 Z10 X22
E(0.00415549) Z11
E(0.00415549) Z11 X22
E(0.00415549) Z11 X10
E(0.00415549) Z11 X10 X22
E(0.00415549) Z11 Y10
E(0.00415549) Z11 Y10 X22
E(0.00415549) Z11 Z10
E(0.00415549) Z11 Z10 X22
M 22
R 22
XCX 0 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X0
E(0.00415549) X0 X22
E(0.00415549) X0 X1
E(0.00415549) X0 X1 X22
E(0.00415549) X0 Y1
E(0.00415549) X0 Y1 X22
E(0.00415549) X0 Z1
E(0.00415549) X0 Z1 X22
E(0.00415549) Y0
E(0.00415549) Y0 X22
E(0.00415549) Y0 X1
E(0.00415549) Y0 X1 X22
E(0.00415549) Y0 Y1
E(0.00415549) Y0 Y1 X22
E(0.00415549) Y0 Z1
E(0.00415549) Y0 Z1 X22
E(0.00415549) Z0
E(0.00415549) Z0 X22
E(0.00415549) Z0 X1
E(0.00415549) Z0 X1 X22
E(0.00415549) Z0 Y1
E(0.00415549) Z0 Y1 X22
E(0.00415549) Z0 Z1
E(0.00415549) Z0 Z1 X22
M 22
R 22
XCX 4 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X7
E(0.00415549) X4 X7 X22
E(0.00415549) X4 Y7
E(0.00415549) X4 Y7 X22
E(0.00415549) X4 Z7
E(0.00415549) X4 Z7 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X7
E(0.00415549) Y4 X7 X22
E(0.00415549) Y4 Y7
E(0.00415549) Y4 Y7 X22
E(0.00415549) Y4 Z7
E(0.00415549) Y4 Z7 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X7
E(0.00415549) Z4 X7 X22
E(0.00415549) Z4 Y7
E(0.00415549) Z4 Y7 X22
E(0.00415549) Z4 Z7
E(0.00415549) Z4 Z7 X22
M 22
SHIFT_COORDS(0, 0, 1)
TICK
X_ERROR(0.125) 0 4 5 10 3 9 6 11
MPP(0.125) Y0
R 22
YCX 2 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X1
E(0.00415549) X2 X1 X22
E(0.00415549) X2 Y1
E(0.00415549) X2 Y1 X22
E(0.00415549) X2 Z1
E(0.00415549) X2 Z1 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X1
E(0.00415549) Y2 X1 X22
E(0.00415549) Y2 Y1
E(0.00415549) Y2 Y1 X22
E(0.00415549) Y2 Z1
E(0.00415549) Y2 Z1 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X1
E(0.00415549) Z2 X1 X22
E(0.00415549) Z2 Y1
E(0.00415549) Z2 Y1 X22
E(0.00415549) Z2 Z1
E(0.00415549) Z2 Z1 X22
M 22
MPP(0.125) Y4 Y5
R 22
YCX 8 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X7
E(0.00415549) X8 X7 X22
E(0.00415549) X8 Y7
E(0.00415549) X8 Y7 X22
E(0.00415549) X8 Z7
E(0.00415549) X8 Z7 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X7
E(0.00415549) Y8 X7 X22
E(0.00415549) Y8 Y7
E(0.00415549) Y8 Y7 X22
E(0.00415549) Y8 Z7
E(0.00415549) Y8 Z7 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X7
E(0.00415549) Z8 X7 X22
E(0.00415549) Z8 Y7
E(0.00415549) Z8 Y7 X22
E(0.00415549) Z8 Z7
E(0.00415549) Z8 Z7 X22
M 22
MPP(0.125) Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(1.5, 4, 0) rec[-24] rec[-22] rec[-19] rec[-8] rec[-6] rec[-3]
DETECTOR(2.5, 1, 0) rec[-23] rec[-18] rec[-7] rec[-2]
DETECTOR(0.5, 1, 0) rec[-26] rec[-25] rec[-20] rec[-10] rec[-9] rec[-4]
DETECTOR(3.5, 4, 0) rec[-21] rec[-17] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
X_ERROR(0.125) 0 1 9 11
MPP(0.125) Z0 Z1
R 22
CX 4 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X3
E(0.00415549) X4 X3 X22
E(0.00415549) X4 Y3
E(0.00415549) X4 Y3 X22
E(0.00415549) X4 Z3
E(0.00415549) X4 Z3 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X3
E(0.00415549) Y4 X3 X22
E(0.00415549) Y4 Y3
E(0.00415549) Y4 Y3 X22
E(0.00415549) Y4 Z3
E(0.00415549) Y4 Z3 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X3
E(0.00415549) Z4 X3 X22
E(0.00415549) Z4 Y3
E(0.00415549) Z4 Y3 X22
E(0.00415549) Z4 Z3
E(0.00415549) Z4 Z3 X22
M 22
R 22
CX 6 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X7
E(0.00415549) X6 X7 X22
E(0.00415549) X6 Y7
E(0.00415549) X6 Y7 X22
E(0.00415549) X6 Z7
E(0.00415549) X6 Z7 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X7
E(0.00415549) Y6 X7 X22
E(0.00415549) Y6 Y7
E(0.00415549) Y6 Y7 X22
E(0.00415549) Y6 Z7
E(0.00415549) Y6 Z7 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X7
E(0.00415549) Z6 X7 X22
E(0.00415549) Z6 Y7
E(0.00415549) Z6 Y7 X22
E(0.00415549) Z6 Z7
E(0.00415549) Z6 Z7 X22
M 22
MPP(0.125) Z9 Z11
R 22
CX 2 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X5
E(0.00415549) X2 X5 X22
E(0.00415549) X2 Y5
E(0.00415549) X2 Y5 X22
E(0.00415549) X2 Z5
E(0.00415549) X2 Z5 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X5
E(0.00415549) Y2 X5 X22
E(0.00415549) Y2 Y5
E(0.00415549) Y2 Y5 X22
E(0.00415549) Y2 Z5
E(0.00415549) Y2 Z5 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X5
E(0.00415549) Z2 X5 X22
E(0.00415549) Z2 Y5
E(0.00415549) Z2 Y5 X22
E(0.00415549) Z2 Z5
E(0.00415549) Z2 Z5 X22
M 22
R 22
CX 8 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X10
E(0.00415549) X8 X10 X22
E(0.00415549) X8 Y10
E(0.00415549) X8 Y10 X22
E(0.00415549) X8 Z10
E(0.00415549) X8 Z10 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X10
E(0.00415549) Y8 X10 X22
E(0.00415549) Y8 Y10
E(0.00415549) Y8 Y10 X22
E(0.00415549) Y8 Z10
E(0.00415549) Y8 Z10 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X10
E(0.00415549) Z8 X10 X22
E(0.00415549) Z8 Y10
E(0.00415549) Z8 Y10 X22
E(0.00415549) Z8 Z10
E(0.00415549) Z8 Z10 X22
M 22
OBSERVABLE_INCLUDE(1) rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
R 22
XCX 2 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X3
E(0.00415549) X2 X3 X22
E(0.00415549) X2 Y3
E(0.00415549) X2 Y3 X22
E(0.00415549) X2 Z3
E(0.00415549) X2 Z3 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X3
E(0.00415549) Y2 X3 X22
E(0.00415549) Y2 Y3
E(0.00415549) Y2 Y3 X22
E(0.00415549) Y2 Z3
E(0.00415549) Y2 Z3 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X3
E(0.00415549) Z2 X3 X22
E(0.00415549) Z2 Y3
E(0.00415549) Z2 Y3 X22
E(0.00415549) Z2 Z3
E(0.00415549) Z2 Z3 X22
M 22
R 22
XCX 6 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X5
E(0.00415549) X6 X5 X22
E(0.00415549) X6 Y5
E(0.00415549) X6 Y5 X22
E(0.00415549) X6 Z5
E(0.00415549) X6 Z5 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X5
E(0.00415549) Y6 X5 X22
E(0.00415549) Y6 Y5
E(0.00415549) Y6 Y5 X22
E(0.00415549) Y6 Z5
E(0.00415549) Y6 Z5 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X5
E(0.00415549) Z6 X5 X22
E(0.00415549) Z6 Y5
E(0.00415549) Z6 Y5 X22
E(0.00415549) Z6 Z5
E(0.00415549) Z6 Z5 X22
M 22
R 22
XCX 8 22 9 22
E(0.00415549) X22
E(0.00415549) X9
E(0.00415549) X9 X22
E(0.00415549) Y9
E(0.00415549) Y9 X22
E(0.00415549) Z9
E(0.00415549) Z9 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X9
E(0.00415549) X8 X9 X22
E(0.00415549) X8 Y9
E(0.00415549) X8 Y9 X22
E(0.00415549) X8 Z9
E(0.00415549) X8 Z9 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X9
E(0.00415549) Y8 X9 X22
E(0.00415549) Y8 Y9
E(0.00415549) Y8 Y9 X22
E(0.00415549) Y8 Z9
E(0.00415549) Y8 Z9 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X9
E(0.00415549) Z8 X9 X22
E(0.00415549) Z8 Y9
E(0.00415549) Z8 Y9 X22
E(0.00415549) Z8 Z9
E(0.00415549) Z8 Z9 X22
M 22
R 22
XCX 11 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X11
E(0.00415549) X11 X22
E(0.00415549) X11 X10
E(0.00415549) X11 X10 X22
E(0.00415549) X11 Y10
E(0.00415549) X11 Y10 X22
E(0.00415549) X11 Z10
E(0.00415549) X11 Z10 X22
E(0.00415549) Y11
E(0.00415549) Y11 X22
E(0.00415549) Y11 X10
E(0.00415549) Y11 X10 X22
E(0.00415549) Y11 Y10
E(0.00415549) Y11 Y10 X22
E(0.00415549) Y11 Z10
E(0.00415549) Y11 Z10 X22
E(0.00415549) Z11
E(0.00415549) Z11 X22
E(0.00415549) Z11 X10
E(0.00415549) Z11 X10 X22
E(0.00415549) Z11 Y10
E(0.00415549) Z11 Y10 X22
E(0.00415549) Z11 Z10
E(0.00415549) Z11 Z10 X22
M 22
R 22
XCX 0 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X0
E(0.00415549) X0 X22
E(0.00415549) X0 X1
E(0.00415549) X0 X1 X22
E(0.00415549) X0 Y1
E(0.00415549) X0 Y1 X22
E(0.00415549) X0 Z1
E(0.00415549) X0 Z1 X22
E(0.00415549) Y0
E(0.00415549) Y0 X22
E(0.00415549) Y0 X1
E(0.00415549) Y0 X1 X22
E(0.00415549) Y0 Y1
E(0.00415549) Y0 Y1 X22
E(0.00415549) Y0 Z1
E(0.00415549) Y0 Z1 X22
E(0.00415549) Z0
E(0.00415549) Z0 X22
E(0.00415549) Z0 X1
E(0.00415549) Z0 X1 X22
E(0.00415549) Z0 Y1
E(0.00415549) Z0 Y1 X22
E(0.00415549) Z0 Z1
E(0.00415549) Z0 Z1 X22
M 22
R 22
XCX 4 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X7
E(0.00415549) X4 X7 X22
E(0.00415549) X4 Y7
E(0.00415549) X4 Y7 X22
E(0.00415549) X4 Z7
E(0.00415549) X4 Z7 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X7
E(0.00415549) Y4 X7 X22
E(0.00415549) Y4 Y7
E(0.00415549) Y4 Y7 X22
E(0.00415549) Y4 Z7
E(0.00415549) Y4 Z7 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X7
E(0.00415549) Z4 X7 X22
E(0.00415549) Z4 Y7
E(0.00415549) Z4 Y7 X22
E(0.00415549) Z4 Z7
E(0.00415549) Z4 Z7 X22
M 22
DETECTOR(1.5, 2, 0) rec[-54] rec[-53] rec[-49] rec[-46] rec[-45] rec[-42] rec[-12] rec[-11] rec[-8] rec[-6] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
X_ERROR(0.125) 0 1 9 11
MPP(0.125) Z0 Z1
R 22
CX 4 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X3
E(0.00415549) X4 X3 X22
E(0.00415549) X4 Y3
E(0.00415549) X4 Y3 X22
E(0.00415549) X4 Z3
E(0.00415549) X4 Z3 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X3
E(0.00415549) Y4 X3 X22
E(0.00415549) Y4 Y3
E(0.00415549) Y4 Y3 X22
E(0.00415549) Y4 Z3
E(0.00415549) Y4 Z3 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X3
E(0.00415549) Z4 X3 X22
E(0.00415549) Z4 Y3
E(0.00415549) Z4 Y3 X22
E(0.00415549) Z4 Z3
E(0.00415549) Z4 Z3 X22
M 22
R 22
CX 6 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X7
E(0.00415549) X6 X7 X22
E(0.00415549) X6 Y7
E(0.00415549) X6 Y7 X22
E(0.00415549) X6 Z7
E(0.00415549) X6 Z7 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X7
E(0.00415549) Y6 X7 X22
E(0.00415549) Y6 Y7
E(0.00415549) Y6 Y7 X22
E(0.00415549) Y6 Z7
E(0.00415549) Y6 Z7 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X7
E(0.00415549) Z6 X7 X22
E(0.00415549) Z6 Y7
E(0.00415549) Z6 Y7 X22
E(0.00415549) Z6 Z7
E(0.00415549) Z6 Z7 X22
M 22
MPP(0.125) Z9 Z11
R 22
CX 2 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X5
E(0.00415549) X2 X5 X22
E(0.00415549) X2 Y5
E(0.00415549) X2 Y5 X22
E(0.00415549) X2 Z5
E(0.00415549) X2 Z5 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X5
E(0.00415549) Y2 X5 X22
E(0.00415549) Y2 Y5
E(0.00415549) Y2 Y5 X22
E(0.00415549) Y2 Z5
E(0.00415549) Y2 Z5 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X5
E(0.00415549) Z2 X5 X22
E(0.00415549) Z2 Y5
E(0.00415549) Z2 Y5 X22
E(0.00415549) Z2 Z5
E(0.00415549) Z2 Z5 X22
M 22
R 22
CX 8 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X10
E(0.00415549) X8 X10 X22
E(0.00415549) X8 Y10
E(0.00415549) X8 Y10 X22
E(0.00415549) X8 Z10
E(0.00415549) X8 Z10 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X10
E(0.00415549) Y8 X10 X22
E(0.00415549) Y8 Y10
E(0.00415549) Y8 Y10 X22
E(0.00415549) Y8 Z10
E(0.00415549) Y8 Z10 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X10
E(0.00415549) Z8 X10 X22
E(0.00415549) Z8 Y10
E(0.00415549) Z8 Y10 X22
E(0.00415549) Z8 Z10
E(0.00415549) Z8 Z10 X22
M 22
OBSERVABLE_INCLUDE(1) rec[-1]
DETECTOR(1.5, 2, 0) rec[-20] rec[-19] rec[-16] rec[-6] rec[-5] rec[-2]
DETECTOR(2.5, 5, 0) rec[-18] rec[-17] rec[-15] rec[-4] rec[-3] rec[-1]
DETECTOR(0.5, -1, 0) rec[-22] rec[-21] rec[-8] rec[-7]
SHIFT_COORDS(0, 0, 1)
TICK
X_ERROR(0.125) 0 4 5 10 3 9 6 11
MPP(0.125) Y0
R 22
YCX 2 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X1
E(0.00415549) X2 X1 X22
E(0.00415549) X2 Y1
E(0.00415549) X2 Y1 X22
E(0.00415549) X2 Z1
E(0.00415549) X2 Z1 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X1
E(0.00415549) Y2 X1 X22
E(0.00415549) Y2 Y1
E(0.00415549) Y2 Y1 X22
E(0.00415549) Y2 Z1
E(0.00415549) Y2 Z1 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X1
E(0.00415549) Z2 X1 X22
E(0.00415549) Z2 Y1
E(0.00415549) Z2 Y1 X22
E(0.00415549) Z2 Z1
E(0.00415549) Z2 Z1 X22
M 22
MPP(0.125) Y4 Y5
R 22
YCX 8 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X7
E(0.00415549) X8 X7 X22
E(0.00415549) X8 Y7
E(0.00415549) X8 Y7 X22
E(0.00415549) X8 Z7
E(0.00415549) X8 Z7 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X7
E(0.00415549) Y8 X7 X22
E(0.00415549) Y8 Y7
E(0.00415549) Y8 Y7 X22
E(0.00415549) Y8 Z7
E(0.00415549) Y8 Z7 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X7
E(0.00415549) Z8 X7 X22
E(0.00415549) Z8 Y7
E(0.00415549) Z8 Y7 X22
E(0.00415549) Z8 Z7
E(0.00415549) Z8 Z7 X22
M 22
MPP(0.125) Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(0.5, 3, 0) rec[-40] rec[-36] rec[-30] rec[-16] rec[-8] rec[-4]
DETECTOR(2.5, 3, 0) rec[-38] rec[-37] rec[-34] rec[-29] rec[-25] rec[-15] rec[-11] rec[-6] rec[-5] rec[-2]
SHIFT_COORDS(0, 0, 1)
TICK
}
R 22
XCX 2 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X3
E(0.00415549) X2 X3 X22
E(0.00415549) X2 Y3
E(0.00415549) X2 Y3 X22
E(0.00415549) X2 Z3
E(0.00415549) X2 Z3 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X3
E(0.00415549) Y2 X3 X22
E(0.00415549) Y2 Y3
E(0.00415549) Y2 Y3 X22
E(0.00415549) Y2 Z3
E(0.00415549) Y2 Z3 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X3
E(0.00415549) Z2 X3 X22
E(0.00415549) Z2 Y3
E(0.00415549) Z2 Y3 X22
E(0.00415549) Z2 Z3
E(0.00415549) Z2 Z3 X22
M 22
R 22
XCX 6 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X5
E(0.00415549) X6 X5 X22
E(0.00415549) X6 Y5
E(0.00415549) X6 Y5 X22
E(0.00415549) X6 Z5
E(0.00415549) X6 Z5 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X5
E(0.00415549) Y6 X5 X22
E(0.00415549) Y6 Y5
E(0.00415549) Y6 Y5 X22
E(0.00415549) Y6 Z5
E(0.00415549) Y6 Z5 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X5
E(0.00415549) Z6 X5 X22
E(0.00415549) Z6 Y5
E(0.00415549) Z6 Y5 X22
E(0.00415549) Z6 Z5
E(0.00415549) Z6 Z5 X22
M 22
R 22
XCX 8 22 9 22
E(0.00415549) X22
E(0.00415549) X9
E(0.00415549) X9 X22
E(0.00415549) Y9
E(0.00415549) Y9 X22
E(0.00415549) Z9
E(0.00415549) Z9 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X9
E(0.00415549) X8 X9 X22
E(0.00415549) X8 Y9
E(0.00415549) X8 Y9 X22
E(0.00415549) X8 Z9
E(0.00415549) X8 Z9 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X9
E(0.00415549) Y8 X9 X22
E(0.00415549) Y8 Y9
E(0.00415549) Y8 Y9 X22
E(0.00415549) Y8 Z9
E(0.00415549) Y8 Z9 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X9
E(0.00415549) Z8 X9 X22
E(0.00415549) Z8 Y9
E(0.00415549) Z8 Y9 X22
E(0.00415549) Z8 Z9
E(0.00415549) Z8 Z9 X22
M 22
R 22
XCX 11 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X11
E(0.00415549) X11 X22
E(0.00415549) X11 X10
E(0.00415549) X11 X10 X22
E(0.00415549) X11 Y10
E(0.00415549) X11 Y10 X22
E(0.00415549) X11 Z10
E(0.00415549) X11 Z10 X22
E(0.00415549) Y11
E(0.00415549) Y11 X22
E(0.00415549) Y11 X10
E(0.00415549) Y11 X10 X22
E(0.00415549) Y11 Y10
E(0.00415549) Y11 Y10 X22
E(0.00415549) Y11 Z10
E(0.00415549) Y11 Z10 X22
E(0.00415549) Z11
E(0.00415549) Z11 X22
E(0.00415549) Z11 X10
E(0.00415549) Z11 X10 X22
E(0.00415549) Z11 Y10
E(0.00415549) Z11 Y10 X22
E(0.00415549) Z11 Z10
E(0.00415549) Z11 Z10 X22
M 22
R 22
XCX 0 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X0
E(0.00415549) X0 X22
E(0.00415549) X0 X1
E(0.00415549) X0 X1 X22
E(0.00415549) X0 Y1
E(0.00415549) X0 Y1 X22
E(0.00415549) X0 Z1
E(0.00415549) X0 Z1 X22
E(0.00415549) Y0
E(0.00415549) Y0 X22
E(0.00415549) Y0 X1
E(0.00415549) Y0 X1 X22
E(0.00415549) Y0 Y1
E(0.00415549) Y0 Y1 X22
E(0.00415549) Y0 Z1
E(0.00415549) Y0 Z1 X22
E(0.00415549) Z0
E(0.00415549) Z0 X22
E(0.00415549) Z0 X1
E(0.00415549) Z0 X1 X22
E(0.00415549) Z0 Y1
E(0.00415549) Z0 Y1 X22
E(0.00415549) Z0 Z1
E(0.00415549) Z0 Z1 X22
M 22
R 22
XCX 4 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X7
E(0.00415549) X4 X7 X22
E(0.00415549) X4 Y7
E(0.00415549) X4 Y7 X22
E(0.00415549) X4 Z7
E(0.00415549) X4 Z7 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X7
E(0.00415549) Y4 X7 X22
E(0.00415549) Y4 Y7
E(0.00415549) Y4 Y7 X22
E(0.00415549) Y4 Z7
E(0.00415549) Y4 Z7 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X7
E(0.00415549) Z4 X7 X22
E(0.00415549) Z4 Y7
E(0.00415549) Z4 Y7 X22
E(0.00415549) Z4 Z7
E(0.00415549) Z4 Z7 X22
M 22
SHIFT_COORDS(0, 0, 1)
TICK
X_ERROR(0.125) 0 4 5 10 3 9 6 11
MPP(0.125) Y0
R 22
YCX 2 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X1
E(0.00415549) X2 X1 X22
E(0.00415549) X2 Y1
E(0.00415549) X2 Y1 X22
E(0.00415549) X2 Z1
E(0.00415549) X2 Z1 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X1
E(0.00415549) Y2 X1 X22
E(0.00415549) Y2 Y1
E(0.00415549) Y2 Y1 X22
E(0.00415549) Y2 Z1
E(0.00415549) Y2 Z1 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X1
E(0.00415549) Z2 X1 X22
E(0.00415549) Z2 Y1
E(0.00415549) Z2 Y1 X22
E(0.00415549) Z2 Z1
E(0.00415549) Z2 Z1 X22
M 22
MPP(0.125) Y4 Y5
R 22
YCX 8 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X7
E(0.00415549) X8 X7 X22
E(0.00415549) X8 Y7
E(0.00415549) X8 Y7 X22
E(0.00415549) X8 Z7
E(0.00415549) X8 Z7 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X7
E(0.00415549) Y8 X7 X22
E(0.00415549) Y8 Y7
E(0.00415549) Y8 Y7 X22
E(0.00415549) Y8 Z7
E(0.00415549) Y8 Z7 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X7
E(0.00415549) Z8 X7 X22
E(0.00415549) Z8 Y7
E(0.00415549) Z8 Y7 X22
E(0.00415549) Z8 Z7
E(0.00415549) Z8 Z7 X22
M 22
MPP(0.125) Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(1.5, 4, 0) rec[-24] rec[-22] rec[-19] rec[-8] rec[-6] rec[-3]
DETECTOR(2.5, 1, 0) rec[-23] rec[-18] rec[-7] rec[-2]
DETECTOR(0.5, 1, 0) rec[-26] rec[-25] rec[-20] rec[-10] rec[-9] rec[-4]
DETECTOR(3.5, 4, 0) rec[-21] rec[-17] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
X_ERROR(0.125) 0 1 9 11
MPP(0.125) Z0 Z1
R 22
CX 4 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X3
E(0.00415549) X4 X3 X22
E(0.00415549) X4 Y3
E(0.00415549) X4 Y3 X22
E(0.00415549) X4 Z3
E(0.00415549) X4 Z3 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X3
E(0.00415549) Y4 X3 X22
E(0.00415549) Y4 Y3
E(0.00415549) Y4 Y3 X22
E(0.00415549) Y4 Z3
E(0.00415549) Y4 Z3 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X3
E(0.00415549) Z4 X3 X22
E(0.00415549) Z4 Y3
E(0.00415549) Z4 Y3 X22
E(0.00415549) Z4 Z3
E(0.00415549) Z4 Z3 X22
M 22
R 22
CX 6 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X7
E(0.00415549) X6 X7 X22
E(0.00415549) X6 Y7
E(0.00415549) X6 Y7 X22
E(0.00415549) X6 Z7
E(0.00415549) X6 Z7 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X7
E(0.00415549) Y6 X7 X22
E(0.00415549) Y6 Y7
E(0.00415549) Y6 Y7 X22
E(0.00415549) Y6 Z7
E(0.00415549) Y6 Z7 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X7
E(0.00415549) Z6 X7 X22
E(0.00415549) Z6 Y7
E(0.00415549) Z6 Y7 X22
E(0.00415549) Z6 Z7
E(0.00415549) Z6 Z7 X22
M 22
MPP(0.125) Z9 Z11
R 22
CX 2 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X5
E(0.00415549) X2 X5 X22
E(0.00415549) X2 Y5
E(0.00415549) X2 Y5 X22
E(0.00415549) X2 Z5
E(0.00415549) X2 Z5 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X5
E(0.00415549) Y2 X5 X22
E(0.00415549) Y2 Y5
E(0.00415549) Y2 Y5 X22
E(0.00415549) Y2 Z5
E(0.00415549) Y2 Z5 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X5
E(0.00415549) Z2 X5 X22
E(0.00415549) Z2 Y5
E(0.00415549) Z2 Y5 X22
E(0.00415549) Z2 Z5
E(0.00415549) Z2 Z5 X22
M 22
R 22
CX 8 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X10
E(0.00415549) X8 X10 X22
E(0.00415549) X8 Y10
E(0.00415549) X8 Y10 X22
E(0.00415549) X8 Z10
E(0.00415549) X8 Z10 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X10
E(0.00415549) Y8 X10 X22
E(0.00415549) Y8 Y10
E(0.00415549) Y8 Y10 X22
E(0.00415549) Y8 Z10
E(0.00415549) Y8 Z10 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X10
E(0.00415549) Z8 X10 X22
E(0.00415549) Z8 Y10
E(0.00415549) Z8 Y10 X22
E(0.00415549) Z8 Z10
E(0.00415549) Z8 Z10 X22
M 22
OBSERVABLE_INCLUDE(1) rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
R 22
XCX 2 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X3
E(0.00415549) X2 X3 X22
E(0.00415549) X2 Y3
E(0.00415549) X2 Y3 X22
E(0.00415549) X2 Z3
E(0.00415549) X2 Z3 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X3
E(0.00415549) Y2 X3 X22
E(0.00415549) Y2 Y3
E(0.00415549) Y2 Y3 X22
E(0.00415549) Y2 Z3
E(0.00415549) Y2 Z3 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X3
E(0.00415549) Z2 X3 X22
E(0.00415549) Z2 Y3
E(0.00415549) Z2 Y3 X22
E(0.00415549) Z2 Z3
E(0.00415549) Z2 Z3 X22
M 22
R 22
XCX 6 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X5
E(0.00415549) X6 X5 X22
E(0.00415549) X6 Y5
E(0.00415549) X6 Y5 X22
E(0.00415549) X6 Z5
E(0.00415549) X6 Z5 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X5
E(0.00415549) Y6 X5 X22
E(0.00415549) Y6 Y5
E(0.00415549) Y6 Y5 X22
E(0.00415549) Y6 Z5
E(0.00415549) Y6 Z5 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X5
E(0.00415549) Z6 X5 X22
E(0.00415549) Z6 Y5
E(0.00415549) Z6 Y5 X22
E(0.00415549) Z6 Z5
E(0.00415549) Z6 Z5 X22
M 22
R 22
XCX 8 22 9 22
E(0.00415549) X22
E(0.00415549) X9
E(0.00415549) X9 X22
E(0.00415549) Y9
E(0.00415549) Y9 X22
E(0.00415549) Z9
E(0.00415549) Z9 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X9
E(0.00415549) X8 X9 X22
E(0.00415549) X8 Y9
E(0.00415549) X8 Y9 X22
E(0.00415549) X8 Z9
E(0.00415549) X8 Z9 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X9
E(0.00415549) Y8 X9 X22
E(0.00415549) Y8 Y9
E(0.00415549) Y8 Y9 X22
E(0.00415549) Y8 Z9
E(0.00415549) Y8 Z9 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X9
E(0.00415549) Z8 X9 X22
E(0.00415549) Z8 Y9
E(0.00415549) Z8 Y9 X22
E(0.00415549) Z8 Z9
E(0.00415549) Z8 Z9 X22
M 22
R 22
XCX 11 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X11
E(0.00415549) X11 X22
E(0.00415549) X11 X10
E(0.00415549) X11 X10 X22
E(0.00415549) X11 Y10
E(0.00415549) X11 Y10 X22
E(0.00415549) X11 Z10
E(0.00415549) X11 Z10 X22
E(0.00415549) Y11
E(0.00415549) Y11 X22
E(0.00415549) Y11 X10
E(0.00415549) Y11 X10 X22
E(0.00415549) Y11 Y10
E(0.00415549) Y11 Y10 X22
E(0.00415549) Y11 Z10
E(0.00415549) Y11 Z10 X22
E(0.00415549) Z11
E(0.00415549) Z11 X22
E(0.00415549) Z11 X10
E(0.00415549) Z11 X10 X22
E(0.00415549) Z11 Y10
E(0.00415549) Z11 Y10 X22
E(0.00415549) Z11 Z10
E(0.00415549) Z11 Z10 X22
M 22
R 22
XCX 0 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X0
E(0.00415549) X0 X22
E(0.00415549) X0 X1
E(0.00415549) X0 X1 X22
E(0.00415549) X0 Y1
E(0.00415549) X0 Y1 X22
E(0.00415549) X0 Z1
E(0.00415549) X0 Z1 X22
E(0.00415549) Y0
E(0.00415549) Y0 X22
E(0.00415549) Y0 X1
E(0.00415549) Y0 X1 X22
E(0.00415549) Y0 Y1
E(0.00415549) Y0 Y1 X22
E(0.00415549) Y0 Z1
E(0.00415549) Y0 Z1 X22
E(0.00415549) Z0
E(0.00415549) Z0 X22
E(0.00415549) Z0 X1
E(0.00415549) Z0 X1 X22
E(0.00415549) Z0 Y1
E(0.00415549) Z0 Y1 X22
E(0.00415549) Z0 Z1
E(0.00415549) Z0 Z1 X22
M 22
R 22
XCX 4 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X7
E(0.00415549) X4 X7 X22
E(0.00415549) X4 Y7
E(0.00415549) X4 Y7 X22
E(0.00415549) X4 Z7
E(0.00415549) X4 Z7 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X7
E(0.00415549) Y4 X7 X22
E(0.00415549) Y4 Y7
E(0.00415549) Y4 Y7 X22
E(0.00415549) Y4 Z7
E(0.00415549) Y4 Z7 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X7
E(0.00415549) Z4 X7 X22
E(0.00415549) Z4 Y7
E(0.00415549) Z4 Y7 X22
E(0.00415549) Z4 Z7
E(0.00415549) Z4 Z7 X22
M 22
DETECTOR(1.5, 2, 0) rec[-54] rec[-53] rec[-49] rec[-46] rec[-45] rec[-42] rec[-12] rec[-11] rec[-8] rec[-6] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
X_ERROR(0.125) 0 1 9 11
MPP(0.125) Z0 Z1
R 22
CX 4 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X3
E(0.00415549) X4 X3 X22
E(0.00415549) X4 Y3
E(0.00415549) X4 Y3 X22
E(0.00415549) X4 Z3
E(0.00415549) X4 Z3 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X3
E(0.00415549) Y4 X3 X22
E(0.00415549) Y4 Y3
E(0.00415549) Y4 Y3 X22
E(0.00415549) Y4 Z3
E(0.00415549) Y4 Z3 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X3
E(0.00415549) Z4 X3 X22
E(0.00415549) Z4 Y3
E(0.00415549) Z4 Y3 X22
E(0.00415549) Z4 Z3
E(0.00415549) Z4 Z3 X22
M 22
R 22
CX 6 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X7
E(0.00415549) X6 X7 X22
E(0.00415549) X6 Y7
E(0.00415549) X6 Y7 X22
E(0.00415549) X6 Z7
E(0.00415549) X6 Z7 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X7
E(0.00415549) Y6 X7 X22
E(0.00415549) Y6 Y7
E(0.00415549) Y6 Y7 X22
E(0.00415549) Y6 Z7
E(0.00415549) Y6 Z7 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X7
E(0.00415549) Z6 X7 X22
E(0.00415549) Z6 Y7
E(0.00415549) Z6 Y7 X22
E(0.00415549) Z6 Z7
E(0.00415549) Z6 Z7 X22
M 22
MPP(0.125) Z9 Z11
R 22
CX 2 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X5
E(0.00415549) X2 X5 X22
E(0.00415549) X2 Y5
E(0.00415549) X2 Y5 X22
E(0.00415549) X2 Z5
E(0.00415549) X2 Z5 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X5
E(0.00415549) Y2 X5 X22
E(0.00415549) Y2 Y5
E(0.00415549) Y2 Y5 X22
E(0.00415549) Y2 Z5
E(0.00415549) Y2 Z5 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X5
E(0.00415549) Z2 X5 X22
E(0.00415549) Z2 Y5
E(0.00415549) Z2 Y5 X22
E(0.00415549) Z2 Z5
E(0.00415549) Z2 Z5 X22
M 22
R 22
CX 8 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X10
E(0.00415549) X8 X10 X22
E(0.00415549) X8 Y10
E(0.00415549) X8 Y10 X22
E(0.00415549) X8 Z10
E(0.00415549) X8 Z10 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X10
E(0.00415549) Y8 X10 X22
E(0.00415549) Y8 Y10
E(0.00415549) Y8 Y10 X22
E(0.00415549) Y8 Z10
E(0.00415549) Y8 Z10 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X10
E(0.00415549) Z8 X10 X22
E(0.00415549) Z8 Y10
E(0.00415549) Z8 Y10 X22
E(0.00415549) Z8 Z10
E(0.00415549) Z8 Z10 X22
M 22
OBSERVABLE_INCLUDE(1) rec[-1]
DETECTOR(1.5, 2, 0) rec[-20] rec[-19] rec[-16] rec[-6] rec[-5] rec[-2]
DETECTOR(2.5, 5, 0) rec[-18] rec[-17] rec[-15] rec[-4] rec[-3] rec[-1]
DETECTOR(0.5, -1, 0) rec[-22] rec[-21] rec[-8] rec[-7]
SHIFT_COORDS(0, 0, 1)
TICK
X_ERROR(0.125) 0 4 5 10 3 9 6 11
MPP(0.125) Y0
R 22
YCX 2 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X1
E(0.00415549) X2 X1 X22
E(0.00415549) X2 Y1
E(0.00415549) X2 Y1 X22
E(0.00415549) X2 Z1
E(0.00415549) X2 Z1 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X1
E(0.00415549) Y2 X1 X22
E(0.00415549) Y2 Y1
E(0.00415549) Y2 Y1 X22
E(0.00415549) Y2 Z1
E(0.00415549) Y2 Z1 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X1
E(0.00415549) Z2 X1 X22
E(0.00415549) Z2 Y1
E(0.00415549) Z2 Y1 X22
E(0.00415549) Z2 Z1
E(0.00415549) Z2 Z1 X22
M 22
MPP(0.125) Y4 Y5
R 22
YCX 8 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X7
E(0.00415549) X8 X7 X22
E(0.00415549) X8 Y7
E(0.00415549) X8 Y7 X22
E(0.00415549) X8 Z7
E(0.00415549) X8 Z7 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X7
E(0.00415549) Y8 X7 X22
E(0.00415549) Y8 Y7
E(0.00415549) Y8 Y7 X22
E(0.00415549) Y8 Z7
E(0.00415549) Y8 Z7 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X7
E(0.00415549) Z8 X7 X22
E(0.00415549) Z8 Y7
E(0.00415549) Z8 Y7 X22
E(0.00415549) Z8 Z7
E(0.00415549) Z8 Z7 X22
M 22
MPP(0.125) Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(0.5, 3, 0) rec[-40] rec[-36] rec[-30] rec[-16] rec[-8] rec[-4]
DETECTOR(2.5, 3, 0) rec[-38] rec[-37] rec[-34] rec[-29] rec[-25] rec[-15] rec[-11] rec[-6] rec[-5] rec[-2]
SHIFT_COORDS(0, 0, 1)
TICK
X_ERROR(0.125) 0 1 2 3 4 5 6 7 8 9 10 11
MPP(0.125) Y0 Y1 Y2 Y3 Y4 Y5 Y6 Y7 Y8 Y9 Y10 Y11
DETECTOR(0, 0.5, 0) rec[-22] rec[-12]
DETECTOR(1, 0.5, 0) rec[-21] rec[-11] rec[-10]
DETECTOR(1, 3.5, 0) rec[-20] rec[-8]
DETECTOR(2, 0.5, 0) rec[-19] rec[-7]
DETECTOR(2, 3.5, 0) rec[-18] rec[-5] rec[-4]
DETECTOR(3, 3.5, 0) rec[-17] rec[-2]
DETECTOR(0.5, 2, 0) rec[-16] rec[-9]
DETECTOR(1.5, 5, 0) rec[-15] rec[-3]
DETECTOR(2.5, 2, 0) rec[-14] rec[-6]
DETECTOR(3.5, 5, 0) rec[-13] rec[-1]
DETECTOR(1.5, 2, 0) rec[-36] rec[-35] rec[-31] rec[-28] rec[-27] rec[-24] rec[-10] rec[-9] rec[-8] rec[-7] rec[-6] rec[-5]
OBSERVABLE_INCLUDE(1) rec[-4] rec[-3] rec[-2] rec[-1]
TICK
""")
def test_exact_circuit_EM3_v3_H():
layout = HoneycombLayout(data_width=2,
data_height=6,
rounds=100,
noise_level=0.125,
noisy_gate_set='EM3_v3',
tested_observable='H',
sheared=True)
assert layout.ideal_and_noisy_circuit[1] == stim.Circuit("""
QUBIT_COORDS(0, 0) 0
QUBIT_COORDS(1, 0) 1
QUBIT_COORDS(1, 1) 2
QUBIT_COORDS(1, 2) 3
QUBIT_COORDS(1, 3) 4
QUBIT_COORDS(2, 1) 5
QUBIT_COORDS(2, 2) 6
QUBIT_COORDS(2, 3) 7
QUBIT_COORDS(2, 4) 8
QUBIT_COORDS(2, 5) 9
QUBIT_COORDS(3, 4) 10
QUBIT_COORDS(3, 5) 11
R 0 1 2 3 4 5 6 7 8 9 10 11
X_ERROR(0.0625) 0 1 2 3 4 5 6 7 8 9 10 11
TICK
H_YZ 0 1 2 3 4 5 6 7 8 9 10 11
TICK
R 22
XCX 2 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X3
E(0.00415549) X2 X3 X22
E(0.00415549) X2 Y3
E(0.00415549) X2 Y3 X22
E(0.00415549) X2 Z3
E(0.00415549) X2 Z3 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X3
E(0.00415549) Y2 X3 X22
E(0.00415549) Y2 Y3
E(0.00415549) Y2 Y3 X22
E(0.00415549) Y2 Z3
E(0.00415549) Y2 Z3 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X3
E(0.00415549) Z2 X3 X22
E(0.00415549) Z2 Y3
E(0.00415549) Z2 Y3 X22
E(0.00415549) Z2 Z3
E(0.00415549) Z2 Z3 X22
M 22
R 22
XCX 6 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X5
E(0.00415549) X6 X5 X22
E(0.00415549) X6 Y5
E(0.00415549) X6 Y5 X22
E(0.00415549) X6 Z5
E(0.00415549) X6 Z5 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X5
E(0.00415549) Y6 X5 X22
E(0.00415549) Y6 Y5
E(0.00415549) Y6 Y5 X22
E(0.00415549) Y6 Z5
E(0.00415549) Y6 Z5 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X5
E(0.00415549) Z6 X5 X22
E(0.00415549) Z6 Y5
E(0.00415549) Z6 Y5 X22
E(0.00415549) Z6 Z5
E(0.00415549) Z6 Z5 X22
M 22
R 22
XCX 8 22 9 22
E(0.00415549) X22
E(0.00415549) X9
E(0.00415549) X9 X22
E(0.00415549) Y9
E(0.00415549) Y9 X22
E(0.00415549) Z9
E(0.00415549) Z9 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X9
E(0.00415549) X8 X9 X22
E(0.00415549) X8 Y9
E(0.00415549) X8 Y9 X22
E(0.00415549) X8 Z9
E(0.00415549) X8 Z9 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X9
E(0.00415549) Y8 X9 X22
E(0.00415549) Y8 Y9
E(0.00415549) Y8 Y9 X22
E(0.00415549) Y8 Z9
E(0.00415549) Y8 Z9 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X9
E(0.00415549) Z8 X9 X22
E(0.00415549) Z8 Y9
E(0.00415549) Z8 Y9 X22
E(0.00415549) Z8 Z9
E(0.00415549) Z8 Z9 X22
M 22
R 22
XCX 11 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X11
E(0.00415549) X11 X22
E(0.00415549) X11 X10
E(0.00415549) X11 X10 X22
E(0.00415549) X11 Y10
E(0.00415549) X11 Y10 X22
E(0.00415549) X11 Z10
E(0.00415549) X11 Z10 X22
E(0.00415549) Y11
E(0.00415549) Y11 X22
E(0.00415549) Y11 X10
E(0.00415549) Y11 X10 X22
E(0.00415549) Y11 Y10
E(0.00415549) Y11 Y10 X22
E(0.00415549) Y11 Z10
E(0.00415549) Y11 Z10 X22
E(0.00415549) Z11
E(0.00415549) Z11 X22
E(0.00415549) Z11 X10
E(0.00415549) Z11 X10 X22
E(0.00415549) Z11 Y10
E(0.00415549) Z11 Y10 X22
E(0.00415549) Z11 Z10
E(0.00415549) Z11 Z10 X22
M 22
R 22
XCX 0 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X0
E(0.00415549) X0 X22
E(0.00415549) X0 X1
E(0.00415549) X0 X1 X22
E(0.00415549) X0 Y1
E(0.00415549) X0 Y1 X22
E(0.00415549) X0 Z1
E(0.00415549) X0 Z1 X22
E(0.00415549) Y0
E(0.00415549) Y0 X22
E(0.00415549) Y0 X1
E(0.00415549) Y0 X1 X22
E(0.00415549) Y0 Y1
E(0.00415549) Y0 Y1 X22
E(0.00415549) Y0 Z1
E(0.00415549) Y0 Z1 X22
E(0.00415549) Z0
E(0.00415549) Z0 X22
E(0.00415549) Z0 X1
E(0.00415549) Z0 X1 X22
E(0.00415549) Z0 Y1
E(0.00415549) Z0 Y1 X22
E(0.00415549) Z0 Z1
E(0.00415549) Z0 Z1 X22
M 22
R 22
XCX 4 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X7
E(0.00415549) X4 X7 X22
E(0.00415549) X4 Y7
E(0.00415549) X4 Y7 X22
E(0.00415549) X4 Z7
E(0.00415549) X4 Z7 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X7
E(0.00415549) Y4 X7 X22
E(0.00415549) Y4 Y7
E(0.00415549) Y4 Y7 X22
E(0.00415549) Y4 Z7
E(0.00415549) Y4 Z7 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X7
E(0.00415549) Z4 X7 X22
E(0.00415549) Z4 Y7
E(0.00415549) Z4 Y7 X22
E(0.00415549) Z4 Z7
E(0.00415549) Z4 Z7 X22
M 22
SHIFT_COORDS(0, 0, 1)
TICK
R 22
YCX 0 22
E(0.0164159) X22
E(0.0164159) X0
E(0.0164159) X0 X22
E(0.0164159) Y0
E(0.0164159) Y0 X22
E(0.0164159) Z0
E(0.0164159) Z0 X22
M 22
R 22
YCX 2 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X1
E(0.00415549) X2 X1 X22
E(0.00415549) X2 Y1
E(0.00415549) X2 Y1 X22
E(0.00415549) X2 Z1
E(0.00415549) X2 Z1 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X1
E(0.00415549) Y2 X1 X22
E(0.00415549) Y2 Y1
E(0.00415549) Y2 Y1 X22
E(0.00415549) Y2 Z1
E(0.00415549) Y2 Z1 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X1
E(0.00415549) Z2 X1 X22
E(0.00415549) Z2 Y1
E(0.00415549) Z2 Y1 X22
E(0.00415549) Z2 Z1
E(0.00415549) Z2 Z1 X22
M 22
R 22
YCX 4 22
E(0.0164159) X22
E(0.0164159) X4
E(0.0164159) X4 X22
E(0.0164159) Y4
E(0.0164159) Y4 X22
E(0.0164159) Z4
E(0.0164159) Z4 X22
M 22
R 22
YCX 5 22
E(0.0164159) X22
E(0.0164159) X5
E(0.0164159) X5 X22
E(0.0164159) Y5
E(0.0164159) Y5 X22
E(0.0164159) Z5
E(0.0164159) Z5 X22
M 22
R 22
YCX 8 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X7
E(0.00415549) X8 X7 X22
E(0.00415549) X8 Y7
E(0.00415549) X8 Y7 X22
E(0.00415549) X8 Z7
E(0.00415549) X8 Z7 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X7
E(0.00415549) Y8 X7 X22
E(0.00415549) Y8 Y7
E(0.00415549) Y8 Y7 X22
E(0.00415549) Y8 Z7
E(0.00415549) Y8 Z7 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X7
E(0.00415549) Z8 X7 X22
E(0.00415549) Z8 Y7
E(0.00415549) Z8 Y7 X22
E(0.00415549) Z8 Z7
E(0.00415549) Z8 Z7 X22
M 22
R 22
YCX 10 22
E(0.0164159) X22
E(0.0164159) X10
E(0.0164159) X10 X22
E(0.0164159) Y10
E(0.0164159) Y10 X22
E(0.0164159) Z10
E(0.0164159) Z10 X22
M 22
R 22
YCX 3 22
E(0.0164159) X22
E(0.0164159) X3
E(0.0164159) X3 X22
E(0.0164159) Y3
E(0.0164159) Y3 X22
E(0.0164159) Z3
E(0.0164159) Z3 X22
M 22
R 22
YCX 9 22
E(0.0164159) X22
E(0.0164159) X9
E(0.0164159) X9 X22
E(0.0164159) Y9
E(0.0164159) Y9 X22
E(0.0164159) Z9
E(0.0164159) Z9 X22
M 22
R 22
YCX 6 22
E(0.0164159) X22
E(0.0164159) X6
E(0.0164159) X6 X22
E(0.0164159) Y6
E(0.0164159) Y6 X22
E(0.0164159) Z6
E(0.0164159) Z6 X22
M 22
R 22
YCX 11 22
E(0.0164159) X22
E(0.0164159) X11
E(0.0164159) X11 X22
E(0.0164159) Y11
E(0.0164159) Y11 X22
E(0.0164159) Z11
E(0.0164159) Z11 X22
M 22
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(1.5, 4, 0) rec[-8] rec[-6] rec[-3]
DETECTOR(2.5, 1, 0) rec[-7] rec[-2]
DETECTOR(0.5, 1, 0) rec[-10] rec[-9] rec[-4]
DETECTOR(3.5, 4, 0) rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
R 22
CX 0 22
E(0.0164159) X22
E(0.0164159) X0
E(0.0164159) X0 X22
E(0.0164159) Y0
E(0.0164159) Y0 X22
E(0.0164159) Z0
E(0.0164159) Z0 X22
M 22
R 22
CX 1 22
E(0.0164159) X22
E(0.0164159) X1
E(0.0164159) X1 X22
E(0.0164159) Y1
E(0.0164159) Y1 X22
E(0.0164159) Z1
E(0.0164159) Z1 X22
M 22
R 22
CX 4 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X3
E(0.00415549) X4 X3 X22
E(0.00415549) X4 Y3
E(0.00415549) X4 Y3 X22
E(0.00415549) X4 Z3
E(0.00415549) X4 Z3 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X3
E(0.00415549) Y4 X3 X22
E(0.00415549) Y4 Y3
E(0.00415549) Y4 Y3 X22
E(0.00415549) Y4 Z3
E(0.00415549) Y4 Z3 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X3
E(0.00415549) Z4 X3 X22
E(0.00415549) Z4 Y3
E(0.00415549) Z4 Y3 X22
E(0.00415549) Z4 Z3
E(0.00415549) Z4 Z3 X22
M 22
R 22
CX 6 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X7
E(0.00415549) X6 X7 X22
E(0.00415549) X6 Y7
E(0.00415549) X6 Y7 X22
E(0.00415549) X6 Z7
E(0.00415549) X6 Z7 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X7
E(0.00415549) Y6 X7 X22
E(0.00415549) Y6 Y7
E(0.00415549) Y6 Y7 X22
E(0.00415549) Y6 Z7
E(0.00415549) Y6 Z7 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X7
E(0.00415549) Z6 X7 X22
E(0.00415549) Z6 Y7
E(0.00415549) Z6 Y7 X22
E(0.00415549) Z6 Z7
E(0.00415549) Z6 Z7 X22
M 22
R 22
CX 9 22
E(0.0164159) X22
E(0.0164159) X9
E(0.0164159) X9 X22
E(0.0164159) Y9
E(0.0164159) Y9 X22
E(0.0164159) Z9
E(0.0164159) Z9 X22
M 22
R 22
CX 11 22
E(0.0164159) X22
E(0.0164159) X11
E(0.0164159) X11 X22
E(0.0164159) Y11
E(0.0164159) Y11 X22
E(0.0164159) Z11
E(0.0164159) Z11 X22
M 22
R 22
CX 2 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X5
E(0.00415549) X2 X5 X22
E(0.00415549) X2 Y5
E(0.00415549) X2 Y5 X22
E(0.00415549) X2 Z5
E(0.00415549) X2 Z5 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X5
E(0.00415549) Y2 X5 X22
E(0.00415549) Y2 Y5
E(0.00415549) Y2 Y5 X22
E(0.00415549) Y2 Z5
E(0.00415549) Y2 Z5 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X5
E(0.00415549) Z2 X5 X22
E(0.00415549) Z2 Y5
E(0.00415549) Z2 Y5 X22
E(0.00415549) Z2 Z5
E(0.00415549) Z2 Z5 X22
M 22
R 22
CX 8 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X10
E(0.00415549) X8 X10 X22
E(0.00415549) X8 Y10
E(0.00415549) X8 Y10 X22
E(0.00415549) X8 Z10
E(0.00415549) X8 Z10 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X10
E(0.00415549) Y8 X10 X22
E(0.00415549) Y8 Y10
E(0.00415549) Y8 Y10 X22
E(0.00415549) Y8 Z10
E(0.00415549) Y8 Z10 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X10
E(0.00415549) Z8 X10 X22
E(0.00415549) Z8 Y10
E(0.00415549) Z8 Y10 X22
E(0.00415549) Z8 Z10
E(0.00415549) Z8 Z10 X22
M 22
OBSERVABLE_INCLUDE(1) rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
R 22
XCX 2 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X3
E(0.00415549) X2 X3 X22
E(0.00415549) X2 Y3
E(0.00415549) X2 Y3 X22
E(0.00415549) X2 Z3
E(0.00415549) X2 Z3 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X3
E(0.00415549) Y2 X3 X22
E(0.00415549) Y2 Y3
E(0.00415549) Y2 Y3 X22
E(0.00415549) Y2 Z3
E(0.00415549) Y2 Z3 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X3
E(0.00415549) Z2 X3 X22
E(0.00415549) Z2 Y3
E(0.00415549) Z2 Y3 X22
E(0.00415549) Z2 Z3
E(0.00415549) Z2 Z3 X22
M 22
R 22
XCX 6 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X5
E(0.00415549) X6 X5 X22
E(0.00415549) X6 Y5
E(0.00415549) X6 Y5 X22
E(0.00415549) X6 Z5
E(0.00415549) X6 Z5 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X5
E(0.00415549) Y6 X5 X22
E(0.00415549) Y6 Y5
E(0.00415549) Y6 Y5 X22
E(0.00415549) Y6 Z5
E(0.00415549) Y6 Z5 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X5
E(0.00415549) Z6 X5 X22
E(0.00415549) Z6 Y5
E(0.00415549) Z6 Y5 X22
E(0.00415549) Z6 Z5
E(0.00415549) Z6 Z5 X22
M 22
R 22
XCX 8 22 9 22
E(0.00415549) X22
E(0.00415549) X9
E(0.00415549) X9 X22
E(0.00415549) Y9
E(0.00415549) Y9 X22
E(0.00415549) Z9
E(0.00415549) Z9 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X9
E(0.00415549) X8 X9 X22
E(0.00415549) X8 Y9
E(0.00415549) X8 Y9 X22
E(0.00415549) X8 Z9
E(0.00415549) X8 Z9 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X9
E(0.00415549) Y8 X9 X22
E(0.00415549) Y8 Y9
E(0.00415549) Y8 Y9 X22
E(0.00415549) Y8 Z9
E(0.00415549) Y8 Z9 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X9
E(0.00415549) Z8 X9 X22
E(0.00415549) Z8 Y9
E(0.00415549) Z8 Y9 X22
E(0.00415549) Z8 Z9
E(0.00415549) Z8 Z9 X22
M 22
R 22
XCX 11 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X11
E(0.00415549) X11 X22
E(0.00415549) X11 X10
E(0.00415549) X11 X10 X22
E(0.00415549) X11 Y10
E(0.00415549) X11 Y10 X22
E(0.00415549) X11 Z10
E(0.00415549) X11 Z10 X22
E(0.00415549) Y11
E(0.00415549) Y11 X22
E(0.00415549) Y11 X10
E(0.00415549) Y11 X10 X22
E(0.00415549) Y11 Y10
E(0.00415549) Y11 Y10 X22
E(0.00415549) Y11 Z10
E(0.00415549) Y11 Z10 X22
E(0.00415549) Z11
E(0.00415549) Z11 X22
E(0.00415549) Z11 X10
E(0.00415549) Z11 X10 X22
E(0.00415549) Z11 Y10
E(0.00415549) Z11 Y10 X22
E(0.00415549) Z11 Z10
E(0.00415549) Z11 Z10 X22
M 22
R 22
XCX 0 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X0
E(0.00415549) X0 X22
E(0.00415549) X0 X1
E(0.00415549) X0 X1 X22
E(0.00415549) X0 Y1
E(0.00415549) X0 Y1 X22
E(0.00415549) X0 Z1
E(0.00415549) X0 Z1 X22
E(0.00415549) Y0
E(0.00415549) Y0 X22
E(0.00415549) Y0 X1
E(0.00415549) Y0 X1 X22
E(0.00415549) Y0 Y1
E(0.00415549) Y0 Y1 X22
E(0.00415549) Y0 Z1
E(0.00415549) Y0 Z1 X22
E(0.00415549) Z0
E(0.00415549) Z0 X22
E(0.00415549) Z0 X1
E(0.00415549) Z0 X1 X22
E(0.00415549) Z0 Y1
E(0.00415549) Z0 Y1 X22
E(0.00415549) Z0 Z1
E(0.00415549) Z0 Z1 X22
M 22
R 22
XCX 4 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X7
E(0.00415549) X4 X7 X22
E(0.00415549) X4 Y7
E(0.00415549) X4 Y7 X22
E(0.00415549) X4 Z7
E(0.00415549) X4 Z7 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X7
E(0.00415549) Y4 X7 X22
E(0.00415549) Y4 Y7
E(0.00415549) Y4 Y7 X22
E(0.00415549) Y4 Z7
E(0.00415549) Y4 Z7 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X7
E(0.00415549) Z4 X7 X22
E(0.00415549) Z4 Y7
E(0.00415549) Z4 Y7 X22
E(0.00415549) Z4 Z7
E(0.00415549) Z4 Z7 X22
M 22
DETECTOR(1.5, 2, 0) rec[-12] rec[-11] rec[-8] rec[-6] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
R 22
CX 0 22
E(0.0164159) X22
E(0.0164159) X0
E(0.0164159) X0 X22
E(0.0164159) Y0
E(0.0164159) Y0 X22
E(0.0164159) Z0
E(0.0164159) Z0 X22
M 22
R 22
CX 1 22
E(0.0164159) X22
E(0.0164159) X1
E(0.0164159) X1 X22
E(0.0164159) Y1
E(0.0164159) Y1 X22
E(0.0164159) Z1
E(0.0164159) Z1 X22
M 22
R 22
CX 4 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X3
E(0.00415549) X4 X3 X22
E(0.00415549) X4 Y3
E(0.00415549) X4 Y3 X22
E(0.00415549) X4 Z3
E(0.00415549) X4 Z3 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X3
E(0.00415549) Y4 X3 X22
E(0.00415549) Y4 Y3
E(0.00415549) Y4 Y3 X22
E(0.00415549) Y4 Z3
E(0.00415549) Y4 Z3 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X3
E(0.00415549) Z4 X3 X22
E(0.00415549) Z4 Y3
E(0.00415549) Z4 Y3 X22
E(0.00415549) Z4 Z3
E(0.00415549) Z4 Z3 X22
M 22
R 22
CX 6 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X7
E(0.00415549) X6 X7 X22
E(0.00415549) X6 Y7
E(0.00415549) X6 Y7 X22
E(0.00415549) X6 Z7
E(0.00415549) X6 Z7 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X7
E(0.00415549) Y6 X7 X22
E(0.00415549) Y6 Y7
E(0.00415549) Y6 Y7 X22
E(0.00415549) Y6 Z7
E(0.00415549) Y6 Z7 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X7
E(0.00415549) Z6 X7 X22
E(0.00415549) Z6 Y7
E(0.00415549) Z6 Y7 X22
E(0.00415549) Z6 Z7
E(0.00415549) Z6 Z7 X22
M 22
R 22
CX 9 22
E(0.0164159) X22
E(0.0164159) X9
E(0.0164159) X9 X22
E(0.0164159) Y9
E(0.0164159) Y9 X22
E(0.0164159) Z9
E(0.0164159) Z9 X22
M 22
R 22
CX 11 22
E(0.0164159) X22
E(0.0164159) X11
E(0.0164159) X11 X22
E(0.0164159) Y11
E(0.0164159) Y11 X22
E(0.0164159) Z11
E(0.0164159) Z11 X22
M 22
R 22
CX 2 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X5
E(0.00415549) X2 X5 X22
E(0.00415549) X2 Y5
E(0.00415549) X2 Y5 X22
E(0.00415549) X2 Z5
E(0.00415549) X2 Z5 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X5
E(0.00415549) Y2 X5 X22
E(0.00415549) Y2 Y5
E(0.00415549) Y2 Y5 X22
E(0.00415549) Y2 Z5
E(0.00415549) Y2 Z5 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X5
E(0.00415549) Z2 X5 X22
E(0.00415549) Z2 Y5
E(0.00415549) Z2 Y5 X22
E(0.00415549) Z2 Z5
E(0.00415549) Z2 Z5 X22
M 22
R 22
CX 8 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X10
E(0.00415549) X8 X10 X22
E(0.00415549) X8 Y10
E(0.00415549) X8 Y10 X22
E(0.00415549) X8 Z10
E(0.00415549) X8 Z10 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X10
E(0.00415549) Y8 X10 X22
E(0.00415549) Y8 Y10
E(0.00415549) Y8 Y10 X22
E(0.00415549) Y8 Z10
E(0.00415549) Y8 Z10 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X10
E(0.00415549) Z8 X10 X22
E(0.00415549) Z8 Y10
E(0.00415549) Z8 Y10 X22
E(0.00415549) Z8 Z10
E(0.00415549) Z8 Z10 X22
M 22
OBSERVABLE_INCLUDE(1) rec[-1]
DETECTOR(1.5, 2, 0) rec[-20] rec[-19] rec[-16] rec[-6] rec[-5] rec[-2]
DETECTOR(2.5, 5, 0) rec[-18] rec[-17] rec[-15] rec[-4] rec[-3] rec[-1]
DETECTOR(0.5, -1, 0) rec[-22] rec[-21] rec[-8] rec[-7]
SHIFT_COORDS(0, 0, 1)
TICK
R 22
YCX 0 22
E(0.0164159) X22
E(0.0164159) X0
E(0.0164159) X0 X22
E(0.0164159) Y0
E(0.0164159) Y0 X22
E(0.0164159) Z0
E(0.0164159) Z0 X22
M 22
R 22
YCX 2 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X1
E(0.00415549) X2 X1 X22
E(0.00415549) X2 Y1
E(0.00415549) X2 Y1 X22
E(0.00415549) X2 Z1
E(0.00415549) X2 Z1 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X1
E(0.00415549) Y2 X1 X22
E(0.00415549) Y2 Y1
E(0.00415549) Y2 Y1 X22
E(0.00415549) Y2 Z1
E(0.00415549) Y2 Z1 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X1
E(0.00415549) Z2 X1 X22
E(0.00415549) Z2 Y1
E(0.00415549) Z2 Y1 X22
E(0.00415549) Z2 Z1
E(0.00415549) Z2 Z1 X22
M 22
R 22
YCX 4 22
E(0.0164159) X22
E(0.0164159) X4
E(0.0164159) X4 X22
E(0.0164159) Y4
E(0.0164159) Y4 X22
E(0.0164159) Z4
E(0.0164159) Z4 X22
M 22
R 22
YCX 5 22
E(0.0164159) X22
E(0.0164159) X5
E(0.0164159) X5 X22
E(0.0164159) Y5
E(0.0164159) Y5 X22
E(0.0164159) Z5
E(0.0164159) Z5 X22
M 22
R 22
YCX 8 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X7
E(0.00415549) X8 X7 X22
E(0.00415549) X8 Y7
E(0.00415549) X8 Y7 X22
E(0.00415549) X8 Z7
E(0.00415549) X8 Z7 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X7
E(0.00415549) Y8 X7 X22
E(0.00415549) Y8 Y7
E(0.00415549) Y8 Y7 X22
E(0.00415549) Y8 Z7
E(0.00415549) Y8 Z7 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X7
E(0.00415549) Z8 X7 X22
E(0.00415549) Z8 Y7
E(0.00415549) Z8 Y7 X22
E(0.00415549) Z8 Z7
E(0.00415549) Z8 Z7 X22
M 22
R 22
YCX 10 22
E(0.0164159) X22
E(0.0164159) X10
E(0.0164159) X10 X22
E(0.0164159) Y10
E(0.0164159) Y10 X22
E(0.0164159) Z10
E(0.0164159) Z10 X22
M 22
R 22
YCX 3 22
E(0.0164159) X22
E(0.0164159) X3
E(0.0164159) X3 X22
E(0.0164159) Y3
E(0.0164159) Y3 X22
E(0.0164159) Z3
E(0.0164159) Z3 X22
M 22
R 22
YCX 9 22
E(0.0164159) X22
E(0.0164159) X9
E(0.0164159) X9 X22
E(0.0164159) Y9
E(0.0164159) Y9 X22
E(0.0164159) Z9
E(0.0164159) Z9 X22
M 22
R 22
YCX 6 22
E(0.0164159) X22
E(0.0164159) X6
E(0.0164159) X6 X22
E(0.0164159) Y6
E(0.0164159) Y6 X22
E(0.0164159) Z6
E(0.0164159) Z6 X22
M 22
R 22
YCX 11 22
E(0.0164159) X22
E(0.0164159) X11
E(0.0164159) X11 X22
E(0.0164159) Y11
E(0.0164159) Y11 X22
E(0.0164159) Z11
E(0.0164159) Z11 X22
M 22
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(0.5, 3, 0) rec[-40] rec[-36] rec[-30] rec[-16] rec[-8] rec[-4]
DETECTOR(2.5, 3, 0) rec[-38] rec[-37] rec[-34] rec[-29] rec[-25] rec[-15] rec[-11] rec[-6] rec[-5] rec[-2]
SHIFT_COORDS(0, 0, 1)
TICK
REPEAT 48 {
R 22
XCX 2 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X3
E(0.00415549) X2 X3 X22
E(0.00415549) X2 Y3
E(0.00415549) X2 Y3 X22
E(0.00415549) X2 Z3
E(0.00415549) X2 Z3 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X3
E(0.00415549) Y2 X3 X22
E(0.00415549) Y2 Y3
E(0.00415549) Y2 Y3 X22
E(0.00415549) Y2 Z3
E(0.00415549) Y2 Z3 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X3
E(0.00415549) Z2 X3 X22
E(0.00415549) Z2 Y3
E(0.00415549) Z2 Y3 X22
E(0.00415549) Z2 Z3
E(0.00415549) Z2 Z3 X22
M 22
R 22
XCX 6 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X5
E(0.00415549) X6 X5 X22
E(0.00415549) X6 Y5
E(0.00415549) X6 Y5 X22
E(0.00415549) X6 Z5
E(0.00415549) X6 Z5 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X5
E(0.00415549) Y6 X5 X22
E(0.00415549) Y6 Y5
E(0.00415549) Y6 Y5 X22
E(0.00415549) Y6 Z5
E(0.00415549) Y6 Z5 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X5
E(0.00415549) Z6 X5 X22
E(0.00415549) Z6 Y5
E(0.00415549) Z6 Y5 X22
E(0.00415549) Z6 Z5
E(0.00415549) Z6 Z5 X22
M 22
R 22
XCX 8 22 9 22
E(0.00415549) X22
E(0.00415549) X9
E(0.00415549) X9 X22
E(0.00415549) Y9
E(0.00415549) Y9 X22
E(0.00415549) Z9
E(0.00415549) Z9 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X9
E(0.00415549) X8 X9 X22
E(0.00415549) X8 Y9
E(0.00415549) X8 Y9 X22
E(0.00415549) X8 Z9
E(0.00415549) X8 Z9 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X9
E(0.00415549) Y8 X9 X22
E(0.00415549) Y8 Y9
E(0.00415549) Y8 Y9 X22
E(0.00415549) Y8 Z9
E(0.00415549) Y8 Z9 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X9
E(0.00415549) Z8 X9 X22
E(0.00415549) Z8 Y9
E(0.00415549) Z8 Y9 X22
E(0.00415549) Z8 Z9
E(0.00415549) Z8 Z9 X22
M 22
R 22
XCX 11 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X11
E(0.00415549) X11 X22
E(0.00415549) X11 X10
E(0.00415549) X11 X10 X22
E(0.00415549) X11 Y10
E(0.00415549) X11 Y10 X22
E(0.00415549) X11 Z10
E(0.00415549) X11 Z10 X22
E(0.00415549) Y11
E(0.00415549) Y11 X22
E(0.00415549) Y11 X10
E(0.00415549) Y11 X10 X22
E(0.00415549) Y11 Y10
E(0.00415549) Y11 Y10 X22
E(0.00415549) Y11 Z10
E(0.00415549) Y11 Z10 X22
E(0.00415549) Z11
E(0.00415549) Z11 X22
E(0.00415549) Z11 X10
E(0.00415549) Z11 X10 X22
E(0.00415549) Z11 Y10
E(0.00415549) Z11 Y10 X22
E(0.00415549) Z11 Z10
E(0.00415549) Z11 Z10 X22
M 22
R 22
XCX 0 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X0
E(0.00415549) X0 X22
E(0.00415549) X0 X1
E(0.00415549) X0 X1 X22
E(0.00415549) X0 Y1
E(0.00415549) X0 Y1 X22
E(0.00415549) X0 Z1
E(0.00415549) X0 Z1 X22
E(0.00415549) Y0
E(0.00415549) Y0 X22
E(0.00415549) Y0 X1
E(0.00415549) Y0 X1 X22
E(0.00415549) Y0 Y1
E(0.00415549) Y0 Y1 X22
E(0.00415549) Y0 Z1
E(0.00415549) Y0 Z1 X22
E(0.00415549) Z0
E(0.00415549) Z0 X22
E(0.00415549) Z0 X1
E(0.00415549) Z0 X1 X22
E(0.00415549) Z0 Y1
E(0.00415549) Z0 Y1 X22
E(0.00415549) Z0 Z1
E(0.00415549) Z0 Z1 X22
M 22
R 22
XCX 4 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X7
E(0.00415549) X4 X7 X22
E(0.00415549) X4 Y7
E(0.00415549) X4 Y7 X22
E(0.00415549) X4 Z7
E(0.00415549) X4 Z7 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X7
E(0.00415549) Y4 X7 X22
E(0.00415549) Y4 Y7
E(0.00415549) Y4 Y7 X22
E(0.00415549) Y4 Z7
E(0.00415549) Y4 Z7 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X7
E(0.00415549) Z4 X7 X22
E(0.00415549) Z4 Y7
E(0.00415549) Z4 Y7 X22
E(0.00415549) Z4 Z7
E(0.00415549) Z4 Z7 X22
M 22
SHIFT_COORDS(0, 0, 1)
TICK
R 22
YCX 0 22
E(0.0164159) X22
E(0.0164159) X0
E(0.0164159) X0 X22
E(0.0164159) Y0
E(0.0164159) Y0 X22
E(0.0164159) Z0
E(0.0164159) Z0 X22
M 22
R 22
YCX 2 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X1
E(0.00415549) X2 X1 X22
E(0.00415549) X2 Y1
E(0.00415549) X2 Y1 X22
E(0.00415549) X2 Z1
E(0.00415549) X2 Z1 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X1
E(0.00415549) Y2 X1 X22
E(0.00415549) Y2 Y1
E(0.00415549) Y2 Y1 X22
E(0.00415549) Y2 Z1
E(0.00415549) Y2 Z1 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X1
E(0.00415549) Z2 X1 X22
E(0.00415549) Z2 Y1
E(0.00415549) Z2 Y1 X22
E(0.00415549) Z2 Z1
E(0.00415549) Z2 Z1 X22
M 22
R 22
YCX 4 22
E(0.0164159) X22
E(0.0164159) X4
E(0.0164159) X4 X22
E(0.0164159) Y4
E(0.0164159) Y4 X22
E(0.0164159) Z4
E(0.0164159) Z4 X22
M 22
R 22
YCX 5 22
E(0.0164159) X22
E(0.0164159) X5
E(0.0164159) X5 X22
E(0.0164159) Y5
E(0.0164159) Y5 X22
E(0.0164159) Z5
E(0.0164159) Z5 X22
M 22
R 22
YCX 8 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X7
E(0.00415549) X8 X7 X22
E(0.00415549) X8 Y7
E(0.00415549) X8 Y7 X22
E(0.00415549) X8 Z7
E(0.00415549) X8 Z7 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X7
E(0.00415549) Y8 X7 X22
E(0.00415549) Y8 Y7
E(0.00415549) Y8 Y7 X22
E(0.00415549) Y8 Z7
E(0.00415549) Y8 Z7 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X7
E(0.00415549) Z8 X7 X22
E(0.00415549) Z8 Y7
E(0.00415549) Z8 Y7 X22
E(0.00415549) Z8 Z7
E(0.00415549) Z8 Z7 X22
M 22
R 22
YCX 10 22
E(0.0164159) X22
E(0.0164159) X10
E(0.0164159) X10 X22
E(0.0164159) Y10
E(0.0164159) Y10 X22
E(0.0164159) Z10
E(0.0164159) Z10 X22
M 22
R 22
YCX 3 22
E(0.0164159) X22
E(0.0164159) X3
E(0.0164159) X3 X22
E(0.0164159) Y3
E(0.0164159) Y3 X22
E(0.0164159) Z3
E(0.0164159) Z3 X22
M 22
R 22
YCX 9 22
E(0.0164159) X22
E(0.0164159) X9
E(0.0164159) X9 X22
E(0.0164159) Y9
E(0.0164159) Y9 X22
E(0.0164159) Z9
E(0.0164159) Z9 X22
M 22
R 22
YCX 6 22
E(0.0164159) X22
E(0.0164159) X6
E(0.0164159) X6 X22
E(0.0164159) Y6
E(0.0164159) Y6 X22
E(0.0164159) Z6
E(0.0164159) Z6 X22
M 22
R 22
YCX 11 22
E(0.0164159) X22
E(0.0164159) X11
E(0.0164159) X11 X22
E(0.0164159) Y11
E(0.0164159) Y11 X22
E(0.0164159) Z11
E(0.0164159) Z11 X22
M 22
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(1.5, 4, 0) rec[-24] rec[-22] rec[-19] rec[-8] rec[-6] rec[-3]
DETECTOR(2.5, 1, 0) rec[-23] rec[-18] rec[-7] rec[-2]
DETECTOR(0.5, 1, 0) rec[-26] rec[-25] rec[-20] rec[-10] rec[-9] rec[-4]
DETECTOR(3.5, 4, 0) rec[-21] rec[-17] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
R 22
CX 0 22
E(0.0164159) X22
E(0.0164159) X0
E(0.0164159) X0 X22
E(0.0164159) Y0
E(0.0164159) Y0 X22
E(0.0164159) Z0
E(0.0164159) Z0 X22
M 22
R 22
CX 1 22
E(0.0164159) X22
E(0.0164159) X1
E(0.0164159) X1 X22
E(0.0164159) Y1
E(0.0164159) Y1 X22
E(0.0164159) Z1
E(0.0164159) Z1 X22
M 22
R 22
CX 4 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X3
E(0.00415549) X4 X3 X22
E(0.00415549) X4 Y3
E(0.00415549) X4 Y3 X22
E(0.00415549) X4 Z3
E(0.00415549) X4 Z3 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X3
E(0.00415549) Y4 X3 X22
E(0.00415549) Y4 Y3
E(0.00415549) Y4 Y3 X22
E(0.00415549) Y4 Z3
E(0.00415549) Y4 Z3 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X3
E(0.00415549) Z4 X3 X22
E(0.00415549) Z4 Y3
E(0.00415549) Z4 Y3 X22
E(0.00415549) Z4 Z3
E(0.00415549) Z4 Z3 X22
M 22
R 22
CX 6 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X7
E(0.00415549) X6 X7 X22
E(0.00415549) X6 Y7
E(0.00415549) X6 Y7 X22
E(0.00415549) X6 Z7
E(0.00415549) X6 Z7 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X7
E(0.00415549) Y6 X7 X22
E(0.00415549) Y6 Y7
E(0.00415549) Y6 Y7 X22
E(0.00415549) Y6 Z7
E(0.00415549) Y6 Z7 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X7
E(0.00415549) Z6 X7 X22
E(0.00415549) Z6 Y7
E(0.00415549) Z6 Y7 X22
E(0.00415549) Z6 Z7
E(0.00415549) Z6 Z7 X22
M 22
R 22
CX 9 22
E(0.0164159) X22
E(0.0164159) X9
E(0.0164159) X9 X22
E(0.0164159) Y9
E(0.0164159) Y9 X22
E(0.0164159) Z9
E(0.0164159) Z9 X22
M 22
R 22
CX 11 22
E(0.0164159) X22
E(0.0164159) X11
E(0.0164159) X11 X22
E(0.0164159) Y11
E(0.0164159) Y11 X22
E(0.0164159) Z11
E(0.0164159) Z11 X22
M 22
R 22
CX 2 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X5
E(0.00415549) X2 X5 X22
E(0.00415549) X2 Y5
E(0.00415549) X2 Y5 X22
E(0.00415549) X2 Z5
E(0.00415549) X2 Z5 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X5
E(0.00415549) Y2 X5 X22
E(0.00415549) Y2 Y5
E(0.00415549) Y2 Y5 X22
E(0.00415549) Y2 Z5
E(0.00415549) Y2 Z5 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X5
E(0.00415549) Z2 X5 X22
E(0.00415549) Z2 Y5
E(0.00415549) Z2 Y5 X22
E(0.00415549) Z2 Z5
E(0.00415549) Z2 Z5 X22
M 22
R 22
CX 8 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X10
E(0.00415549) X8 X10 X22
E(0.00415549) X8 Y10
E(0.00415549) X8 Y10 X22
E(0.00415549) X8 Z10
E(0.00415549) X8 Z10 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X10
E(0.00415549) Y8 X10 X22
E(0.00415549) Y8 Y10
E(0.00415549) Y8 Y10 X22
E(0.00415549) Y8 Z10
E(0.00415549) Y8 Z10 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X10
E(0.00415549) Z8 X10 X22
E(0.00415549) Z8 Y10
E(0.00415549) Z8 Y10 X22
E(0.00415549) Z8 Z10
E(0.00415549) Z8 Z10 X22
M 22
OBSERVABLE_INCLUDE(1) rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
R 22
XCX 2 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X3
E(0.00415549) X2 X3 X22
E(0.00415549) X2 Y3
E(0.00415549) X2 Y3 X22
E(0.00415549) X2 Z3
E(0.00415549) X2 Z3 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X3
E(0.00415549) Y2 X3 X22
E(0.00415549) Y2 Y3
E(0.00415549) Y2 Y3 X22
E(0.00415549) Y2 Z3
E(0.00415549) Y2 Z3 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X3
E(0.00415549) Z2 X3 X22
E(0.00415549) Z2 Y3
E(0.00415549) Z2 Y3 X22
E(0.00415549) Z2 Z3
E(0.00415549) Z2 Z3 X22
M 22
R 22
XCX 6 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X5
E(0.00415549) X6 X5 X22
E(0.00415549) X6 Y5
E(0.00415549) X6 Y5 X22
E(0.00415549) X6 Z5
E(0.00415549) X6 Z5 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X5
E(0.00415549) Y6 X5 X22
E(0.00415549) Y6 Y5
E(0.00415549) Y6 Y5 X22
E(0.00415549) Y6 Z5
E(0.00415549) Y6 Z5 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X5
E(0.00415549) Z6 X5 X22
E(0.00415549) Z6 Y5
E(0.00415549) Z6 Y5 X22
E(0.00415549) Z6 Z5
E(0.00415549) Z6 Z5 X22
M 22
R 22
XCX 8 22 9 22
E(0.00415549) X22
E(0.00415549) X9
E(0.00415549) X9 X22
E(0.00415549) Y9
E(0.00415549) Y9 X22
E(0.00415549) Z9
E(0.00415549) Z9 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X9
E(0.00415549) X8 X9 X22
E(0.00415549) X8 Y9
E(0.00415549) X8 Y9 X22
E(0.00415549) X8 Z9
E(0.00415549) X8 Z9 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X9
E(0.00415549) Y8 X9 X22
E(0.00415549) Y8 Y9
E(0.00415549) Y8 Y9 X22
E(0.00415549) Y8 Z9
E(0.00415549) Y8 Z9 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X9
E(0.00415549) Z8 X9 X22
E(0.00415549) Z8 Y9
E(0.00415549) Z8 Y9 X22
E(0.00415549) Z8 Z9
E(0.00415549) Z8 Z9 X22
M 22
R 22
XCX 11 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X11
E(0.00415549) X11 X22
E(0.00415549) X11 X10
E(0.00415549) X11 X10 X22
E(0.00415549) X11 Y10
E(0.00415549) X11 Y10 X22
E(0.00415549) X11 Z10
E(0.00415549) X11 Z10 X22
E(0.00415549) Y11
E(0.00415549) Y11 X22
E(0.00415549) Y11 X10
E(0.00415549) Y11 X10 X22
E(0.00415549) Y11 Y10
E(0.00415549) Y11 Y10 X22
E(0.00415549) Y11 Z10
E(0.00415549) Y11 Z10 X22
E(0.00415549) Z11
E(0.00415549) Z11 X22
E(0.00415549) Z11 X10
E(0.00415549) Z11 X10 X22
E(0.00415549) Z11 Y10
E(0.00415549) Z11 Y10 X22
E(0.00415549) Z11 Z10
E(0.00415549) Z11 Z10 X22
M 22
R 22
XCX 0 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X0
E(0.00415549) X0 X22
E(0.00415549) X0 X1
E(0.00415549) X0 X1 X22
E(0.00415549) X0 Y1
E(0.00415549) X0 Y1 X22
E(0.00415549) X0 Z1
E(0.00415549) X0 Z1 X22
E(0.00415549) Y0
E(0.00415549) Y0 X22
E(0.00415549) Y0 X1
E(0.00415549) Y0 X1 X22
E(0.00415549) Y0 Y1
E(0.00415549) Y0 Y1 X22
E(0.00415549) Y0 Z1
E(0.00415549) Y0 Z1 X22
E(0.00415549) Z0
E(0.00415549) Z0 X22
E(0.00415549) Z0 X1
E(0.00415549) Z0 X1 X22
E(0.00415549) Z0 Y1
E(0.00415549) Z0 Y1 X22
E(0.00415549) Z0 Z1
E(0.00415549) Z0 Z1 X22
M 22
R 22
XCX 4 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X7
E(0.00415549) X4 X7 X22
E(0.00415549) X4 Y7
E(0.00415549) X4 Y7 X22
E(0.00415549) X4 Z7
E(0.00415549) X4 Z7 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X7
E(0.00415549) Y4 X7 X22
E(0.00415549) Y4 Y7
E(0.00415549) Y4 Y7 X22
E(0.00415549) Y4 Z7
E(0.00415549) Y4 Z7 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X7
E(0.00415549) Z4 X7 X22
E(0.00415549) Z4 Y7
E(0.00415549) Z4 Y7 X22
E(0.00415549) Z4 Z7
E(0.00415549) Z4 Z7 X22
M 22
DETECTOR(1.5, 2, 0) rec[-54] rec[-53] rec[-49] rec[-46] rec[-45] rec[-42] rec[-12] rec[-11] rec[-8] rec[-6] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
R 22
CX 0 22
E(0.0164159) X22
E(0.0164159) X0
E(0.0164159) X0 X22
E(0.0164159) Y0
E(0.0164159) Y0 X22
E(0.0164159) Z0
E(0.0164159) Z0 X22
M 22
R 22
CX 1 22
E(0.0164159) X22
E(0.0164159) X1
E(0.0164159) X1 X22
E(0.0164159) Y1
E(0.0164159) Y1 X22
E(0.0164159) Z1
E(0.0164159) Z1 X22
M 22
R 22
CX 4 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X3
E(0.00415549) X4 X3 X22
E(0.00415549) X4 Y3
E(0.00415549) X4 Y3 X22
E(0.00415549) X4 Z3
E(0.00415549) X4 Z3 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X3
E(0.00415549) Y4 X3 X22
E(0.00415549) Y4 Y3
E(0.00415549) Y4 Y3 X22
E(0.00415549) Y4 Z3
E(0.00415549) Y4 Z3 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X3
E(0.00415549) Z4 X3 X22
E(0.00415549) Z4 Y3
E(0.00415549) Z4 Y3 X22
E(0.00415549) Z4 Z3
E(0.00415549) Z4 Z3 X22
M 22
R 22
CX 6 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X7
E(0.00415549) X6 X7 X22
E(0.00415549) X6 Y7
E(0.00415549) X6 Y7 X22
E(0.00415549) X6 Z7
E(0.00415549) X6 Z7 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X7
E(0.00415549) Y6 X7 X22
E(0.00415549) Y6 Y7
E(0.00415549) Y6 Y7 X22
E(0.00415549) Y6 Z7
E(0.00415549) Y6 Z7 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X7
E(0.00415549) Z6 X7 X22
E(0.00415549) Z6 Y7
E(0.00415549) Z6 Y7 X22
E(0.00415549) Z6 Z7
E(0.00415549) Z6 Z7 X22
M 22
R 22
CX 9 22
E(0.0164159) X22
E(0.0164159) X9
E(0.0164159) X9 X22
E(0.0164159) Y9
E(0.0164159) Y9 X22
E(0.0164159) Z9
E(0.0164159) Z9 X22
M 22
R 22
CX 11 22
E(0.0164159) X22
E(0.0164159) X11
E(0.0164159) X11 X22
E(0.0164159) Y11
E(0.0164159) Y11 X22
E(0.0164159) Z11
E(0.0164159) Z11 X22
M 22
R 22
CX 2 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X5
E(0.00415549) X2 X5 X22
E(0.00415549) X2 Y5
E(0.00415549) X2 Y5 X22
E(0.00415549) X2 Z5
E(0.00415549) X2 Z5 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X5
E(0.00415549) Y2 X5 X22
E(0.00415549) Y2 Y5
E(0.00415549) Y2 Y5 X22
E(0.00415549) Y2 Z5
E(0.00415549) Y2 Z5 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X5
E(0.00415549) Z2 X5 X22
E(0.00415549) Z2 Y5
E(0.00415549) Z2 Y5 X22
E(0.00415549) Z2 Z5
E(0.00415549) Z2 Z5 X22
M 22
R 22
CX 8 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X10
E(0.00415549) X8 X10 X22
E(0.00415549) X8 Y10
E(0.00415549) X8 Y10 X22
E(0.00415549) X8 Z10
E(0.00415549) X8 Z10 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X10
E(0.00415549) Y8 X10 X22
E(0.00415549) Y8 Y10
E(0.00415549) Y8 Y10 X22
E(0.00415549) Y8 Z10
E(0.00415549) Y8 Z10 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X10
E(0.00415549) Z8 X10 X22
E(0.00415549) Z8 Y10
E(0.00415549) Z8 Y10 X22
E(0.00415549) Z8 Z10
E(0.00415549) Z8 Z10 X22
M 22
OBSERVABLE_INCLUDE(1) rec[-1]
DETECTOR(1.5, 2, 0) rec[-20] rec[-19] rec[-16] rec[-6] rec[-5] rec[-2]
DETECTOR(2.5, 5, 0) rec[-18] rec[-17] rec[-15] rec[-4] rec[-3] rec[-1]
DETECTOR(0.5, -1, 0) rec[-22] rec[-21] rec[-8] rec[-7]
SHIFT_COORDS(0, 0, 1)
TICK
R 22
YCX 0 22
E(0.0164159) X22
E(0.0164159) X0
E(0.0164159) X0 X22
E(0.0164159) Y0
E(0.0164159) Y0 X22
E(0.0164159) Z0
E(0.0164159) Z0 X22
M 22
R 22
YCX 2 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X1
E(0.00415549) X2 X1 X22
E(0.00415549) X2 Y1
E(0.00415549) X2 Y1 X22
E(0.00415549) X2 Z1
E(0.00415549) X2 Z1 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X1
E(0.00415549) Y2 X1 X22
E(0.00415549) Y2 Y1
E(0.00415549) Y2 Y1 X22
E(0.00415549) Y2 Z1
E(0.00415549) Y2 Z1 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X1
E(0.00415549) Z2 X1 X22
E(0.00415549) Z2 Y1
E(0.00415549) Z2 Y1 X22
E(0.00415549) Z2 Z1
E(0.00415549) Z2 Z1 X22
M 22
R 22
YCX 4 22
E(0.0164159) X22
E(0.0164159) X4
E(0.0164159) X4 X22
E(0.0164159) Y4
E(0.0164159) Y4 X22
E(0.0164159) Z4
E(0.0164159) Z4 X22
M 22
R 22
YCX 5 22
E(0.0164159) X22
E(0.0164159) X5
E(0.0164159) X5 X22
E(0.0164159) Y5
E(0.0164159) Y5 X22
E(0.0164159) Z5
E(0.0164159) Z5 X22
M 22
R 22
YCX 8 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X7
E(0.00415549) X8 X7 X22
E(0.00415549) X8 Y7
E(0.00415549) X8 Y7 X22
E(0.00415549) X8 Z7
E(0.00415549) X8 Z7 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X7
E(0.00415549) Y8 X7 X22
E(0.00415549) Y8 Y7
E(0.00415549) Y8 Y7 X22
E(0.00415549) Y8 Z7
E(0.00415549) Y8 Z7 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X7
E(0.00415549) Z8 X7 X22
E(0.00415549) Z8 Y7
E(0.00415549) Z8 Y7 X22
E(0.00415549) Z8 Z7
E(0.00415549) Z8 Z7 X22
M 22
R 22
YCX 10 22
E(0.0164159) X22
E(0.0164159) X10
E(0.0164159) X10 X22
E(0.0164159) Y10
E(0.0164159) Y10 X22
E(0.0164159) Z10
E(0.0164159) Z10 X22
M 22
R 22
YCX 3 22
E(0.0164159) X22
E(0.0164159) X3
E(0.0164159) X3 X22
E(0.0164159) Y3
E(0.0164159) Y3 X22
E(0.0164159) Z3
E(0.0164159) Z3 X22
M 22
R 22
YCX 9 22
E(0.0164159) X22
E(0.0164159) X9
E(0.0164159) X9 X22
E(0.0164159) Y9
E(0.0164159) Y9 X22
E(0.0164159) Z9
E(0.0164159) Z9 X22
M 22
R 22
YCX 6 22
E(0.0164159) X22
E(0.0164159) X6
E(0.0164159) X6 X22
E(0.0164159) Y6
E(0.0164159) Y6 X22
E(0.0164159) Z6
E(0.0164159) Z6 X22
M 22
R 22
YCX 11 22
E(0.0164159) X22
E(0.0164159) X11
E(0.0164159) X11 X22
E(0.0164159) Y11
E(0.0164159) Y11 X22
E(0.0164159) Z11
E(0.0164159) Z11 X22
M 22
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(0.5, 3, 0) rec[-40] rec[-36] rec[-30] rec[-16] rec[-8] rec[-4]
DETECTOR(2.5, 3, 0) rec[-38] rec[-37] rec[-34] rec[-29] rec[-25] rec[-15] rec[-11] rec[-6] rec[-5] rec[-2]
SHIFT_COORDS(0, 0, 1)
TICK
}
R 22
XCX 2 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X3
E(0.00415549) X2 X3 X22
E(0.00415549) X2 Y3
E(0.00415549) X2 Y3 X22
E(0.00415549) X2 Z3
E(0.00415549) X2 Z3 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X3
E(0.00415549) Y2 X3 X22
E(0.00415549) Y2 Y3
E(0.00415549) Y2 Y3 X22
E(0.00415549) Y2 Z3
E(0.00415549) Y2 Z3 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X3
E(0.00415549) Z2 X3 X22
E(0.00415549) Z2 Y3
E(0.00415549) Z2 Y3 X22
E(0.00415549) Z2 Z3
E(0.00415549) Z2 Z3 X22
M 22
R 22
XCX 6 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X5
E(0.00415549) X6 X5 X22
E(0.00415549) X6 Y5
E(0.00415549) X6 Y5 X22
E(0.00415549) X6 Z5
E(0.00415549) X6 Z5 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X5
E(0.00415549) Y6 X5 X22
E(0.00415549) Y6 Y5
E(0.00415549) Y6 Y5 X22
E(0.00415549) Y6 Z5
E(0.00415549) Y6 Z5 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X5
E(0.00415549) Z6 X5 X22
E(0.00415549) Z6 Y5
E(0.00415549) Z6 Y5 X22
E(0.00415549) Z6 Z5
E(0.00415549) Z6 Z5 X22
M 22
R 22
XCX 8 22 9 22
E(0.00415549) X22
E(0.00415549) X9
E(0.00415549) X9 X22
E(0.00415549) Y9
E(0.00415549) Y9 X22
E(0.00415549) Z9
E(0.00415549) Z9 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X9
E(0.00415549) X8 X9 X22
E(0.00415549) X8 Y9
E(0.00415549) X8 Y9 X22
E(0.00415549) X8 Z9
E(0.00415549) X8 Z9 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X9
E(0.00415549) Y8 X9 X22
E(0.00415549) Y8 Y9
E(0.00415549) Y8 Y9 X22
E(0.00415549) Y8 Z9
E(0.00415549) Y8 Z9 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X9
E(0.00415549) Z8 X9 X22
E(0.00415549) Z8 Y9
E(0.00415549) Z8 Y9 X22
E(0.00415549) Z8 Z9
E(0.00415549) Z8 Z9 X22
M 22
R 22
XCX 11 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X11
E(0.00415549) X11 X22
E(0.00415549) X11 X10
E(0.00415549) X11 X10 X22
E(0.00415549) X11 Y10
E(0.00415549) X11 Y10 X22
E(0.00415549) X11 Z10
E(0.00415549) X11 Z10 X22
E(0.00415549) Y11
E(0.00415549) Y11 X22
E(0.00415549) Y11 X10
E(0.00415549) Y11 X10 X22
E(0.00415549) Y11 Y10
E(0.00415549) Y11 Y10 X22
E(0.00415549) Y11 Z10
E(0.00415549) Y11 Z10 X22
E(0.00415549) Z11
E(0.00415549) Z11 X22
E(0.00415549) Z11 X10
E(0.00415549) Z11 X10 X22
E(0.00415549) Z11 Y10
E(0.00415549) Z11 Y10 X22
E(0.00415549) Z11 Z10
E(0.00415549) Z11 Z10 X22
M 22
R 22
XCX 0 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X0
E(0.00415549) X0 X22
E(0.00415549) X0 X1
E(0.00415549) X0 X1 X22
E(0.00415549) X0 Y1
E(0.00415549) X0 Y1 X22
E(0.00415549) X0 Z1
E(0.00415549) X0 Z1 X22
E(0.00415549) Y0
E(0.00415549) Y0 X22
E(0.00415549) Y0 X1
E(0.00415549) Y0 X1 X22
E(0.00415549) Y0 Y1
E(0.00415549) Y0 Y1 X22
E(0.00415549) Y0 Z1
E(0.00415549) Y0 Z1 X22
E(0.00415549) Z0
E(0.00415549) Z0 X22
E(0.00415549) Z0 X1
E(0.00415549) Z0 X1 X22
E(0.00415549) Z0 Y1
E(0.00415549) Z0 Y1 X22
E(0.00415549) Z0 Z1
E(0.00415549) Z0 Z1 X22
M 22
R 22
XCX 4 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X7
E(0.00415549) X4 X7 X22
E(0.00415549) X4 Y7
E(0.00415549) X4 Y7 X22
E(0.00415549) X4 Z7
E(0.00415549) X4 Z7 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X7
E(0.00415549) Y4 X7 X22
E(0.00415549) Y4 Y7
E(0.00415549) Y4 Y7 X22
E(0.00415549) Y4 Z7
E(0.00415549) Y4 Z7 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X7
E(0.00415549) Z4 X7 X22
E(0.00415549) Z4 Y7
E(0.00415549) Z4 Y7 X22
E(0.00415549) Z4 Z7
E(0.00415549) Z4 Z7 X22
M 22
SHIFT_COORDS(0, 0, 1)
TICK
R 22
YCX 0 22
E(0.0164159) X22
E(0.0164159) X0
E(0.0164159) X0 X22
E(0.0164159) Y0
E(0.0164159) Y0 X22
E(0.0164159) Z0
E(0.0164159) Z0 X22
M 22
R 22
YCX 2 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X1
E(0.00415549) X2 X1 X22
E(0.00415549) X2 Y1
E(0.00415549) X2 Y1 X22
E(0.00415549) X2 Z1
E(0.00415549) X2 Z1 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X1
E(0.00415549) Y2 X1 X22
E(0.00415549) Y2 Y1
E(0.00415549) Y2 Y1 X22
E(0.00415549) Y2 Z1
E(0.00415549) Y2 Z1 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X1
E(0.00415549) Z2 X1 X22
E(0.00415549) Z2 Y1
E(0.00415549) Z2 Y1 X22
E(0.00415549) Z2 Z1
E(0.00415549) Z2 Z1 X22
M 22
R 22
YCX 4 22
E(0.0164159) X22
E(0.0164159) X4
E(0.0164159) X4 X22
E(0.0164159) Y4
E(0.0164159) Y4 X22
E(0.0164159) Z4
E(0.0164159) Z4 X22
M 22
R 22
YCX 5 22
E(0.0164159) X22
E(0.0164159) X5
E(0.0164159) X5 X22
E(0.0164159) Y5
E(0.0164159) Y5 X22
E(0.0164159) Z5
E(0.0164159) Z5 X22
M 22
R 22
YCX 8 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X7
E(0.00415549) X8 X7 X22
E(0.00415549) X8 Y7
E(0.00415549) X8 Y7 X22
E(0.00415549) X8 Z7
E(0.00415549) X8 Z7 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X7
E(0.00415549) Y8 X7 X22
E(0.00415549) Y8 Y7
E(0.00415549) Y8 Y7 X22
E(0.00415549) Y8 Z7
E(0.00415549) Y8 Z7 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X7
E(0.00415549) Z8 X7 X22
E(0.00415549) Z8 Y7
E(0.00415549) Z8 Y7 X22
E(0.00415549) Z8 Z7
E(0.00415549) Z8 Z7 X22
M 22
R 22
YCX 10 22
E(0.0164159) X22
E(0.0164159) X10
E(0.0164159) X10 X22
E(0.0164159) Y10
E(0.0164159) Y10 X22
E(0.0164159) Z10
E(0.0164159) Z10 X22
M 22
R 22
YCX 3 22
E(0.0164159) X22
E(0.0164159) X3
E(0.0164159) X3 X22
E(0.0164159) Y3
E(0.0164159) Y3 X22
E(0.0164159) Z3
E(0.0164159) Z3 X22
M 22
R 22
YCX 9 22
E(0.0164159) X22
E(0.0164159) X9
E(0.0164159) X9 X22
E(0.0164159) Y9
E(0.0164159) Y9 X22
E(0.0164159) Z9
E(0.0164159) Z9 X22
M 22
R 22
YCX 6 22
E(0.0164159) X22
E(0.0164159) X6
E(0.0164159) X6 X22
E(0.0164159) Y6
E(0.0164159) Y6 X22
E(0.0164159) Z6
E(0.0164159) Z6 X22
M 22
R 22
YCX 11 22
E(0.0164159) X22
E(0.0164159) X11
E(0.0164159) X11 X22
E(0.0164159) Y11
E(0.0164159) Y11 X22
E(0.0164159) Z11
E(0.0164159) Z11 X22
M 22
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(1.5, 4, 0) rec[-24] rec[-22] rec[-19] rec[-8] rec[-6] rec[-3]
DETECTOR(2.5, 1, 0) rec[-23] rec[-18] rec[-7] rec[-2]
DETECTOR(0.5, 1, 0) rec[-26] rec[-25] rec[-20] rec[-10] rec[-9] rec[-4]
DETECTOR(3.5, 4, 0) rec[-21] rec[-17] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
R 22
CX 0 22
E(0.0164159) X22
E(0.0164159) X0
E(0.0164159) X0 X22
E(0.0164159) Y0
E(0.0164159) Y0 X22
E(0.0164159) Z0
E(0.0164159) Z0 X22
M 22
R 22
CX 1 22
E(0.0164159) X22
E(0.0164159) X1
E(0.0164159) X1 X22
E(0.0164159) Y1
E(0.0164159) Y1 X22
E(0.0164159) Z1
E(0.0164159) Z1 X22
M 22
R 22
CX 4 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X3
E(0.00415549) X4 X3 X22
E(0.00415549) X4 Y3
E(0.00415549) X4 Y3 X22
E(0.00415549) X4 Z3
E(0.00415549) X4 Z3 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X3
E(0.00415549) Y4 X3 X22
E(0.00415549) Y4 Y3
E(0.00415549) Y4 Y3 X22
E(0.00415549) Y4 Z3
E(0.00415549) Y4 Z3 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X3
E(0.00415549) Z4 X3 X22
E(0.00415549) Z4 Y3
E(0.00415549) Z4 Y3 X22
E(0.00415549) Z4 Z3
E(0.00415549) Z4 Z3 X22
M 22
R 22
CX 6 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X7
E(0.00415549) X6 X7 X22
E(0.00415549) X6 Y7
E(0.00415549) X6 Y7 X22
E(0.00415549) X6 Z7
E(0.00415549) X6 Z7 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X7
E(0.00415549) Y6 X7 X22
E(0.00415549) Y6 Y7
E(0.00415549) Y6 Y7 X22
E(0.00415549) Y6 Z7
E(0.00415549) Y6 Z7 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X7
E(0.00415549) Z6 X7 X22
E(0.00415549) Z6 Y7
E(0.00415549) Z6 Y7 X22
E(0.00415549) Z6 Z7
E(0.00415549) Z6 Z7 X22
M 22
R 22
CX 9 22
E(0.0164159) X22
E(0.0164159) X9
E(0.0164159) X9 X22
E(0.0164159) Y9
E(0.0164159) Y9 X22
E(0.0164159) Z9
E(0.0164159) Z9 X22
M 22
R 22
CX 11 22
E(0.0164159) X22
E(0.0164159) X11
E(0.0164159) X11 X22
E(0.0164159) Y11
E(0.0164159) Y11 X22
E(0.0164159) Z11
E(0.0164159) Z11 X22
M 22
R 22
CX 2 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X5
E(0.00415549) X2 X5 X22
E(0.00415549) X2 Y5
E(0.00415549) X2 Y5 X22
E(0.00415549) X2 Z5
E(0.00415549) X2 Z5 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X5
E(0.00415549) Y2 X5 X22
E(0.00415549) Y2 Y5
E(0.00415549) Y2 Y5 X22
E(0.00415549) Y2 Z5
E(0.00415549) Y2 Z5 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X5
E(0.00415549) Z2 X5 X22
E(0.00415549) Z2 Y5
E(0.00415549) Z2 Y5 X22
E(0.00415549) Z2 Z5
E(0.00415549) Z2 Z5 X22
M 22
R 22
CX 8 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X10
E(0.00415549) X8 X10 X22
E(0.00415549) X8 Y10
E(0.00415549) X8 Y10 X22
E(0.00415549) X8 Z10
E(0.00415549) X8 Z10 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X10
E(0.00415549) Y8 X10 X22
E(0.00415549) Y8 Y10
E(0.00415549) Y8 Y10 X22
E(0.00415549) Y8 Z10
E(0.00415549) Y8 Z10 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X10
E(0.00415549) Z8 X10 X22
E(0.00415549) Z8 Y10
E(0.00415549) Z8 Y10 X22
E(0.00415549) Z8 Z10
E(0.00415549) Z8 Z10 X22
M 22
OBSERVABLE_INCLUDE(1) rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
R 22
XCX 2 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X3
E(0.00415549) X2 X3 X22
E(0.00415549) X2 Y3
E(0.00415549) X2 Y3 X22
E(0.00415549) X2 Z3
E(0.00415549) X2 Z3 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X3
E(0.00415549) Y2 X3 X22
E(0.00415549) Y2 Y3
E(0.00415549) Y2 Y3 X22
E(0.00415549) Y2 Z3
E(0.00415549) Y2 Z3 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X3
E(0.00415549) Z2 X3 X22
E(0.00415549) Z2 Y3
E(0.00415549) Z2 Y3 X22
E(0.00415549) Z2 Z3
E(0.00415549) Z2 Z3 X22
M 22
R 22
XCX 6 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X5
E(0.00415549) X6 X5 X22
E(0.00415549) X6 Y5
E(0.00415549) X6 Y5 X22
E(0.00415549) X6 Z5
E(0.00415549) X6 Z5 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X5
E(0.00415549) Y6 X5 X22
E(0.00415549) Y6 Y5
E(0.00415549) Y6 Y5 X22
E(0.00415549) Y6 Z5
E(0.00415549) Y6 Z5 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X5
E(0.00415549) Z6 X5 X22
E(0.00415549) Z6 Y5
E(0.00415549) Z6 Y5 X22
E(0.00415549) Z6 Z5
E(0.00415549) Z6 Z5 X22
M 22
R 22
XCX 8 22 9 22
E(0.00415549) X22
E(0.00415549) X9
E(0.00415549) X9 X22
E(0.00415549) Y9
E(0.00415549) Y9 X22
E(0.00415549) Z9
E(0.00415549) Z9 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X9
E(0.00415549) X8 X9 X22
E(0.00415549) X8 Y9
E(0.00415549) X8 Y9 X22
E(0.00415549) X8 Z9
E(0.00415549) X8 Z9 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X9
E(0.00415549) Y8 X9 X22
E(0.00415549) Y8 Y9
E(0.00415549) Y8 Y9 X22
E(0.00415549) Y8 Z9
E(0.00415549) Y8 Z9 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X9
E(0.00415549) Z8 X9 X22
E(0.00415549) Z8 Y9
E(0.00415549) Z8 Y9 X22
E(0.00415549) Z8 Z9
E(0.00415549) Z8 Z9 X22
M 22
R 22
XCX 11 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X11
E(0.00415549) X11 X22
E(0.00415549) X11 X10
E(0.00415549) X11 X10 X22
E(0.00415549) X11 Y10
E(0.00415549) X11 Y10 X22
E(0.00415549) X11 Z10
E(0.00415549) X11 Z10 X22
E(0.00415549) Y11
E(0.00415549) Y11 X22
E(0.00415549) Y11 X10
E(0.00415549) Y11 X10 X22
E(0.00415549) Y11 Y10
E(0.00415549) Y11 Y10 X22
E(0.00415549) Y11 Z10
E(0.00415549) Y11 Z10 X22
E(0.00415549) Z11
E(0.00415549) Z11 X22
E(0.00415549) Z11 X10
E(0.00415549) Z11 X10 X22
E(0.00415549) Z11 Y10
E(0.00415549) Z11 Y10 X22
E(0.00415549) Z11 Z10
E(0.00415549) Z11 Z10 X22
M 22
R 22
XCX 0 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X0
E(0.00415549) X0 X22
E(0.00415549) X0 X1
E(0.00415549) X0 X1 X22
E(0.00415549) X0 Y1
E(0.00415549) X0 Y1 X22
E(0.00415549) X0 Z1
E(0.00415549) X0 Z1 X22
E(0.00415549) Y0
E(0.00415549) Y0 X22
E(0.00415549) Y0 X1
E(0.00415549) Y0 X1 X22
E(0.00415549) Y0 Y1
E(0.00415549) Y0 Y1 X22
E(0.00415549) Y0 Z1
E(0.00415549) Y0 Z1 X22
E(0.00415549) Z0
E(0.00415549) Z0 X22
E(0.00415549) Z0 X1
E(0.00415549) Z0 X1 X22
E(0.00415549) Z0 Y1
E(0.00415549) Z0 Y1 X22
E(0.00415549) Z0 Z1
E(0.00415549) Z0 Z1 X22
M 22
R 22
XCX 4 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X7
E(0.00415549) X4 X7 X22
E(0.00415549) X4 Y7
E(0.00415549) X4 Y7 X22
E(0.00415549) X4 Z7
E(0.00415549) X4 Z7 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X7
E(0.00415549) Y4 X7 X22
E(0.00415549) Y4 Y7
E(0.00415549) Y4 Y7 X22
E(0.00415549) Y4 Z7
E(0.00415549) Y4 Z7 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X7
E(0.00415549) Z4 X7 X22
E(0.00415549) Z4 Y7
E(0.00415549) Z4 Y7 X22
E(0.00415549) Z4 Z7
E(0.00415549) Z4 Z7 X22
M 22
DETECTOR(1.5, 2, 0) rec[-54] rec[-53] rec[-49] rec[-46] rec[-45] rec[-42] rec[-12] rec[-11] rec[-8] rec[-6] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
R 22
CX 0 22
E(0.0164159) X22
E(0.0164159) X0
E(0.0164159) X0 X22
E(0.0164159) Y0
E(0.0164159) Y0 X22
E(0.0164159) Z0
E(0.0164159) Z0 X22
M 22
R 22
CX 1 22
E(0.0164159) X22
E(0.0164159) X1
E(0.0164159) X1 X22
E(0.0164159) Y1
E(0.0164159) Y1 X22
E(0.0164159) Z1
E(0.0164159) Z1 X22
M 22
R 22
CX 4 22 3 22
E(0.00415549) X22
E(0.00415549) X3
E(0.00415549) X3 X22
E(0.00415549) Y3
E(0.00415549) Y3 X22
E(0.00415549) Z3
E(0.00415549) Z3 X22
E(0.00415549) X4
E(0.00415549) X4 X22
E(0.00415549) X4 X3
E(0.00415549) X4 X3 X22
E(0.00415549) X4 Y3
E(0.00415549) X4 Y3 X22
E(0.00415549) X4 Z3
E(0.00415549) X4 Z3 X22
E(0.00415549) Y4
E(0.00415549) Y4 X22
E(0.00415549) Y4 X3
E(0.00415549) Y4 X3 X22
E(0.00415549) Y4 Y3
E(0.00415549) Y4 Y3 X22
E(0.00415549) Y4 Z3
E(0.00415549) Y4 Z3 X22
E(0.00415549) Z4
E(0.00415549) Z4 X22
E(0.00415549) Z4 X3
E(0.00415549) Z4 X3 X22
E(0.00415549) Z4 Y3
E(0.00415549) Z4 Y3 X22
E(0.00415549) Z4 Z3
E(0.00415549) Z4 Z3 X22
M 22
R 22
CX 6 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X6
E(0.00415549) X6 X22
E(0.00415549) X6 X7
E(0.00415549) X6 X7 X22
E(0.00415549) X6 Y7
E(0.00415549) X6 Y7 X22
E(0.00415549) X6 Z7
E(0.00415549) X6 Z7 X22
E(0.00415549) Y6
E(0.00415549) Y6 X22
E(0.00415549) Y6 X7
E(0.00415549) Y6 X7 X22
E(0.00415549) Y6 Y7
E(0.00415549) Y6 Y7 X22
E(0.00415549) Y6 Z7
E(0.00415549) Y6 Z7 X22
E(0.00415549) Z6
E(0.00415549) Z6 X22
E(0.00415549) Z6 X7
E(0.00415549) Z6 X7 X22
E(0.00415549) Z6 Y7
E(0.00415549) Z6 Y7 X22
E(0.00415549) Z6 Z7
E(0.00415549) Z6 Z7 X22
M 22
R 22
CX 9 22
E(0.0164159) X22
E(0.0164159) X9
E(0.0164159) X9 X22
E(0.0164159) Y9
E(0.0164159) Y9 X22
E(0.0164159) Z9
E(0.0164159) Z9 X22
M 22
R 22
CX 11 22
E(0.0164159) X22
E(0.0164159) X11
E(0.0164159) X11 X22
E(0.0164159) Y11
E(0.0164159) Y11 X22
E(0.0164159) Z11
E(0.0164159) Z11 X22
M 22
R 22
CX 2 22 5 22
E(0.00415549) X22
E(0.00415549) X5
E(0.00415549) X5 X22
E(0.00415549) Y5
E(0.00415549) Y5 X22
E(0.00415549) Z5
E(0.00415549) Z5 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X5
E(0.00415549) X2 X5 X22
E(0.00415549) X2 Y5
E(0.00415549) X2 Y5 X22
E(0.00415549) X2 Z5
E(0.00415549) X2 Z5 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X5
E(0.00415549) Y2 X5 X22
E(0.00415549) Y2 Y5
E(0.00415549) Y2 Y5 X22
E(0.00415549) Y2 Z5
E(0.00415549) Y2 Z5 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X5
E(0.00415549) Z2 X5 X22
E(0.00415549) Z2 Y5
E(0.00415549) Z2 Y5 X22
E(0.00415549) Z2 Z5
E(0.00415549) Z2 Z5 X22
M 22
R 22
CX 8 22 10 22
E(0.00415549) X22
E(0.00415549) X10
E(0.00415549) X10 X22
E(0.00415549) Y10
E(0.00415549) Y10 X22
E(0.00415549) Z10
E(0.00415549) Z10 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X10
E(0.00415549) X8 X10 X22
E(0.00415549) X8 Y10
E(0.00415549) X8 Y10 X22
E(0.00415549) X8 Z10
E(0.00415549) X8 Z10 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X10
E(0.00415549) Y8 X10 X22
E(0.00415549) Y8 Y10
E(0.00415549) Y8 Y10 X22
E(0.00415549) Y8 Z10
E(0.00415549) Y8 Z10 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X10
E(0.00415549) Z8 X10 X22
E(0.00415549) Z8 Y10
E(0.00415549) Z8 Y10 X22
E(0.00415549) Z8 Z10
E(0.00415549) Z8 Z10 X22
M 22
OBSERVABLE_INCLUDE(1) rec[-1]
DETECTOR(1.5, 2, 0) rec[-20] rec[-19] rec[-16] rec[-6] rec[-5] rec[-2]
DETECTOR(2.5, 5, 0) rec[-18] rec[-17] rec[-15] rec[-4] rec[-3] rec[-1]
DETECTOR(0.5, -1, 0) rec[-22] rec[-21] rec[-8] rec[-7]
SHIFT_COORDS(0, 0, 1)
TICK
R 22
YCX 0 22
E(0.0164159) X22
E(0.0164159) X0
E(0.0164159) X0 X22
E(0.0164159) Y0
E(0.0164159) Y0 X22
E(0.0164159) Z0
E(0.0164159) Z0 X22
M 22
R 22
YCX 2 22 1 22
E(0.00415549) X22
E(0.00415549) X1
E(0.00415549) X1 X22
E(0.00415549) Y1
E(0.00415549) Y1 X22
E(0.00415549) Z1
E(0.00415549) Z1 X22
E(0.00415549) X2
E(0.00415549) X2 X22
E(0.00415549) X2 X1
E(0.00415549) X2 X1 X22
E(0.00415549) X2 Y1
E(0.00415549) X2 Y1 X22
E(0.00415549) X2 Z1
E(0.00415549) X2 Z1 X22
E(0.00415549) Y2
E(0.00415549) Y2 X22
E(0.00415549) Y2 X1
E(0.00415549) Y2 X1 X22
E(0.00415549) Y2 Y1
E(0.00415549) Y2 Y1 X22
E(0.00415549) Y2 Z1
E(0.00415549) Y2 Z1 X22
E(0.00415549) Z2
E(0.00415549) Z2 X22
E(0.00415549) Z2 X1
E(0.00415549) Z2 X1 X22
E(0.00415549) Z2 Y1
E(0.00415549) Z2 Y1 X22
E(0.00415549) Z2 Z1
E(0.00415549) Z2 Z1 X22
M 22
R 22
YCX 4 22
E(0.0164159) X22
E(0.0164159) X4
E(0.0164159) X4 X22
E(0.0164159) Y4
E(0.0164159) Y4 X22
E(0.0164159) Z4
E(0.0164159) Z4 X22
M 22
R 22
YCX 5 22
E(0.0164159) X22
E(0.0164159) X5
E(0.0164159) X5 X22
E(0.0164159) Y5
E(0.0164159) Y5 X22
E(0.0164159) Z5
E(0.0164159) Z5 X22
M 22
R 22
YCX 8 22 7 22
E(0.00415549) X22
E(0.00415549) X7
E(0.00415549) X7 X22
E(0.00415549) Y7
E(0.00415549) Y7 X22
E(0.00415549) Z7
E(0.00415549) Z7 X22
E(0.00415549) X8
E(0.00415549) X8 X22
E(0.00415549) X8 X7
E(0.00415549) X8 X7 X22
E(0.00415549) X8 Y7
E(0.00415549) X8 Y7 X22
E(0.00415549) X8 Z7
E(0.00415549) X8 Z7 X22
E(0.00415549) Y8
E(0.00415549) Y8 X22
E(0.00415549) Y8 X7
E(0.00415549) Y8 X7 X22
E(0.00415549) Y8 Y7
E(0.00415549) Y8 Y7 X22
E(0.00415549) Y8 Z7
E(0.00415549) Y8 Z7 X22
E(0.00415549) Z8
E(0.00415549) Z8 X22
E(0.00415549) Z8 X7
E(0.00415549) Z8 X7 X22
E(0.00415549) Z8 Y7
E(0.00415549) Z8 Y7 X22
E(0.00415549) Z8 Z7
E(0.00415549) Z8 Z7 X22
M 22
R 22
YCX 10 22
E(0.0164159) X22
E(0.0164159) X10
E(0.0164159) X10 X22
E(0.0164159) Y10
E(0.0164159) Y10 X22
E(0.0164159) Z10
E(0.0164159) Z10 X22
M 22
R 22
YCX 3 22
E(0.0164159) X22
E(0.0164159) X3
E(0.0164159) X3 X22
E(0.0164159) Y3
E(0.0164159) Y3 X22
E(0.0164159) Z3
E(0.0164159) Z3 X22
M 22
R 22
YCX 9 22
E(0.0164159) X22
E(0.0164159) X9
E(0.0164159) X9 X22
E(0.0164159) Y9
E(0.0164159) Y9 X22
E(0.0164159) Z9
E(0.0164159) Z9 X22
M 22
R 22
YCX 6 22
E(0.0164159) X22
E(0.0164159) X6
E(0.0164159) X6 X22
E(0.0164159) Y6
E(0.0164159) Y6 X22
E(0.0164159) Z6
E(0.0164159) Z6 X22
M 22
R 22
YCX 11 22
E(0.0164159) X22
E(0.0164159) X11
E(0.0164159) X11 X22
E(0.0164159) Y11
E(0.0164159) Y11 X22
E(0.0164159) Z11
E(0.0164159) Z11 X22
M 22
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(0.5, 3, 0) rec[-40] rec[-36] rec[-30] rec[-16] rec[-8] rec[-4]
DETECTOR(2.5, 3, 0) rec[-38] rec[-37] rec[-34] rec[-29] rec[-25] rec[-15] rec[-11] rec[-6] rec[-5] rec[-2]
SHIFT_COORDS(0, 0, 1)
TICK
R 22
YCX 0 22
E(0.0164159) X22
E(0.0164159) X0
E(0.0164159) X0 X22
E(0.0164159) Y0
E(0.0164159) Y0 X22
E(0.0164159) Z0
E(0.0164159) Z0 X22
M 22
R 22
YCX 1 22
E(0.0164159) X22
E(0.0164159) X1
E(0.0164159) X1 X22
E(0.0164159) Y1
E(0.0164159) Y1 X22
E(0.0164159) Z1
E(0.0164159) Z1 X22
M 22
R 22
YCX 2 22
E(0.0164159) X22
E(0.0164159) X2
E(0.0164159) X2 X22
E(0.0164159) Y2
E(0.0164159) Y2 X22
E(0.0164159) Z2
E(0.0164159) Z2 X22
M 22
R 22
YCX 3 22
E(0.0164159) X22
E(0.0164159) X3
E(0.0164159) X3 X22
E(0.0164159) Y3
E(0.0164159) Y3 X22
E(0.0164159) Z3
E(0.0164159) Z3 X22
M 22
R 22
YCX 4 22
E(0.0164159) X22
E(0.0164159) X4
E(0.0164159) X4 X22
E(0.0164159) Y4
E(0.0164159) Y4 X22
E(0.0164159) Z4
E(0.0164159) Z4 X22
M 22
R 22
YCX 5 22
E(0.0164159) X22
E(0.0164159) X5
E(0.0164159) X5 X22
E(0.0164159) Y5
E(0.0164159) Y5 X22
E(0.0164159) Z5
E(0.0164159) Z5 X22
M 22
R 22
YCX 6 22
E(0.0164159) X22
E(0.0164159) X6
E(0.0164159) X6 X22
E(0.0164159) Y6
E(0.0164159) Y6 X22
E(0.0164159) Z6
E(0.0164159) Z6 X22
M 22
R 22
YCX 7 22
E(0.0164159) X22
E(0.0164159) X7
E(0.0164159) X7 X22
E(0.0164159) Y7
E(0.0164159) Y7 X22
E(0.0164159) Z7
E(0.0164159) Z7 X22
M 22
R 22
YCX 8 22
E(0.0164159) X22
E(0.0164159) X8
E(0.0164159) X8 X22
E(0.0164159) Y8
E(0.0164159) Y8 X22
E(0.0164159) Z8
E(0.0164159) Z8 X22
M 22
R 22
YCX 9 22
E(0.0164159) X22
E(0.0164159) X9
E(0.0164159) X9 X22
E(0.0164159) Y9
E(0.0164159) Y9 X22
E(0.0164159) Z9
E(0.0164159) Z9 X22
M 22
R 22
YCX 10 22
E(0.0164159) X22
E(0.0164159) X10
E(0.0164159) X10 X22
E(0.0164159) Y10
E(0.0164159) Y10 X22
E(0.0164159) Z10
E(0.0164159) Z10 X22
M 22
R 22
YCX 11 22
E(0.0164159) X22
E(0.0164159) X11
E(0.0164159) X11 X22
E(0.0164159) Y11
E(0.0164159) Y11 X22
E(0.0164159) Z11
E(0.0164159) Z11 X22
M 22
DETECTOR(0, 0.5, 0) rec[-22] rec[-12]
DETECTOR(1, 0.5, 0) rec[-21] rec[-11] rec[-10]
DETECTOR(1, 3.5, 0) rec[-20] rec[-8]
DETECTOR(2, 0.5, 0) rec[-19] rec[-7]
DETECTOR(2, 3.5, 0) rec[-18] rec[-5] rec[-4]
DETECTOR(3, 3.5, 0) rec[-17] rec[-2]
DETECTOR(0.5, 2, 0) rec[-16] rec[-9]
DETECTOR(1.5, 5, 0) rec[-15] rec[-3]
DETECTOR(2.5, 2, 0) rec[-14] rec[-6]
DETECTOR(3.5, 5, 0) rec[-13] rec[-1]
DETECTOR(1.5, 2, 0) rec[-36] rec[-35] rec[-31] rec[-28] rec[-27] rec[-24] rec[-10] rec[-9] rec[-8] rec[-7] rec[-6] rec[-5]
OBSERVABLE_INCLUDE(1) rec[-4] rec[-3] rec[-2] rec[-1]
TICK
""")
def test_exact_circuit_SDEM3_H():
    """Golden regression test for the SDEM3 noisy gate set (sheared, 'H' observable).

    Builds a sheared 2x6 honeycomb layout at noise_level=0.125 and checks that
    the generated noisy circuit exactly equals the hand-verified stim circuit
    below.  Any change to the circuit generator that alters gate order, noise
    placement, detector coordinates, or measurement-record indices will fail
    this test, making generator changes reviewable as circuit diffs.

    NOTE(review): `layout.ideal_and_noisy_circuit[1]` — presumably the tuple is
    (ideal, noisy) and index 1 selects the noisy circuit; confirm against the
    HoneycombLayout property definition.
    """
    layout = HoneycombLayout(data_width=2,
                             data_height=6,
                             rounds=100,
                             noise_level=0.125,
                             noisy_gate_set='SDEM3',
                             tested_observable='H',
                             sheared=True)
    # Golden circuit: SDEM3 expresses noise via DEPOLARIZE1/DEPOLARIZE2 channels
    # plus noisy MPP measurements; the X_ERROR(0.0625) on reset is half the
    # configured noise level.
    assert layout.ideal_and_noisy_circuit[1] == stim.Circuit("""
QUBIT_COORDS(0, 0) 0
QUBIT_COORDS(1, 0) 1
QUBIT_COORDS(1, 1) 2
QUBIT_COORDS(1, 2) 3
QUBIT_COORDS(1, 3) 4
QUBIT_COORDS(2, 1) 5
QUBIT_COORDS(2, 2) 6
QUBIT_COORDS(2, 3) 7
QUBIT_COORDS(2, 4) 8
QUBIT_COORDS(2, 5) 9
QUBIT_COORDS(3, 4) 10
QUBIT_COORDS(3, 5) 11
R 0 1 2 3 4 5 6 7 8 9 10 11
X_ERROR(0.0625) 0 1 2 3 4 5 6 7 8 9 10 11
TICK
H_YZ 0 1 2 3 4 5 6 7 8 9 10 11
TICK
DEPOLARIZE2(0.125) 2 3 6 5 8 9 11 10 0 1 4 7
MPP(0.125) X2*X3 X6*X5 X8*X9 X11*X10 X0*X1 X4*X7
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 4 5 10 3 9 6 11
DEPOLARIZE2(0.125) 2 1 8 7
MPP(0.125) Y0 Y2*Y1 Y4 Y5 Y8*Y7 Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(1.5, 4, 0) rec[-8] rec[-6] rec[-3]
DETECTOR(2.5, 1, 0) rec[-7] rec[-2]
DETECTOR(0.5, 1, 0) rec[-10] rec[-9] rec[-4]
DETECTOR(3.5, 4, 0) rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 1 9 11
DEPOLARIZE2(0.125) 4 3 6 7 2 5 8 10
MPP(0.125) Z0 Z1 Z4*Z3 Z6*Z7 Z9 Z11 Z2*Z5 Z8*Z10
OBSERVABLE_INCLUDE(1) rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE2(0.125) 2 3 6 5 8 9 11 10 0 1 4 7
MPP(0.125) X2*X3 X6*X5 X8*X9 X11*X10 X0*X1 X4*X7
DETECTOR(1.5, 2, 0) rec[-12] rec[-11] rec[-8] rec[-6] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 1 9 11
DEPOLARIZE2(0.125) 4 3 6 7 2 5 8 10
MPP(0.125) Z0 Z1 Z4*Z3 Z6*Z7 Z9 Z11 Z2*Z5 Z8*Z10
OBSERVABLE_INCLUDE(1) rec[-1]
DETECTOR(1.5, 2, 0) rec[-20] rec[-19] rec[-16] rec[-6] rec[-5] rec[-2]
DETECTOR(2.5, 5, 0) rec[-18] rec[-17] rec[-15] rec[-4] rec[-3] rec[-1]
DETECTOR(0.5, -1, 0) rec[-22] rec[-21] rec[-8] rec[-7]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 4 5 10 3 9 6 11
DEPOLARIZE2(0.125) 2 1 8 7
MPP(0.125) Y0 Y2*Y1 Y4 Y5 Y8*Y7 Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(0.5, 3, 0) rec[-40] rec[-36] rec[-30] rec[-16] rec[-8] rec[-4]
DETECTOR(2.5, 3, 0) rec[-38] rec[-37] rec[-34] rec[-29] rec[-25] rec[-15] rec[-11] rec[-6] rec[-5] rec[-2]
SHIFT_COORDS(0, 0, 1)
TICK
REPEAT 48 {
DEPOLARIZE2(0.125) 2 3 6 5 8 9 11 10 0 1 4 7
MPP(0.125) X2*X3 X6*X5 X8*X9 X11*X10 X0*X1 X4*X7
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 4 5 10 3 9 6 11
DEPOLARIZE2(0.125) 2 1 8 7
MPP(0.125) Y0 Y2*Y1 Y4 Y5 Y8*Y7 Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(1.5, 4, 0) rec[-24] rec[-22] rec[-19] rec[-8] rec[-6] rec[-3]
DETECTOR(2.5, 1, 0) rec[-23] rec[-18] rec[-7] rec[-2]
DETECTOR(0.5, 1, 0) rec[-26] rec[-25] rec[-20] rec[-10] rec[-9] rec[-4]
DETECTOR(3.5, 4, 0) rec[-21] rec[-17] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 1 9 11
DEPOLARIZE2(0.125) 4 3 6 7 2 5 8 10
MPP(0.125) Z0 Z1 Z4*Z3 Z6*Z7 Z9 Z11 Z2*Z5 Z8*Z10
OBSERVABLE_INCLUDE(1) rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE2(0.125) 2 3 6 5 8 9 11 10 0 1 4 7
MPP(0.125) X2*X3 X6*X5 X8*X9 X11*X10 X0*X1 X4*X7
DETECTOR(1.5, 2, 0) rec[-54] rec[-53] rec[-49] rec[-46] rec[-45] rec[-42] rec[-12] rec[-11] rec[-8] rec[-6] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 1 9 11
DEPOLARIZE2(0.125) 4 3 6 7 2 5 8 10
MPP(0.125) Z0 Z1 Z4*Z3 Z6*Z7 Z9 Z11 Z2*Z5 Z8*Z10
OBSERVABLE_INCLUDE(1) rec[-1]
DETECTOR(1.5, 2, 0) rec[-20] rec[-19] rec[-16] rec[-6] rec[-5] rec[-2]
DETECTOR(2.5, 5, 0) rec[-18] rec[-17] rec[-15] rec[-4] rec[-3] rec[-1]
DETECTOR(0.5, -1, 0) rec[-22] rec[-21] rec[-8] rec[-7]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 4 5 10 3 9 6 11
DEPOLARIZE2(0.125) 2 1 8 7
MPP(0.125) Y0 Y2*Y1 Y4 Y5 Y8*Y7 Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(0.5, 3, 0) rec[-40] rec[-36] rec[-30] rec[-16] rec[-8] rec[-4]
DETECTOR(2.5, 3, 0) rec[-38] rec[-37] rec[-34] rec[-29] rec[-25] rec[-15] rec[-11] rec[-6] rec[-5] rec[-2]
SHIFT_COORDS(0, 0, 1)
TICK
}
DEPOLARIZE2(0.125) 2 3 6 5 8 9 11 10 0 1 4 7
MPP(0.125) X2*X3 X6*X5 X8*X9 X11*X10 X0*X1 X4*X7
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 4 5 10 3 9 6 11
DEPOLARIZE2(0.125) 2 1 8 7
MPP(0.125) Y0 Y2*Y1 Y4 Y5 Y8*Y7 Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(1.5, 4, 0) rec[-24] rec[-22] rec[-19] rec[-8] rec[-6] rec[-3]
DETECTOR(2.5, 1, 0) rec[-23] rec[-18] rec[-7] rec[-2]
DETECTOR(0.5, 1, 0) rec[-26] rec[-25] rec[-20] rec[-10] rec[-9] rec[-4]
DETECTOR(3.5, 4, 0) rec[-21] rec[-17] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 1 9 11
DEPOLARIZE2(0.125) 4 3 6 7 2 5 8 10
MPP(0.125) Z0 Z1 Z4*Z3 Z6*Z7 Z9 Z11 Z2*Z5 Z8*Z10
OBSERVABLE_INCLUDE(1) rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE2(0.125) 2 3 6 5 8 9 11 10 0 1 4 7
MPP(0.125) X2*X3 X6*X5 X8*X9 X11*X10 X0*X1 X4*X7
DETECTOR(1.5, 2, 0) rec[-54] rec[-53] rec[-49] rec[-46] rec[-45] rec[-42] rec[-12] rec[-11] rec[-8] rec[-6] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 1 9 11
DEPOLARIZE2(0.125) 4 3 6 7 2 5 8 10
MPP(0.125) Z0 Z1 Z4*Z3 Z6*Z7 Z9 Z11 Z2*Z5 Z8*Z10
OBSERVABLE_INCLUDE(1) rec[-1]
DETECTOR(1.5, 2, 0) rec[-20] rec[-19] rec[-16] rec[-6] rec[-5] rec[-2]
DETECTOR(2.5, 5, 0) rec[-18] rec[-17] rec[-15] rec[-4] rec[-3] rec[-1]
DETECTOR(0.5, -1, 0) rec[-22] rec[-21] rec[-8] rec[-7]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 4 5 10 3 9 6 11
DEPOLARIZE2(0.125) 2 1 8 7
MPP(0.125) Y0 Y2*Y1 Y4 Y5 Y8*Y7 Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(0.5, 3, 0) rec[-40] rec[-36] rec[-30] rec[-16] rec[-8] rec[-4]
DETECTOR(2.5, 3, 0) rec[-38] rec[-37] rec[-34] rec[-29] rec[-25] rec[-15] rec[-11] rec[-6] rec[-5] rec[-2]
SHIFT_COORDS(0, 0, 1)
TICK
DEPOLARIZE1(0.125) 0 1 2 3 4 5 6 7 8 9 10 11
MPP(0.125) Y0 Y1 Y2 Y3 Y4 Y5 Y6 Y7 Y8 Y9 Y10 Y11
DETECTOR(0, 0.5, 0) rec[-22] rec[-12]
DETECTOR(1, 0.5, 0) rec[-21] rec[-11] rec[-10]
DETECTOR(1, 3.5, 0) rec[-20] rec[-8]
DETECTOR(2, 0.5, 0) rec[-19] rec[-7]
DETECTOR(2, 3.5, 0) rec[-18] rec[-5] rec[-4]
DETECTOR(3, 3.5, 0) rec[-17] rec[-2]
DETECTOR(0.5, 2, 0) rec[-16] rec[-9]
DETECTOR(1.5, 5, 0) rec[-15] rec[-3]
DETECTOR(2.5, 2, 0) rec[-14] rec[-6]
DETECTOR(3.5, 5, 0) rec[-13] rec[-1]
DETECTOR(1.5, 2, 0) rec[-36] rec[-35] rec[-31] rec[-28] rec[-27] rec[-24] rec[-10] rec[-9] rec[-8] rec[-7] rec[-6] rec[-5]
OBSERVABLE_INCLUDE(1) rec[-4] rec[-3] rec[-2] rec[-1]
TICK
""")
def test_exact_circuit_SIEM3000_H():
    """Golden regression test for the SIEM3000 noisy gate set (sheared, 'H' observable).

    Same layout as the SDEM3 test (sheared 2x6, noise_level=0.125, 'H'
    observable), but the expected circuit expresses noise through
    PAULI_CHANNEL_1 / PAULI_CHANNEL_2 instances instead of depolarizing
    channels.  The 1e-15 probabilities in PAULI_CHANNEL_2 appear to be tiny
    placeholder entries for otherwise-zero Pauli terms — confirm against the
    gate-set definition if they look surprising.

    NOTE(review): `layout.ideal_and_noisy_circuit[1]` — presumably index 1 is
    the noisy circuit of an (ideal, noisy) pair; verify in HoneycombLayout.
    """
    layout = HoneycombLayout(data_width=2,
                             data_height=6,
                             rounds=100,
                             noise_level=0.125,
                             noisy_gate_set='SIEM3000',
                             tested_observable='H',
                             sheared=True)
    assert layout.ideal_and_noisy_circuit[1] == stim.Circuit("""
QUBIT_COORDS(0, 0) 0
QUBIT_COORDS(1, 0) 1
QUBIT_COORDS(1, 1) 2
QUBIT_COORDS(1, 2) 3
QUBIT_COORDS(1, 3) 4
QUBIT_COORDS(2, 1) 5
QUBIT_COORDS(2, 2) 6
QUBIT_COORDS(2, 3) 7
QUBIT_COORDS(2, 4) 8
QUBIT_COORDS(2, 5) 9
QUBIT_COORDS(3, 4) 10
QUBIT_COORDS(3, 5) 11
R 0 1 2 3 4 5 6 7 8 9 10 11
X_ERROR(0.0625) 0 1 2 3 4 5 6 7 8 9 10 11
TICK
H_YZ 0 1 2 3 4 5 6 7 8 9 10 11
TICK
PAULI_CHANNEL_2(1e-15, 1e-15, 1e-15, 1e-15, 0.125, 0, 0, 1e-15, 0, 0, 0, 1e-15, 0, 0, 0) 2 3 6 5 8 9 11 10 0 1 4 7
PAULI_CHANNEL_1(0, 0.125, 0) 2 3 6 5 8 9 11 10 0 1 4 7
MPP(0.125) X2*X3 X6*X5 X8*X9 X11*X10 X0*X1 X4*X7
SHIFT_COORDS(0, 0, 1)
TICK
PAULI_CHANNEL_1(0, 0.125, 0) 0 4 5 10 3 9 6 11
PAULI_CHANNEL_2(1e-15, 1e-15, 1e-15, 1e-15, 0, 0, 0, 1e-15, 0, 0.125, 0, 1e-15, 0, 0, 0) 2 1 8 7
PAULI_CHANNEL_1(0, 0, 0.125) 0 4 5 10 3 9 6 11 2 1 8 7
MPP(0.125) Y0 Y2*Y1 Y4 Y5 Y8*Y7 Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(1.5, 4, 0) rec[-8] rec[-6] rec[-3]
DETECTOR(2.5, 1, 0) rec[-7] rec[-2]
DETECTOR(0.5, 1, 0) rec[-10] rec[-9] rec[-4]
DETECTOR(3.5, 4, 0) rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
PAULI_CHANNEL_1(0, 0, 0.125) 0 1 9 11
PAULI_CHANNEL_2(1e-15, 1e-15, 1e-15, 1e-15, 0, 0, 0, 1e-15, 0, 0, 0, 1e-15, 0, 0, 0.125) 4 3 6 7 2 5 8 10
PAULI_CHANNEL_1(0.125, 0, 0) 0 1 9 11 4 3 6 7 2 5 8 10
MPP(0.125) Z0 Z1 Z4*Z3 Z6*Z7 Z9 Z11 Z2*Z5 Z8*Z10
OBSERVABLE_INCLUDE(1) rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
PAULI_CHANNEL_2(1e-15, 1e-15, 1e-15, 1e-15, 0.125, 0, 0, 1e-15, 0, 0, 0, 1e-15, 0, 0, 0) 2 3 6 5 8 9 11 10 0 1 4 7
PAULI_CHANNEL_1(0, 0.125, 0) 2 3 6 5 8 9 11 10 0 1 4 7
MPP(0.125) X2*X3 X6*X5 X8*X9 X11*X10 X0*X1 X4*X7
DETECTOR(1.5, 2, 0) rec[-12] rec[-11] rec[-8] rec[-6] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
PAULI_CHANNEL_1(0, 0, 0.125) 0 1 9 11
PAULI_CHANNEL_2(1e-15, 1e-15, 1e-15, 1e-15, 0, 0, 0, 1e-15, 0, 0, 0, 1e-15, 0, 0, 0.125) 4 3 6 7 2 5 8 10
PAULI_CHANNEL_1(0.125, 0, 0) 0 1 9 11 4 3 6 7 2 5 8 10
MPP(0.125) Z0 Z1 Z4*Z3 Z6*Z7 Z9 Z11 Z2*Z5 Z8*Z10
OBSERVABLE_INCLUDE(1) rec[-1]
DETECTOR(1.5, 2, 0) rec[-20] rec[-19] rec[-16] rec[-6] rec[-5] rec[-2]
DETECTOR(2.5, 5, 0) rec[-18] rec[-17] rec[-15] rec[-4] rec[-3] rec[-1]
DETECTOR(0.5, -1, 0) rec[-22] rec[-21] rec[-8] rec[-7]
SHIFT_COORDS(0, 0, 1)
TICK
PAULI_CHANNEL_1(0, 0.125, 0) 0 4 5 10 3 9 6 11
PAULI_CHANNEL_2(1e-15, 1e-15, 1e-15, 1e-15, 0, 0, 0, 1e-15, 0, 0.125, 0, 1e-15, 0, 0, 0) 2 1 8 7
PAULI_CHANNEL_1(0, 0, 0.125) 0 4 5 10 3 9 6 11 2 1 8 7
MPP(0.125) Y0 Y2*Y1 Y4 Y5 Y8*Y7 Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(0.5, 3, 0) rec[-40] rec[-36] rec[-30] rec[-16] rec[-8] rec[-4]
DETECTOR(2.5, 3, 0) rec[-38] rec[-37] rec[-34] rec[-29] rec[-25] rec[-15] rec[-11] rec[-6] rec[-5] rec[-2]
SHIFT_COORDS(0, 0, 1)
TICK
REPEAT 48 {
PAULI_CHANNEL_2(1e-15, 1e-15, 1e-15, 1e-15, 0.125, 0, 0, 1e-15, 0, 0, 0, 1e-15, 0, 0, 0) 2 3 6 5 8 9 11 10 0 1 4 7
PAULI_CHANNEL_1(0, 0.125, 0) 2 3 6 5 8 9 11 10 0 1 4 7
MPP(0.125) X2*X3 X6*X5 X8*X9 X11*X10 X0*X1 X4*X7
SHIFT_COORDS(0, 0, 1)
TICK
PAULI_CHANNEL_1(0, 0.125, 0) 0 4 5 10 3 9 6 11
PAULI_CHANNEL_2(1e-15, 1e-15, 1e-15, 1e-15, 0, 0, 0, 1e-15, 0, 0.125, 0, 1e-15, 0, 0, 0) 2 1 8 7
PAULI_CHANNEL_1(0, 0, 0.125) 0 4 5 10 3 9 6 11 2 1 8 7
MPP(0.125) Y0 Y2*Y1 Y4 Y5 Y8*Y7 Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(1.5, 4, 0) rec[-24] rec[-22] rec[-19] rec[-8] rec[-6] rec[-3]
DETECTOR(2.5, 1, 0) rec[-23] rec[-18] rec[-7] rec[-2]
DETECTOR(0.5, 1, 0) rec[-26] rec[-25] rec[-20] rec[-10] rec[-9] rec[-4]
DETECTOR(3.5, 4, 0) rec[-21] rec[-17] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
PAULI_CHANNEL_1(0, 0, 0.125) 0 1 9 11
PAULI_CHANNEL_2(1e-15, 1e-15, 1e-15, 1e-15, 0, 0, 0, 1e-15, 0, 0, 0, 1e-15, 0, 0, 0.125) 4 3 6 7 2 5 8 10
PAULI_CHANNEL_1(0.125, 0, 0) 0 1 9 11 4 3 6 7 2 5 8 10
MPP(0.125) Z0 Z1 Z4*Z3 Z6*Z7 Z9 Z11 Z2*Z5 Z8*Z10
OBSERVABLE_INCLUDE(1) rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
PAULI_CHANNEL_2(1e-15, 1e-15, 1e-15, 1e-15, 0.125, 0, 0, 1e-15, 0, 0, 0, 1e-15, 0, 0, 0) 2 3 6 5 8 9 11 10 0 1 4 7
PAULI_CHANNEL_1(0, 0.125, 0) 2 3 6 5 8 9 11 10 0 1 4 7
MPP(0.125) X2*X3 X6*X5 X8*X9 X11*X10 X0*X1 X4*X7
DETECTOR(1.5, 2, 0) rec[-54] rec[-53] rec[-49] rec[-46] rec[-45] rec[-42] rec[-12] rec[-11] rec[-8] rec[-6] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
PAULI_CHANNEL_1(0, 0, 0.125) 0 1 9 11
PAULI_CHANNEL_2(1e-15, 1e-15, 1e-15, 1e-15, 0, 0, 0, 1e-15, 0, 0, 0, 1e-15, 0, 0, 0.125) 4 3 6 7 2 5 8 10
PAULI_CHANNEL_1(0.125, 0, 0) 0 1 9 11 4 3 6 7 2 5 8 10
MPP(0.125) Z0 Z1 Z4*Z3 Z6*Z7 Z9 Z11 Z2*Z5 Z8*Z10
OBSERVABLE_INCLUDE(1) rec[-1]
DETECTOR(1.5, 2, 0) rec[-20] rec[-19] rec[-16] rec[-6] rec[-5] rec[-2]
DETECTOR(2.5, 5, 0) rec[-18] rec[-17] rec[-15] rec[-4] rec[-3] rec[-1]
DETECTOR(0.5, -1, 0) rec[-22] rec[-21] rec[-8] rec[-7]
SHIFT_COORDS(0, 0, 1)
TICK
PAULI_CHANNEL_1(0, 0.125, 0) 0 4 5 10 3 9 6 11
PAULI_CHANNEL_2(1e-15, 1e-15, 1e-15, 1e-15, 0, 0, 0, 1e-15, 0, 0.125, 0, 1e-15, 0, 0, 0) 2 1 8 7
PAULI_CHANNEL_1(0, 0, 0.125) 0 4 5 10 3 9 6 11 2 1 8 7
MPP(0.125) Y0 Y2*Y1 Y4 Y5 Y8*Y7 Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(0.5, 3, 0) rec[-40] rec[-36] rec[-30] rec[-16] rec[-8] rec[-4]
DETECTOR(2.5, 3, 0) rec[-38] rec[-37] rec[-34] rec[-29] rec[-25] rec[-15] rec[-11] rec[-6] rec[-5] rec[-2]
SHIFT_COORDS(0, 0, 1)
TICK
}
PAULI_CHANNEL_2(1e-15, 1e-15, 1e-15, 1e-15, 0.125, 0, 0, 1e-15, 0, 0, 0, 1e-15, 0, 0, 0) 2 3 6 5 8 9 11 10 0 1 4 7
PAULI_CHANNEL_1(0, 0.125, 0) 2 3 6 5 8 9 11 10 0 1 4 7
MPP(0.125) X2*X3 X6*X5 X8*X9 X11*X10 X0*X1 X4*X7
SHIFT_COORDS(0, 0, 1)
TICK
PAULI_CHANNEL_1(0, 0.125, 0) 0 4 5 10 3 9 6 11
PAULI_CHANNEL_2(1e-15, 1e-15, 1e-15, 1e-15, 0, 0, 0, 1e-15, 0, 0.125, 0, 1e-15, 0, 0, 0) 2 1 8 7
PAULI_CHANNEL_1(0, 0, 0.125) 0 4 5 10 3 9 6 11 2 1 8 7
MPP(0.125) Y0 Y2*Y1 Y4 Y5 Y8*Y7 Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(1.5, 4, 0) rec[-24] rec[-22] rec[-19] rec[-8] rec[-6] rec[-3]
DETECTOR(2.5, 1, 0) rec[-23] rec[-18] rec[-7] rec[-2]
DETECTOR(0.5, 1, 0) rec[-26] rec[-25] rec[-20] rec[-10] rec[-9] rec[-4]
DETECTOR(3.5, 4, 0) rec[-21] rec[-17] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
PAULI_CHANNEL_1(0, 0, 0.125) 0 1 9 11
PAULI_CHANNEL_2(1e-15, 1e-15, 1e-15, 1e-15, 0, 0, 0, 1e-15, 0, 0, 0, 1e-15, 0, 0, 0.125) 4 3 6 7 2 5 8 10
PAULI_CHANNEL_1(0.125, 0, 0) 0 1 9 11 4 3 6 7 2 5 8 10
MPP(0.125) Z0 Z1 Z4*Z3 Z6*Z7 Z9 Z11 Z2*Z5 Z8*Z10
OBSERVABLE_INCLUDE(1) rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
PAULI_CHANNEL_2(1e-15, 1e-15, 1e-15, 1e-15, 0.125, 0, 0, 1e-15, 0, 0, 0, 1e-15, 0, 0, 0) 2 3 6 5 8 9 11 10 0 1 4 7
PAULI_CHANNEL_1(0, 0.125, 0) 2 3 6 5 8 9 11 10 0 1 4 7
MPP(0.125) X2*X3 X6*X5 X8*X9 X11*X10 X0*X1 X4*X7
DETECTOR(1.5, 2, 0) rec[-54] rec[-53] rec[-49] rec[-46] rec[-45] rec[-42] rec[-12] rec[-11] rec[-8] rec[-6] rec[-5] rec[-1]
SHIFT_COORDS(0, 0, 1)
TICK
PAULI_CHANNEL_1(0, 0, 0.125) 0 1 9 11
PAULI_CHANNEL_2(1e-15, 1e-15, 1e-15, 1e-15, 0, 0, 0, 1e-15, 0, 0, 0, 1e-15, 0, 0, 0.125) 4 3 6 7 2 5 8 10
PAULI_CHANNEL_1(0.125, 0, 0) 0 1 9 11 4 3 6 7 2 5 8 10
MPP(0.125) Z0 Z1 Z4*Z3 Z6*Z7 Z9 Z11 Z2*Z5 Z8*Z10
OBSERVABLE_INCLUDE(1) rec[-1]
DETECTOR(1.5, 2, 0) rec[-20] rec[-19] rec[-16] rec[-6] rec[-5] rec[-2]
DETECTOR(2.5, 5, 0) rec[-18] rec[-17] rec[-15] rec[-4] rec[-3] rec[-1]
DETECTOR(0.5, -1, 0) rec[-22] rec[-21] rec[-8] rec[-7]
SHIFT_COORDS(0, 0, 1)
TICK
PAULI_CHANNEL_1(0, 0.125, 0) 0 4 5 10 3 9 6 11
PAULI_CHANNEL_2(1e-15, 1e-15, 1e-15, 1e-15, 0, 0, 0, 1e-15, 0, 0.125, 0, 1e-15, 0, 0, 0) 2 1 8 7
PAULI_CHANNEL_1(0, 0, 0.125) 0 4 5 10 3 9 6 11 2 1 8 7
MPP(0.125) Y0 Y2*Y1 Y4 Y5 Y8*Y7 Y10 Y3 Y9 Y6 Y11
OBSERVABLE_INCLUDE(1) rec[-3] rec[-1]
DETECTOR(0.5, 3, 0) rec[-40] rec[-36] rec[-30] rec[-16] rec[-8] rec[-4]
DETECTOR(2.5, 3, 0) rec[-38] rec[-37] rec[-34] rec[-29] rec[-25] rec[-15] rec[-11] rec[-6] rec[-5] rec[-2]
SHIFT_COORDS(0, 0, 1)
TICK
PAULI_CHANNEL_1(0, 0.125, 0) 0 1 2 3 4 5 6 7 8 9 10 11
PAULI_CHANNEL_1(0, 0, 0.125) 0 1 2 3 4 5 6 7 8 9 10 11
MPP(0.125) Y0 Y1 Y2 Y3 Y4 Y5 Y6 Y7 Y8 Y9 Y10 Y11
DETECTOR(0, 0.5, 0) rec[-22] rec[-12]
DETECTOR(1, 0.5, 0) rec[-21] rec[-11] rec[-10]
DETECTOR(1, 3.5, 0) rec[-20] rec[-8]
DETECTOR(2, 0.5, 0) rec[-19] rec[-7]
DETECTOR(2, 3.5, 0) rec[-18] rec[-5] rec[-4]
DETECTOR(3, 3.5, 0) rec[-17] rec[-2]
DETECTOR(0.5, 2, 0) rec[-16] rec[-9]
DETECTOR(1.5, 5, 0) rec[-15] rec[-3]
DETECTOR(2.5, 2, 0) rec[-14] rec[-6]
DETECTOR(3.5, 5, 0) rec[-13] rec[-1]
DETECTOR(1.5, 2, 0) rec[-36] rec[-35] rec[-31] rec[-28] rec[-27] rec[-24] rec[-10] rec[-9] rec[-8] rec[-7] rec[-6] rec[-5]
OBSERVABLE_INCLUDE(1) rec[-4] rec[-3] rec[-2] rec[-1]
TICK
""")
def test_exact_circuit_SD6_V():
    """Golden regression test for the SD6 gate set (unsheared, 'V' observable).

    Unlike the measurement-based gate sets, the SD6 golden circuit uses
    explicit ancilla qubits (indices 12-35, at half-integer coordinates),
    CX entangling layers, C_XYZ/C_ZYX single-qubit rotations, R/M
    reset-and-measure cycles, and standard-depolarizing noise
    (X_ERROR on reset/measure, DEPOLARIZE1/DEPOLARIZE2 elsewhere).
    Observable index 0 here corresponds to the vertical ('V') observable.

    NOTE(review): `layout.ideal_and_noisy_circuit[1]` — presumably index 1 is
    the noisy circuit of an (ideal, noisy) pair; verify in HoneycombLayout.
    """
    layout = HoneycombLayout(data_width=2,
                             data_height=6,
                             rounds=100,
                             noise_level=0.125,
                             noisy_gate_set='SD6',
                             tested_observable='V',
                             sheared=False)
    assert layout.ideal_and_noisy_circuit[1] == stim.Circuit("""
QUBIT_COORDS(0, 0) 0
QUBIT_COORDS(0, 4) 1
QUBIT_COORDS(0, 5) 2
QUBIT_COORDS(1, 0) 3
QUBIT_COORDS(1, 1) 4
QUBIT_COORDS(1, 2) 5
QUBIT_COORDS(1, 3) 6
QUBIT_COORDS(1, 4) 7
QUBIT_COORDS(1, 5) 8
QUBIT_COORDS(2, 1) 9
QUBIT_COORDS(2, 2) 10
QUBIT_COORDS(2, 3) 11
QUBIT_COORDS(0, -0.5) 12
QUBIT_COORDS(0, 0.5) 13
QUBIT_COORDS(0, 3.5) 14
QUBIT_COORDS(0, 4.5) 15
QUBIT_COORDS(0, 5.5) 16
QUBIT_COORDS(1, -0.5) 17
QUBIT_COORDS(1, 0.5) 18
QUBIT_COORDS(1, 1.5) 19
QUBIT_COORDS(1, 2.5) 20
QUBIT_COORDS(1, 3.5) 21
QUBIT_COORDS(1, 4.5) 22
QUBIT_COORDS(1, 5.5) 23
QUBIT_COORDS(2, 0.5) 24
QUBIT_COORDS(2, 1.5) 25
QUBIT_COORDS(2, 2.5) 26
QUBIT_COORDS(2, 3.5) 27
QUBIT_COORDS(-0.5, 5) 28
QUBIT_COORDS(0.5, 0) 29
QUBIT_COORDS(0.5, 2) 30
QUBIT_COORDS(0.5, 4) 31
QUBIT_COORDS(1.5, 1) 32
QUBIT_COORDS(1.5, 3) 33
QUBIT_COORDS(1.5, 5) 34
QUBIT_COORDS(2.5, 2) 35
R 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 0 1 2 3 4 5 6 7 8 9 10 11
X_ERROR(0.125) 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 0 1 2 3 4 5 6 7 8 9 10 11
TICK
CX 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE2(0.125) 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE1(0.125) 2 3 5 7 9 11 12 13 14 16 17 18 20 21 23 24 26 27 28 30 31 32 34 35
TICK
R 13 14 18 21 24 27 28 30 34 35
CX 2 15 5 19 7 22 9 25 3 29 11 33
C_ZYX 0 1 4 6 8 10
X_ERROR(0.125) 13 14 18 21 24 27 28 30 34 35
DEPOLARIZE2(0.125) 2 15 5 19 7 22 9 25 3 29 11 33
DEPOLARIZE1(0.125) 0 1 4 6 8 10 12 16 17 20 23 26 31 32
TICK
X_ERROR(0.125) 15 19 22 25 29 33
CX 0 13 1 14 4 18 6 21 8 34 10 35
C_ZYX 2 3 5 7 9 11
M 15 19 22 25 29 33
DETECTOR(0, 4.5, 0) rec[-6]
DETECTOR(1, 1.5, 0) rec[-5]
DETECTOR(1, 4.5, 0) rec[-4]
DETECTOR(2, 1.5, 0) rec[-3]
DETECTOR(0.5, 0, 0) rec[-2]
DETECTOR(1.5, 3, 0) rec[-1]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE2(0.125) 0 13 1 14 4 18 6 21 8 34 10 35
DEPOLARIZE1(0.125) 2 3 5 7 9 11 12 16 17 20 23 24 26 27 28 30 31 32
TICK
R 12 16 17 20 23 26 31 32
CX 3 18 7 21 9 24 11 27 2 28 5 30
C_ZYX 0 1 4 6 8 10
X_ERROR(0.125) 12 16 17 20 23 26 31 32
DEPOLARIZE2(0.125) 3 18 7 21 9 24 11 27 2 28 5 30
DEPOLARIZE1(0.125) 0 1 4 6 8 10 13 14 15 19 22 25 29 33 34 35
TICK
X_ERROR(0.125) 13 14 18 21 24 27 28 30 34 35
CX 0 12 6 20 8 23 10 26 1 31 4 32
C_ZYX 2 3 5 7 9 11
M 13 14 18 21 24 27 28 30 34 35
OBSERVABLE_INCLUDE(0) rec[-8] rec[-7]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE2(0.125) 0 12 6 20 8 23 10 26 1 31 4 32
DEPOLARIZE1(0.125) 2 3 5 7 9 11 15 16 17 19 22 25 29 33
TICK
R 15 19 22 25 29 33
CX 2 16 3 17 5 20 11 26 7 31 9 32
C_ZYX 0 1 4 6 8 10
X_ERROR(0.125) 15 19 22 25 29 33
DEPOLARIZE2(0.125) 2 16 3 17 5 20 11 26 7 31 9 32
DEPOLARIZE1(0.125) 0 1 4 6 8 10 12 13 14 18 21 23 24 27 28 30 34 35
TICK
X_ERROR(0.125) 12 16 17 20 23 26 31 32
CX 1 15 4 19 8 22 10 25 0 29 6 33
C_ZYX 2 3 5 7 9 11
M 12 16 17 20 23 26 31 32
OBSERVABLE_INCLUDE(0) rec[-6] rec[-5] rec[-4]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE2(0.125) 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE1(0.125) 2 3 5 7 9 11 13 14 18 21 24 27 28 30 34 35
TICK
R 12 16 17 20 23 26 31 32
CX 2 15 5 19 7 22 9 25 3 29 11 33
C_XYZ 0 1 4 6 8 10
X_ERROR(0.125) 12 16 17 20 23 26 31 32
DEPOLARIZE2(0.125) 2 15 5 19 7 22 9 25 3 29 11 33
DEPOLARIZE1(0.125) 0 1 4 6 8 10 13 14 18 21 24 27 28 30 34 35
TICK
X_ERROR(0.125) 15 19 22 25 29 33
CX 0 12 6 20 8 23 10 26 1 31 4 32
C_XYZ 2 3 5 7 9 11
M 15 19 22 25 29 33
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE2(0.125) 0 12 6 20 8 23 10 26 1 31 4 32
DEPOLARIZE1(0.125) 2 3 5 7 9 11 13 14 16 17 18 21 24 27 28 30 34 35
TICK
R 13 14 18 21 24 27 28 30 34 35
CX 2 16 3 17 5 20 11 26 7 31 9 32
C_XYZ 0 1 4 6 8 10
X_ERROR(0.125) 13 14 18 21 24 27 28 30 34 35
DEPOLARIZE2(0.125) 2 16 3 17 5 20 11 26 7 31 9 32
DEPOLARIZE1(0.125) 0 1 4 6 8 10 12 15 19 22 23 25 29 33
TICK
X_ERROR(0.125) 12 16 17 20 23 26 31 32
CX 0 13 1 14 4 18 6 21 8 34 10 35
C_XYZ 2 3 5 7 9 11
M 12 16 17 20 23 26 31 32
OBSERVABLE_INCLUDE(0) rec[-6] rec[-5] rec[-4]
DETECTOR(1.5, 2, 0) rec[-19] rec[-17] rec[-15] rec[-5] rec[-3] rec[-1]
DETECTOR(0.5, 5, 0) rec[-21] rec[-18] rec[-16] rec[-7] rec[-4] rec[-2]
DETECTOR(0.5, -1, 0) rec[-22] rec[-20] rec[-8] rec[-6]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE2(0.125) 0 13 1 14 4 18 6 21 8 34 10 35
DEPOLARIZE1(0.125) 2 3 5 7 9 11 15 19 22 24 25 27 28 29 30 33
TICK
R 15 19 22 25 29 33
CX 3 18 7 21 9 24 11 27 2 28 5 30
C_XYZ 0 1 4 6 8 10
X_ERROR(0.125) 15 19 22 25 29 33
DEPOLARIZE2(0.125) 3 18 7 21 9 24 11 27 2 28 5 30
DEPOLARIZE1(0.125) 0 1 4 6 8 10 12 13 14 16 17 20 23 26 31 32 34 35
TICK
X_ERROR(0.125) 13 14 18 21 24 27 28 30 34 35
CX 1 15 4 19 8 22 10 25 0 29 6 33
C_XYZ 2 3 5 7 9 11
M 13 14 18 21 24 27 28 30 34 35
OBSERVABLE_INCLUDE(0) rec[-8] rec[-7]
DETECTOR(2.5, 3, 0) rec[-37] rec[-33] rec[-27] rec[-13] rec[-5] rec[-1]
DETECTOR(0.5, 3, 0) rec[-41] rec[-39] rec[-35] rec[-29] rec[-26] rec[-15] rec[-12] rec[-9] rec[-7] rec[-3]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE2(0.125) 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE1(0.125) 2 3 5 7 9 11 12 16 17 20 23 26 31 32
TICK
REPEAT 48 {
R 13 14 18 21 24 27 28 30 34 35
CX 2 15 5 19 7 22 9 25 3 29 11 33
C_ZYX 0 1 4 6 8 10
X_ERROR(0.125) 13 14 18 21 24 27 28 30 34 35
DEPOLARIZE2(0.125) 2 15 5 19 7 22 9 25 3 29 11 33
DEPOLARIZE1(0.125) 0 1 4 6 8 10 12 16 17 20 23 26 31 32
TICK
X_ERROR(0.125) 15 19 22 25 29 33
CX 0 13 1 14 4 18 6 21 8 34 10 35
C_ZYX 2 3 5 7 9 11
M 15 19 22 25 29 33
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE2(0.125) 0 13 1 14 4 18 6 21 8 34 10 35
DEPOLARIZE1(0.125) 2 3 5 7 9 11 12 16 17 20 23 24 26 27 28 30 31 32
TICK
R 12 16 17 20 23 26 31 32
CX 3 18 7 21 9 24 11 27 2 28 5 30
C_ZYX 0 1 4 6 8 10
X_ERROR(0.125) 12 16 17 20 23 26 31 32
DEPOLARIZE2(0.125) 3 18 7 21 9 24 11 27 2 28 5 30
DEPOLARIZE1(0.125) 0 1 4 6 8 10 13 14 15 19 22 25 29 33 34 35
TICK
X_ERROR(0.125) 13 14 18 21 24 27 28 30 34 35
CX 0 12 6 20 8 23 10 26 1 31 4 32
C_ZYX 2 3 5 7 9 11
M 13 14 18 21 24 27 28 30 34 35
OBSERVABLE_INCLUDE(0) rec[-8] rec[-7]
DETECTOR(2.5, 1, 0) rec[-22] rec[-17] rec[-6] rec[-1]
DETECTOR(1.5, 4, 0) rec[-23] rec[-21] rec[-18] rec[-7] rec[-5] rec[-2]
DETECTOR(-0.5, 4, 0) rec[-25] rec[-20] rec[-9] rec[-4]
DETECTOR(0.5, 1, 0) rec[-26] rec[-24] rec[-19] rec[-10] rec[-8] rec[-3]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE2(0.125) 0 12 6 20 8 23 10 26 1 31 4 32
DEPOLARIZE1(0.125) 2 3 5 7 9 11 15 16 17 19 22 25 29 33
TICK
R 15 19 22 25 29 33
CX 2 16 3 17 5 20 11 26 7 31 9 32
C_ZYX 0 1 4 6 8 10
X_ERROR(0.125) 15 19 22 25 29 33
DEPOLARIZE2(0.125) 2 16 3 17 5 20 11 26 7 31 9 32
DEPOLARIZE1(0.125) 0 1 4 6 8 10 12 13 14 18 21 23 24 27 28 30 34 35
TICK
X_ERROR(0.125) 12 16 17 20 23 26 31 32
CX 1 15 4 19 8 22 10 25 0 29 6 33
C_ZYX 2 3 5 7 9 11
M 12 16 17 20 23 26 31 32
OBSERVABLE_INCLUDE(0) rec[-6] rec[-5] rec[-4]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE2(0.125) 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE1(0.125) 2 3 5 7 9 11 13 14 18 21 24 27 28 30 34 35
TICK
R 12 16 17 20 23 26 31 32
CX 2 15 5 19 7 22 9 25 3 29 11 33
C_XYZ 0 1 4 6 8 10
X_ERROR(0.125) 12 16 17 20 23 26 31 32
DEPOLARIZE2(0.125) 2 15 5 19 7 22 9 25 3 29 11 33
DEPOLARIZE1(0.125) 0 1 4 6 8 10 13 14 18 21 24 27 28 30 34 35
TICK
X_ERROR(0.125) 15 19 22 25 29 33
CX 0 12 6 20 8 23 10 26 1 31 4 32
C_XYZ 2 3 5 7 9 11
M 15 19 22 25 29 33
DETECTOR(1.5, 2, 0) rec[-53] rec[-51] rec[-49] rec[-45] rec[-43] rec[-41] rec[-11] rec[-9] rec[-7] rec[-5] rec[-3] rec[-1]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE2(0.125) 0 12 6 20 8 23 10 26 1 31 4 32
DEPOLARIZE1(0.125) 2 3 5 7 9 11 13 14 16 17 18 21 24 27 28 30 34 35
TICK
R 13 14 18 21 24 27 28 30 34 35
CX 2 16 3 17 5 20 11 26 7 31 9 32
C_XYZ 0 1 4 6 8 10
X_ERROR(0.125) 13 14 18 21 24 27 28 30 34 35
DEPOLARIZE2(0.125) 2 16 3 17 5 20 11 26 7 31 9 32
DEPOLARIZE1(0.125) 0 1 4 6 8 10 12 15 19 22 23 25 29 33
TICK
X_ERROR(0.125) 12 16 17 20 23 26 31 32
CX 0 13 1 14 4 18 6 21 8 34 10 35
C_XYZ 2 3 5 7 9 11
M 12 16 17 20 23 26 31 32
OBSERVABLE_INCLUDE(0) rec[-6] rec[-5] rec[-4]
DETECTOR(1.5, 2, 0) rec[-19] rec[-17] rec[-15] rec[-5] rec[-3] rec[-1]
DETECTOR(0.5, 5, 0) rec[-21] rec[-18] rec[-16] rec[-7] rec[-4] rec[-2]
DETECTOR(0.5, -1, 0) rec[-22] rec[-20] rec[-8] rec[-6]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE2(0.125) 0 13 1 14 4 18 6 21 8 34 10 35
DEPOLARIZE1(0.125) 2 3 5 7 9 11 15 19 22 24 25 27 28 29 30 33
TICK
R 15 19 22 25 29 33
CX 3 18 7 21 9 24 11 27 2 28 5 30
C_XYZ 0 1 4 6 8 10
X_ERROR(0.125) 15 19 22 25 29 33
DEPOLARIZE2(0.125) 3 18 7 21 9 24 11 27 2 28 5 30
DEPOLARIZE1(0.125) 0 1 4 6 8 10 12 13 14 16 17 20 23 26 31 32 34 35
TICK
X_ERROR(0.125) 13 14 18 21 24 27 28 30 34 35
CX 1 15 4 19 8 22 10 25 0 29 6 33
C_XYZ 2 3 5 7 9 11
M 13 14 18 21 24 27 28 30 34 35
OBSERVABLE_INCLUDE(0) rec[-8] rec[-7]
DETECTOR(2.5, 3, 0) rec[-37] rec[-33] rec[-27] rec[-13] rec[-5] rec[-1]
DETECTOR(0.5, 3, 0) rec[-41] rec[-39] rec[-35] rec[-29] rec[-26] rec[-15] rec[-12] rec[-9] rec[-7] rec[-3]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE2(0.125) 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE1(0.125) 2 3 5 7 9 11 12 16 17 20 23 26 31 32
TICK
}
R 13 14 18 21 24 27 28 30 34 35
CX 2 15 5 19 7 22 9 25 3 29 11 33
C_ZYX 0 1 4 6 8 10
X_ERROR(0.125) 13 14 18 21 24 27 28 30 34 35
DEPOLARIZE2(0.125) 2 15 5 19 7 22 9 25 3 29 11 33
DEPOLARIZE1(0.125) 0 1 4 6 8 10 12 16 17 20 23 26 31 32
TICK
X_ERROR(0.125) 15 19 22 25 29 33
CX 0 13 1 14 4 18 6 21 8 34 10 35
C_ZYX 2 3 5 7 9 11
M 15 19 22 25 29 33
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE2(0.125) 0 13 1 14 4 18 6 21 8 34 10 35
DEPOLARIZE1(0.125) 2 3 5 7 9 11 12 16 17 20 23 24 26 27 28 30 31 32
TICK
R 12 16 17 20 23 26 31 32
CX 3 18 7 21 9 24 11 27 2 28 5 30
C_ZYX 0 1 4 6 8 10
X_ERROR(0.125) 12 16 17 20 23 26 31 32
DEPOLARIZE2(0.125) 3 18 7 21 9 24 11 27 2 28 5 30
DEPOLARIZE1(0.125) 0 1 4 6 8 10 13 14 15 19 22 25 29 33 34 35
TICK
X_ERROR(0.125) 13 14 18 21 24 27 28 30 34 35
CX 0 12 6 20 8 23 10 26 1 31 4 32
C_ZYX 2 3 5 7 9 11
M 13 14 18 21 24 27 28 30 34 35
OBSERVABLE_INCLUDE(0) rec[-8] rec[-7]
DETECTOR(2.5, 1, 0) rec[-22] rec[-17] rec[-6] rec[-1]
DETECTOR(1.5, 4, 0) rec[-23] rec[-21] rec[-18] rec[-7] rec[-5] rec[-2]
DETECTOR(-0.5, 4, 0) rec[-25] rec[-20] rec[-9] rec[-4]
DETECTOR(0.5, 1, 0) rec[-26] rec[-24] rec[-19] rec[-10] rec[-8] rec[-3]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE2(0.125) 0 12 6 20 8 23 10 26 1 31 4 32
DEPOLARIZE1(0.125) 2 3 5 7 9 11 15 16 17 19 22 25 29 33
TICK
R 15 19 22 25 29 33
CX 2 16 3 17 5 20 11 26 7 31 9 32
C_ZYX 0 1 4 6 8 10
X_ERROR(0.125) 15 19 22 25 29 33
DEPOLARIZE2(0.125) 2 16 3 17 5 20 11 26 7 31 9 32
DEPOLARIZE1(0.125) 0 1 4 6 8 10 12 13 14 18 21 23 24 27 28 30 34 35
TICK
X_ERROR(0.125) 12 16 17 20 23 26 31 32
CX 1 15 4 19 8 22 10 25 0 29 6 33
C_ZYX 2 3 5 7 9 11
M 12 16 17 20 23 26 31 32
OBSERVABLE_INCLUDE(0) rec[-6] rec[-5] rec[-4]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE2(0.125) 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE1(0.125) 2 3 5 7 9 11 13 14 18 21 24 27 28 30 34 35
TICK
R 12 16 17 20 23 26 31 32
CX 2 15 5 19 7 22 9 25 3 29 11 33
C_XYZ 0 1 4 6 8 10
X_ERROR(0.125) 12 16 17 20 23 26 31 32
DEPOLARIZE2(0.125) 2 15 5 19 7 22 9 25 3 29 11 33
DEPOLARIZE1(0.125) 0 1 4 6 8 10 13 14 18 21 24 27 28 30 34 35
TICK
X_ERROR(0.125) 15 19 22 25 29 33
CX 0 12 6 20 8 23 10 26 1 31 4 32
C_XYZ 2 3 5 7 9 11
M 15 19 22 25 29 33
DETECTOR(1.5, 2, 0) rec[-53] rec[-51] rec[-49] rec[-45] rec[-43] rec[-41] rec[-11] rec[-9] rec[-7] rec[-5] rec[-3] rec[-1]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE2(0.125) 0 12 6 20 8 23 10 26 1 31 4 32
DEPOLARIZE1(0.125) 2 3 5 7 9 11 13 14 16 17 18 21 24 27 28 30 34 35
TICK
R 13 14 18 21 24 27 28 30 34 35
CX 2 16 3 17 5 20 11 26 7 31 9 32
C_XYZ 0 1 4 6 8 10
X_ERROR(0.125) 13 14 18 21 24 27 28 30 34 35
DEPOLARIZE2(0.125) 2 16 3 17 5 20 11 26 7 31 9 32
DEPOLARIZE1(0.125) 0 1 4 6 8 10 12 15 19 22 23 25 29 33
TICK
X_ERROR(0.125) 12 16 17 20 23 26 31 32
CX 0 13 1 14 4 18 6 21 8 34 10 35
C_XYZ 2 3 5 7 9 11
M 12 16 17 20 23 26 31 32
OBSERVABLE_INCLUDE(0) rec[-6] rec[-5] rec[-4]
DETECTOR(1.5, 2, 0) rec[-19] rec[-17] rec[-15] rec[-5] rec[-3] rec[-1]
DETECTOR(0.5, 5, 0) rec[-21] rec[-18] rec[-16] rec[-7] rec[-4] rec[-2]
DETECTOR(0.5, -1, 0) rec[-22] rec[-20] rec[-8] rec[-6]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE2(0.125) 0 13 1 14 4 18 6 21 8 34 10 35
DEPOLARIZE1(0.125) 2 3 5 7 9 11 15 19 22 24 25 27 28 29 30 33
TICK
R 15 19 22 25 29 33
CX 3 18 7 21 9 24 11 27 2 28 5 30
C_XYZ 0 1 4 6 8 10
X_ERROR(0.125) 15 19 22 25 29 33
DEPOLARIZE2(0.125) 3 18 7 21 9 24 11 27 2 28 5 30
DEPOLARIZE1(0.125) 0 1 4 6 8 10 12 13 14 16 17 20 23 26 31 32 34 35
TICK
X_ERROR(0.125) 13 14 18 21 24 27 28 30 34 35
CX 1 15 4 19 8 22 10 25 0 29 6 33
C_XYZ 2 3 5 7 9 11
M 13 14 18 21 24 27 28 30 34 35
OBSERVABLE_INCLUDE(0) rec[-8] rec[-7]
DETECTOR(2.5, 3, 0) rec[-37] rec[-33] rec[-27] rec[-13] rec[-5] rec[-1]
DETECTOR(0.5, 3, 0) rec[-41] rec[-39] rec[-35] rec[-29] rec[-26] rec[-15] rec[-12] rec[-9] rec[-7] rec[-3]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE2(0.125) 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE1(0.125) 2 3 5 7 9 11 12 16 17 20 23 26 31 32
TICK
CX 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE2(0.125) 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE1(0.125) 2 3 5 7 9 11 12 13 14 16 17 18 20 21 23 24 26 27 28 30 31 32 34 35
TICK
X_ERROR(0.125) 0 1 2 3 4 5 6 7 8 9 10 11
M 0 1 2 3 4 5 6 7 8 9 10 11
OBSERVABLE_INCLUDE(0) rec[-9] rec[-8] rec[-6] rec[-5]
DEPOLARIZE1(0.125) 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
TICK
""")
def test_exact_circuit_SI1000_V():
layout = HoneycombLayout(data_width=2,
data_height=6,
rounds=100,
noise_level=0.125,
noisy_gate_set='SI1000',
tested_observable='H',
sheared=False)
assert layout.ideal_and_noisy_circuit[1] == stim.Circuit("""
QUBIT_COORDS(0, 0) 0
QUBIT_COORDS(0, 4) 1
QUBIT_COORDS(0, 5) 2
QUBIT_COORDS(1, 0) 3
QUBIT_COORDS(1, 1) 4
QUBIT_COORDS(1, 2) 5
QUBIT_COORDS(1, 3) 6
QUBIT_COORDS(1, 4) 7
QUBIT_COORDS(1, 5) 8
QUBIT_COORDS(2, 1) 9
QUBIT_COORDS(2, 2) 10
QUBIT_COORDS(2, 3) 11
QUBIT_COORDS(0, -0.5) 12
QUBIT_COORDS(0, 0.5) 13
QUBIT_COORDS(0, 3.5) 14
QUBIT_COORDS(0, 4.5) 15
QUBIT_COORDS(0, 5.5) 16
QUBIT_COORDS(1, -0.5) 17
QUBIT_COORDS(1, 0.5) 18
QUBIT_COORDS(1, 1.5) 19
QUBIT_COORDS(1, 2.5) 20
QUBIT_COORDS(1, 3.5) 21
QUBIT_COORDS(1, 4.5) 22
QUBIT_COORDS(1, 5.5) 23
QUBIT_COORDS(2, 0.5) 24
QUBIT_COORDS(2, 1.5) 25
QUBIT_COORDS(2, 2.5) 26
QUBIT_COORDS(2, 3.5) 27
QUBIT_COORDS(-0.5, 5) 28
QUBIT_COORDS(0.5, 0) 29
QUBIT_COORDS(0.5, 2) 30
QUBIT_COORDS(0.5, 4) 31
QUBIT_COORDS(1.5, 1) 32
QUBIT_COORDS(1.5, 3) 33
QUBIT_COORDS(1.5, 5) 34
QUBIT_COORDS(2.5, 2) 35
R 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
X_ERROR(0.25) 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
TICK
H 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
C_XYZ 0 1 2 3 4 5 6 7 8 9 10 11
DEPOLARIZE1(0.0125) 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 0 1 2 3 4 5 6 7 8 9 10 11
TICK
CZ 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE2(0.125) 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE1(0.0125) 2 3 5 7 9 11 12 13 14 16 17 18 20 21 23 24 26 27 28 30 31 32 34 35
TICK
CZ 2 15 5 19 7 22 9 25 3 29 11 33
C_ZYX 0 1 4 6 8 10
DEPOLARIZE2(0.125) 2 15 5 19 7 22 9 25 3 29 11 33
DEPOLARIZE1(0.0125) 0 1 4 6 8 10 12 13 14 16 17 18 20 21 23 24 26 27 28 30 31 32 34 35
TICK
CZ 0 13 1 14 4 18 6 21 8 34 10 35
C_ZYX 2 3 5 7 9 11
DEPOLARIZE2(0.125) 0 13 1 14 4 18 6 21 8 34 10 35
DEPOLARIZE1(0.0125) 2 3 5 7 9 11 12 15 16 17 19 20 22 23 24 25 26 27 28 29 30 31 32 33
TICK
CZ 3 18 7 21 9 24 11 27 2 28 5 30
C_ZYX 0 1 4 6 8 10
DEPOLARIZE2(0.125) 3 18 7 21 9 24 11 27 2 28 5 30
DEPOLARIZE1(0.0125) 0 1 4 6 8 10 12 13 14 15 16 17 19 20 22 23 25 26 29 31 32 33 34 35
TICK
CZ 0 12 6 20 8 23 10 26 1 31 4 32
C_ZYX 2 3 5 7 9 11
DEPOLARIZE2(0.125) 0 12 6 20 8 23 10 26 1 31 4 32
DEPOLARIZE1(0.0125) 2 3 5 7 9 11 13 14 15 16 17 18 19 21 22 24 25 27 28 29 30 33 34 35
TICK
CZ 2 16 3 17 5 20 11 26 7 31 9 32
DEPOLARIZE2(0.125) 2 16 3 17 5 20 11 26 7 31 9 32
DEPOLARIZE1(0.0125) 0 1 4 6 8 10 12 13 14 15 18 19 21 22 23 24 25 27 28 29 30 33 34 35
TICK
H 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
DEPOLARIZE1(0.0125) 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 0 1 2 3 4 5 6 7 8 9 10 11
TICK
X_ERROR(0.625) 15 19 22 25 29 33 13 14 18 21 24 27 28 30 34 35 12 16 17 20 23 26 31 32
M 15 19 22 25 29 33
SHIFT_COORDS(0, 0, 1)
M 13 14 18 21 24 27 28 30 34 35
OBSERVABLE_INCLUDE(1) rec[-4] rec[-2]
DETECTOR(2.5, 1, 0) rec[-6] rec[-1]
DETECTOR(1.5, 4, 0) rec[-7] rec[-5] rec[-2]
DETECTOR(-0.5, 4, 0) rec[-9] rec[-4]
DETECTOR(0.5, 1, 0) rec[-10] rec[-8] rec[-3]
SHIFT_COORDS(0, 0, 1)
M 12 16 17 20 23 26 31 32
OBSERVABLE_INCLUDE(1) rec[-2]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE1(0.0125) 0 1 2 3 4 5 6 7 8 9 10 11
DEPOLARIZE1(0.25) 0 1 2 3 4 5 6 7 8 9 10 11
TICK
R 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
X_ERROR(0.25) 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
DEPOLARIZE1(0.0125) 0 1 2 3 4 5 6 7 8 9 10 11
DEPOLARIZE1(0.25) 0 1 2 3 4 5 6 7 8 9 10 11
TICK
H 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
C_ZYX 0 1 2 3 4 5 6 7 8 9 10 11
DEPOLARIZE1(0.0125) 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 0 1 2 3 4 5 6 7 8 9 10 11
TICK
CZ 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE2(0.125) 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE1(0.0125) 2 3 5 7 9 11 12 13 14 16 17 18 20 21 23 24 26 27 28 30 31 32 34 35
TICK
CZ 2 15 5 19 7 22 9 25 3 29 11 33
C_XYZ 0 1 4 6 8 10
DEPOLARIZE2(0.125) 2 15 5 19 7 22 9 25 3 29 11 33
DEPOLARIZE1(0.0125) 0 1 4 6 8 10 12 13 14 16 17 18 20 21 23 24 26 27 28 30 31 32 34 35
TICK
CZ 0 12 6 20 8 23 10 26 1 31 4 32
C_XYZ 2 3 5 7 9 11
DEPOLARIZE2(0.125) 0 12 6 20 8 23 10 26 1 31 4 32
DEPOLARIZE1(0.0125) 2 3 5 7 9 11 13 14 15 16 17 18 19 21 22 24 25 27 28 29 30 33 34 35
TICK
CZ 2 16 3 17 5 20 11 26 7 31 9 32
C_XYZ 0 1 4 6 8 10
DEPOLARIZE2(0.125) 2 16 3 17 5 20 11 26 7 31 9 32
DEPOLARIZE1(0.0125) 0 1 4 6 8 10 12 13 14 15 18 19 21 22 23 24 25 27 28 29 30 33 34 35
TICK
CZ 0 13 1 14 4 18 6 21 8 34 10 35
C_XYZ 2 3 5 7 9 11
DEPOLARIZE2(0.125) 0 13 1 14 4 18 6 21 8 34 10 35
DEPOLARIZE1(0.0125) 2 3 5 7 9 11 12 15 16 17 19 20 22 23 24 25 26 27 28 29 30 31 32 33
TICK
CZ 3 18 7 21 9 24 11 27 2 28 5 30
DEPOLARIZE2(0.125) 3 18 7 21 9 24 11 27 2 28 5 30
DEPOLARIZE1(0.0125) 0 1 4 6 8 10 12 13 14 15 16 17 19 20 22 23 25 26 29 31 32 33 34 35
TICK
H 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
C_ZYX 0 1 2 3 4 5 6 7 8 9 10 11
DEPOLARIZE1(0.0125) 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 0 1 2 3 4 5 6 7 8 9 10 11
TICK
X_ERROR(0.625) 15 19 22 25 29 33 12 16 17 20 23 26 31 32 13 14 18 21 24 27 28 30 34 35
M 15 19 22 25 29 33
DETECTOR(1.5, 2, 0) rec[-11] rec[-9] rec[-7] rec[-5] rec[-3] rec[-1]
SHIFT_COORDS(0, 0, 1)
M 12 16 17 20 23 26 31 32
OBSERVABLE_INCLUDE(1) rec[-2]
DETECTOR(1.5, 2, 0) rec[-19] rec[-17] rec[-15] rec[-5] rec[-3] rec[-1]
DETECTOR(0.5, 5, 0) rec[-21] rec[-18] rec[-16] rec[-7] rec[-4] rec[-2]
DETECTOR(0.5, -1, 0) rec[-22] rec[-20] rec[-8] rec[-6]
SHIFT_COORDS(0, 0, 1)
M 13 14 18 21 24 27 28 30 34 35
OBSERVABLE_INCLUDE(1) rec[-4] rec[-2]
DETECTOR(2.5, 3, 0) rec[-37] rec[-33] rec[-27] rec[-13] rec[-5] rec[-1]
DETECTOR(0.5, 3, 0) rec[-41] rec[-39] rec[-35] rec[-29] rec[-26] rec[-15] rec[-12] rec[-9] rec[-7] rec[-3]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE1(0.0125) 0 1 2 3 4 5 6 7 8 9 10 11
DEPOLARIZE1(0.25) 0 1 2 3 4 5 6 7 8 9 10 11
TICK
REPEAT 48 {
R 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
X_ERROR(0.25) 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
DEPOLARIZE1(0.0125) 0 1 2 3 4 5 6 7 8 9 10 11
DEPOLARIZE1(0.25) 0 1 2 3 4 5 6 7 8 9 10 11
TICK
H 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
C_ZYX 0 1 2 3 4 5 6 7 8 9 10 11
DEPOLARIZE1(0.0125) 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 0 1 2 3 4 5 6 7 8 9 10 11
TICK
CZ 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE2(0.125) 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE1(0.0125) 2 3 5 7 9 11 12 13 14 16 17 18 20 21 23 24 26 27 28 30 31 32 34 35
TICK
CZ 2 15 5 19 7 22 9 25 3 29 11 33
C_ZYX 0 1 4 6 8 10
DEPOLARIZE2(0.125) 2 15 5 19 7 22 9 25 3 29 11 33
DEPOLARIZE1(0.0125) 0 1 4 6 8 10 12 13 14 16 17 18 20 21 23 24 26 27 28 30 31 32 34 35
TICK
CZ 0 13 1 14 4 18 6 21 8 34 10 35
C_ZYX 2 3 5 7 9 11
DEPOLARIZE2(0.125) 0 13 1 14 4 18 6 21 8 34 10 35
DEPOLARIZE1(0.0125) 2 3 5 7 9 11 12 15 16 17 19 20 22 23 24 25 26 27 28 29 30 31 32 33
TICK
CZ 3 18 7 21 9 24 11 27 2 28 5 30
C_ZYX 0 1 4 6 8 10
DEPOLARIZE2(0.125) 3 18 7 21 9 24 11 27 2 28 5 30
DEPOLARIZE1(0.0125) 0 1 4 6 8 10 12 13 14 15 16 17 19 20 22 23 25 26 29 31 32 33 34 35
TICK
CZ 0 12 6 20 8 23 10 26 1 31 4 32
C_ZYX 2 3 5 7 9 11
DEPOLARIZE2(0.125) 0 12 6 20 8 23 10 26 1 31 4 32
DEPOLARIZE1(0.0125) 2 3 5 7 9 11 13 14 15 16 17 18 19 21 22 24 25 27 28 29 30 33 34 35
TICK
CZ 2 16 3 17 5 20 11 26 7 31 9 32
DEPOLARIZE2(0.125) 2 16 3 17 5 20 11 26 7 31 9 32
DEPOLARIZE1(0.0125) 0 1 4 6 8 10 12 13 14 15 18 19 21 22 23 24 25 27 28 29 30 33 34 35
TICK
H 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
DEPOLARIZE1(0.0125) 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 0 1 2 3 4 5 6 7 8 9 10 11
TICK
X_ERROR(0.625) 15 19 22 25 29 33 13 14 18 21 24 27 28 30 34 35 12 16 17 20 23 26 31 32
M 15 19 22 25 29 33
SHIFT_COORDS(0, 0, 1)
M 13 14 18 21 24 27 28 30 34 35
OBSERVABLE_INCLUDE(1) rec[-4] rec[-2]
DETECTOR(2.5, 1, 0) rec[-22] rec[-17] rec[-6] rec[-1]
DETECTOR(1.5, 4, 0) rec[-23] rec[-21] rec[-18] rec[-7] rec[-5] rec[-2]
DETECTOR(-0.5, 4, 0) rec[-25] rec[-20] rec[-9] rec[-4]
DETECTOR(0.5, 1, 0) rec[-26] rec[-24] rec[-19] rec[-10] rec[-8] rec[-3]
SHIFT_COORDS(0, 0, 1)
M 12 16 17 20 23 26 31 32
OBSERVABLE_INCLUDE(1) rec[-2]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE1(0.0125) 0 1 2 3 4 5 6 7 8 9 10 11
DEPOLARIZE1(0.25) 0 1 2 3 4 5 6 7 8 9 10 11
TICK
R 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
X_ERROR(0.25) 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
DEPOLARIZE1(0.0125) 0 1 2 3 4 5 6 7 8 9 10 11
DEPOLARIZE1(0.25) 0 1 2 3 4 5 6 7 8 9 10 11
TICK
H 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
C_ZYX 0 1 2 3 4 5 6 7 8 9 10 11
DEPOLARIZE1(0.0125) 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 0 1 2 3 4 5 6 7 8 9 10 11
TICK
CZ 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE2(0.125) 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE1(0.0125) 2 3 5 7 9 11 12 13 14 16 17 18 20 21 23 24 26 27 28 30 31 32 34 35
TICK
CZ 2 15 5 19 7 22 9 25 3 29 11 33
C_XYZ 0 1 4 6 8 10
DEPOLARIZE2(0.125) 2 15 5 19 7 22 9 25 3 29 11 33
DEPOLARIZE1(0.0125) 0 1 4 6 8 10 12 13 14 16 17 18 20 21 23 24 26 27 28 30 31 32 34 35
TICK
CZ 0 12 6 20 8 23 10 26 1 31 4 32
C_XYZ 2 3 5 7 9 11
DEPOLARIZE2(0.125) 0 12 6 20 8 23 10 26 1 31 4 32
DEPOLARIZE1(0.0125) 2 3 5 7 9 11 13 14 15 16 17 18 19 21 22 24 25 27 28 29 30 33 34 35
TICK
CZ 2 16 3 17 5 20 11 26 7 31 9 32
C_XYZ 0 1 4 6 8 10
DEPOLARIZE2(0.125) 2 16 3 17 5 20 11 26 7 31 9 32
DEPOLARIZE1(0.0125) 0 1 4 6 8 10 12 13 14 15 18 19 21 22 23 24 25 27 28 29 30 33 34 35
TICK
CZ 0 13 1 14 4 18 6 21 8 34 10 35
C_XYZ 2 3 5 7 9 11
DEPOLARIZE2(0.125) 0 13 1 14 4 18 6 21 8 34 10 35
DEPOLARIZE1(0.0125) 2 3 5 7 9 11 12 15 16 17 19 20 22 23 24 25 26 27 28 29 30 31 32 33
TICK
CZ 3 18 7 21 9 24 11 27 2 28 5 30
DEPOLARIZE2(0.125) 3 18 7 21 9 24 11 27 2 28 5 30
DEPOLARIZE1(0.0125) 0 1 4 6 8 10 12 13 14 15 16 17 19 20 22 23 25 26 29 31 32 33 34 35
TICK
H 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
C_ZYX 0 1 2 3 4 5 6 7 8 9 10 11
DEPOLARIZE1(0.0125) 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 0 1 2 3 4 5 6 7 8 9 10 11
TICK
X_ERROR(0.625) 15 19 22 25 29 33 12 16 17 20 23 26 31 32 13 14 18 21 24 27 28 30 34 35
M 15 19 22 25 29 33
DETECTOR(1.5, 2, 0) rec[-53] rec[-51] rec[-49] rec[-45] rec[-43] rec[-41] rec[-11] rec[-9] rec[-7] rec[-5] rec[-3] rec[-1]
SHIFT_COORDS(0, 0, 1)
M 12 16 17 20 23 26 31 32
OBSERVABLE_INCLUDE(1) rec[-2]
DETECTOR(1.5, 2, 0) rec[-19] rec[-17] rec[-15] rec[-5] rec[-3] rec[-1]
DETECTOR(0.5, 5, 0) rec[-21] rec[-18] rec[-16] rec[-7] rec[-4] rec[-2]
DETECTOR(0.5, -1, 0) rec[-22] rec[-20] rec[-8] rec[-6]
SHIFT_COORDS(0, 0, 1)
M 13 14 18 21 24 27 28 30 34 35
OBSERVABLE_INCLUDE(1) rec[-4] rec[-2]
DETECTOR(2.5, 3, 0) rec[-37] rec[-33] rec[-27] rec[-13] rec[-5] rec[-1]
DETECTOR(0.5, 3, 0) rec[-41] rec[-39] rec[-35] rec[-29] rec[-26] rec[-15] rec[-12] rec[-9] rec[-7] rec[-3]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE1(0.0125) 0 1 2 3 4 5 6 7 8 9 10 11
DEPOLARIZE1(0.25) 0 1 2 3 4 5 6 7 8 9 10 11
TICK
}
R 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
X_ERROR(0.25) 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
DEPOLARIZE1(0.0125) 0 1 2 3 4 5 6 7 8 9 10 11
DEPOLARIZE1(0.25) 0 1 2 3 4 5 6 7 8 9 10 11
TICK
H 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
C_ZYX 0 1 2 3 4 5 6 7 8 9 10 11
DEPOLARIZE1(0.0125) 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 0 1 2 3 4 5 6 7 8 9 10 11
TICK
CZ 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE2(0.125) 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE1(0.0125) 2 3 5 7 9 11 12 13 14 16 17 18 20 21 23 24 26 27 28 30 31 32 34 35
TICK
CZ 2 15 5 19 7 22 9 25 3 29 11 33
C_ZYX 0 1 4 6 8 10
DEPOLARIZE2(0.125) 2 15 5 19 7 22 9 25 3 29 11 33
DEPOLARIZE1(0.0125) 0 1 4 6 8 10 12 13 14 16 17 18 20 21 23 24 26 27 28 30 31 32 34 35
TICK
CZ 0 13 1 14 4 18 6 21 8 34 10 35
C_ZYX 2 3 5 7 9 11
DEPOLARIZE2(0.125) 0 13 1 14 4 18 6 21 8 34 10 35
DEPOLARIZE1(0.0125) 2 3 5 7 9 11 12 15 16 17 19 20 22 23 24 25 26 27 28 29 30 31 32 33
TICK
CZ 3 18 7 21 9 24 11 27 2 28 5 30
C_ZYX 0 1 4 6 8 10
DEPOLARIZE2(0.125) 3 18 7 21 9 24 11 27 2 28 5 30
DEPOLARIZE1(0.0125) 0 1 4 6 8 10 12 13 14 15 16 17 19 20 22 23 25 26 29 31 32 33 34 35
TICK
CZ 0 12 6 20 8 23 10 26 1 31 4 32
C_ZYX 2 3 5 7 9 11
DEPOLARIZE2(0.125) 0 12 6 20 8 23 10 26 1 31 4 32
DEPOLARIZE1(0.0125) 2 3 5 7 9 11 13 14 15 16 17 18 19 21 22 24 25 27 28 29 30 33 34 35
TICK
CZ 2 16 3 17 5 20 11 26 7 31 9 32
DEPOLARIZE2(0.125) 2 16 3 17 5 20 11 26 7 31 9 32
DEPOLARIZE1(0.0125) 0 1 4 6 8 10 12 13 14 15 18 19 21 22 23 24 25 27 28 29 30 33 34 35
TICK
H 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
DEPOLARIZE1(0.0125) 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 0 1 2 3 4 5 6 7 8 9 10 11
TICK
X_ERROR(0.625) 15 19 22 25 29 33 13 14 18 21 24 27 28 30 34 35 12 16 17 20 23 26 31 32
M 15 19 22 25 29 33
SHIFT_COORDS(0, 0, 1)
M 13 14 18 21 24 27 28 30 34 35
OBSERVABLE_INCLUDE(1) rec[-4] rec[-2]
DETECTOR(2.5, 1, 0) rec[-22] rec[-17] rec[-6] rec[-1]
DETECTOR(1.5, 4, 0) rec[-23] rec[-21] rec[-18] rec[-7] rec[-5] rec[-2]
DETECTOR(-0.5, 4, 0) rec[-25] rec[-20] rec[-9] rec[-4]
DETECTOR(0.5, 1, 0) rec[-26] rec[-24] rec[-19] rec[-10] rec[-8] rec[-3]
SHIFT_COORDS(0, 0, 1)
M 12 16 17 20 23 26 31 32
OBSERVABLE_INCLUDE(1) rec[-2]
SHIFT_COORDS(0, 0, 1)
DEPOLARIZE1(0.0125) 0 1 2 3 4 5 6 7 8 9 10 11
DEPOLARIZE1(0.25) 0 1 2 3 4 5 6 7 8 9 10 11
TICK
R 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
X_ERROR(0.25) 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
DEPOLARIZE1(0.0125) 0 1 2 3 4 5 6 7 8 9 10 11
DEPOLARIZE1(0.25) 0 1 2 3 4 5 6 7 8 9 10 11
TICK
H 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
C_ZYX 0 1 2 3 4 5 6 7 8 9 10 11
DEPOLARIZE1(0.0125) 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 0 1 2 3 4 5 6 7 8 9 10 11
TICK
CZ 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE2(0.125) 1 15 4 19 8 22 10 25 0 29 6 33
DEPOLARIZE1(0.0125) 2 3 5 7 9 11 12 13 14 16 17 18 20 21 23 24 26 27 28 30 31 32 34 35
TICK
CZ 2 15 5 19 7 22 9 25 3 29 11 33
C_XYZ 0 1 4 6 8 10
DEPOLARIZE2(0.125) 2 15 5 19 7 22 9 25 3 29 11 33
DEPOLARIZE1(0.0125) 0 1 4 6 8 10 12 13 14 16 17 18 20 21 23 24 26 27 28 30 31 32 34 35
TICK
CZ 0 12 6 20 8 23 10 26 1 31 4 32
C_XYZ 2 3 5 7 9 11
DEPOLARIZE2(0.125) 0 12 6 20 8 23 10 26 1 31 4 32
DEPOLARIZE1(0.0125) 2 3 5 7 9 11 13 14 15 16 17 18 19 21 22 24 25 27 28 29 30 33 34 35
TICK
CZ 2 16 3 17 5 20 11 26 7 31 9 32
C_XYZ 0 1 4 6 8 10
DEPOLARIZE2(0.125) 2 16 3 17 5 20 11 26 7 31 9 32
DEPOLARIZE1(0.0125) 0 1 4 6 8 10 12 13 14 15 18 19 21 22 23 24 25 27 28 29 30 33 34 35
TICK
CZ 0 13 1 14 4 18 6 21 8 34 10 35
C_XYZ 2 3 5 7 9 11
DEPOLARIZE2(0.125) 0 13 1 14 4 18 6 21 8 34 10 35
DEPOLARIZE1(0.0125) 2 3 5 7 9 11 12 15 16 17 19 20 22 23 24 25 26 27 28 29 30 31 32 33
TICK
CZ 3 18 7 21 9 24 11 27 2 28 5 30
DEPOLARIZE2(0.125) 3 18 7 21 9 24 11 27 2 28 5 30
DEPOLARIZE1(0.0125) 0 1 4 6 8 10 12 13 14 15 16 17 19 20 22 23 25 26 29 31 32 33 34 35
TICK
H 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
DEPOLARIZE1(0.0125) 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 0 1 2 3 4 5 6 7 8 9 10 11
TICK
X_ERROR(0.625) 15 19 22 25 29 33 12 16 17 20 23 26 31 32 13 14 18 21 24 27 28 30 34 35 0 1 2 3 4 5 6 7 8 9 10 11
M 15 19 22 25 29 33
DETECTOR(1.5, 2, 0) rec[-53] rec[-51] rec[-49] rec[-45] rec[-43] rec[-41] rec[-11] rec[-9] rec[-7] rec[-5] rec[-3] rec[-1]
SHIFT_COORDS(0, 0, 1)
M 12 16 17 20 23 26 31 32
OBSERVABLE_INCLUDE(1) rec[-2]
DETECTOR(1.5, 2, 0) rec[-19] rec[-17] rec[-15] rec[-5] rec[-3] rec[-1]
DETECTOR(0.5, 5, 0) rec[-21] rec[-18] rec[-16] rec[-7] rec[-4] rec[-2]
DETECTOR(0.5, -1, 0) rec[-22] rec[-20] rec[-8] rec[-6]
SHIFT_COORDS(0, 0, 1)
M 13 14 18 21 24 27 28 30 34 35
OBSERVABLE_INCLUDE(1) rec[-4] rec[-2]
DETECTOR(2.5, 3, 0) rec[-37] rec[-33] rec[-27] rec[-13] rec[-5] rec[-1]
DETECTOR(0.5, 3, 0) rec[-41] rec[-39] rec[-35] rec[-29] rec[-26] rec[-15] rec[-12] rec[-9] rec[-7] rec[-3]
SHIFT_COORDS(0, 0, 1)
M 0 1 2 3 4 5 6 7 8 9 10 11
DETECTOR(0, 0.5, 0) rec[-22] rec[-12]
DETECTOR(0, 3.5, 0) rec[-21] rec[-11]
DETECTOR(1, 0.5, 0) rec[-20] rec[-9] rec[-8]
DETECTOR(1, 3.5, 0) rec[-19] rec[-6] rec[-5]
DETECTOR(2, 0.5, 0) rec[-18] rec[-3]
DETECTOR(2, 3.5, 0) rec[-17] rec[-1]
DETECTOR(-0.5, 5, 0) rec[-16] rec[-10]
DETECTOR(0.5, 2, 0) rec[-15] rec[-7]
DETECTOR(1.5, 5, 0) rec[-14] rec[-4]
DETECTOR(2.5, 2, 0) rec[-13] rec[-2]
DETECTOR(1.5, 2, 0) rec[-35] rec[-33] rec[-31] rec[-27] rec[-25] rec[-23] rec[-8] rec[-7] rec[-6] rec[-3] rec[-2] rec[-1]
OBSERVABLE_INCLUDE(1) rec[-11] rec[-10] rec[-5] rec[-4]
TICK
""")
| 32.510438
| 153
| 0.485302
| 44,408
| 241,390
| 2.621104
| 0.004121
| 0.086806
| 0.383512
| 0.241241
| 0.988522
| 0.987672
| 0.982672
| 0.98183
| 0.980541
| 0.980412
| 0
| 0.566679
| 0.409499
| 241,390
| 7,424
| 154
| 32.514817
| 0.249914
| 0
| 0
| 0.98053
| 0
| 0.053002
| 0.976739
| 0.010614
| 0
| 0
| 0
| 0
| 0.001217
| 1
| 0.001082
| false
| 0
| 0.000406
| 0
| 0.001487
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
90a85f951c5ce7186ac7ceb3b0de61b77d7b180f
| 466
|
py
|
Python
|
Ifcondition.py
|
gurmeetkhehra/python-practice
|
abeb5586f8c1e673fd8ff312a4ae0941f2a0194b
|
[
"Apache-2.0"
] | null | null | null |
Ifcondition.py
|
gurmeetkhehra/python-practice
|
abeb5586f8c1e673fd8ff312a4ae0941f2a0194b
|
[
"Apache-2.0"
] | null | null | null |
Ifcondition.py
|
gurmeetkhehra/python-practice
|
abeb5586f8c1e673fd8ff312a4ae0941f2a0194b
|
[
"Apache-2.0"
] | null | null | null |
dollar_bill = 10
if dollar_bill < 1000:
print('bill exist')
else:
print ('bill does not exist')
# dollar_bill = 1000
#
# if dollar_bill < 800:
# print ('bill exist')
# else:
# print ('bill does not exist')
#
# dollar_bill = 1000
# if dollar_bill > 500:
# print ('bill exist')
# else:
# print ('bill does not exist')
#
# dollar_bill = 1000
# if dollar_bill > 500:
# print('bill exist')
# else:
# print('bill does not exist')
jKDK
| 17.923077
| 35
| 0.607296
| 65
| 466
| 4.230769
| 0.2
| 0.290909
| 0.174545
| 0.261818
| 0.872727
| 0.872727
| 0.872727
| 0.872727
| 0.872727
| 0.872727
| 0
| 0.077364
| 0.251073
| 466
| 25
| 36
| 18.64
| 0.710602
| 0.684549
| 0
| 0
| 0
| 0
| 0.226563
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
90c40f9f0e3cb4ff1c71835564a3b83e26fd8ccb
| 1,327
|
py
|
Python
|
lfs/shipping/__init__.py
|
naro/django-lfs
|
312404370e497d00aa0f7221dc55a70a20490fb5
|
[
"BSD-3-Clause"
] | null | null | null |
lfs/shipping/__init__.py
|
naro/django-lfs
|
312404370e497d00aa0f7221dc55a70a20490fb5
|
[
"BSD-3-Clause"
] | null | null | null |
lfs/shipping/__init__.py
|
naro/django-lfs
|
312404370e497d00aa0f7221dc55a70a20490fb5
|
[
"BSD-3-Clause"
] | null | null | null |
# lfs imports
from lfs.plugins import ShippingMethodPriceCalculator
class GrossShippingMethodPriceCalculator(ShippingMethodPriceCalculator):
"""
ShippingMethodPriceCalculator which considers the entered price as gross
price.
See lfs.plugins.ShippingMethodPriceCalculator
"""
def get_price_net(self):
"""See lfs.plugins.ShippingMethodPriceCalculator.
"""
try:
return self.shipping_method.price / ((100 + self.shipping_method.tax.rate) / 100)
except AttributeError:
return self.shipping_method.price
def get_price_gross(self):
"""See lfs.plugins.ShippingMethodPriceCalculator.
"""
return self.shipping_method.price
class NetShippingMethodPriceCalculator(ShippingMethodPriceCalculator):
"""
ShippingMethodPriceCalculator which considers the entered price as net
price.
"""
def get_price_net(self):
"""See lfs.plugins.ShippingMethodPriceCalculator.
"""
return self.shipping_method.price
def get_price_gross(self):
"""See lfs.plugins.ShippingMethodPriceCalculator.
"""
try:
return self.shipping_method.price * ((100 + self.shipping_method.tax.rate) / 100)
except AttributeError:
return self.shipping_method.price
| 30.860465
| 93
| 0.689525
| 121
| 1,327
| 7.429752
| 0.247934
| 0.106785
| 0.160178
| 0.160178
| 0.783092
| 0.783092
| 0.783092
| 0.783092
| 0.585095
| 0.553949
| 0
| 0.01173
| 0.229088
| 1,327
| 42
| 94
| 31.595238
| 0.867058
| 0.332329
| 0
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.235294
| false
| 0
| 0.058824
| 0
| 0.764706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
90d6894fc4665a147e779d80b62ed75cc370481e
| 2,659
|
py
|
Python
|
project_sample/src/server_example.py
|
Volkova-Natalia/aiohttp_lamp
|
7edc8ab9e0ca255ec624ec6ff0dcec02f2d23742
|
[
"MIT"
] | null | null | null |
project_sample/src/server_example.py
|
Volkova-Natalia/aiohttp_lamp
|
7edc8ab9e0ca255ec624ec6ff0dcec02f2d23742
|
[
"MIT"
] | null | null | null |
project_sample/src/server_example.py
|
Volkova-Natalia/aiohttp_lamp
|
7edc8ab9e0ca255ec624ec6ff0dcec02f2d23742
|
[
"MIT"
] | null | null | null |
from aiohttp import web, WSMsgType
import asyncio
from settings import SERVER_HOST, SERVER_PORT
async def ws_handler(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
await asyncio.sleep(3)
await ws.send_str('Hello, client!')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x55\x77\xff\xaa')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x12')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x13')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x20')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x12\x01')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x13\x01')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x20\x01')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x12\x00\xaa')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x13\x00\xaa')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x20\x03')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x20\x03\xaa')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x20\x03\xaa\xaa\xaa\xaa')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x12\x00')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x13\x00')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x20\x03\xaa\xbb\xcc')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x12\x00\x01')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x12\x00\x01\xaa')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x12\x01\x00')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x12\x01\x00\xaa')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x12\x00\x00\xaa')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x13\x00\x01')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x13\x00\x01\xaa')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x13\x01\x00')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x13\x01\x00\xaa')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x13\x00\x00\xaa')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x20\x03\x00\xaa\xbb\xcc')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x20\x00\x03\xaa\xbb')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x20\x00\x03\xaa\xbb\xcc\xdd')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x12\x00\x00')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x13\x00\x00')
await asyncio.sleep(0.1)
await ws.send_bytes(b'\x20\x00\x03\xaa\xbb\xcc')
return ws
if __name__ == '__main__':
app = web.Application()
app.add_routes([web.get('/', ws_handler)])
web.run_app(app, host=SERVER_HOST, port=SERVER_PORT)
| 30.563218
| 56
| 0.66604
| 476
| 2,659
| 3.619748
| 0.107143
| 0.134068
| 0.315728
| 0.323854
| 0.81834
| 0.81834
| 0.81834
| 0.81834
| 0.81834
| 0.81834
| 0
| 0.096599
| 0.170741
| 2,659
| 86
| 57
| 30.918605
| 0.684807
| 0
| 0
| 0.413333
| 0
| 0
| 0.166604
| 0.037608
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04
| 0
| 0.053333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
90efcfeb61e5e91585212d2503498b1d261f8605
| 15,307
|
py
|
Python
|
models_SHOT_convex/portfol_roundlot.py
|
grossmann-group/pyomo-MINLP-benchmarking
|
714f0a0dffd61675649a805683c0627af6b4929e
|
[
"MIT"
] | null | null | null |
models_SHOT_convex/portfol_roundlot.py
|
grossmann-group/pyomo-MINLP-benchmarking
|
714f0a0dffd61675649a805683c0627af6b4929e
|
[
"MIT"
] | null | null | null |
models_SHOT_convex/portfol_roundlot.py
|
grossmann-group/pyomo-MINLP-benchmarking
|
714f0a0dffd61675649a805683c0627af6b4929e
|
[
"MIT"
] | null | null | null |
# MINLP written by GAMS Convert at 05/15/20 00:51:12
#
# Equation counts
# Total E G L N X C B
# 12 10 2 0 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 18 10 0 8 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 43 27 16 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
# Build the MINLP model; ``m`` is the conventional short alias used below.
model = m = ConcreteModel()

# Continuous nonnegative variables x1..x9: x1..x8 are the asset weights
# (they sum to 1 in c4); x9 is the auxiliary risk variable minimized by
# the objective.
for _k in range(1, 10):
    setattr(m, 'x%d' % _k, Var(within=Reals, bounds=(0, None), initialize=0))

# Integer round-lot variables i10..i17; each has its own upper bound,
# matching the lot-size multiplier used in constraints c5..c12.
_lot_bounds = (100000, 32000, 78000, 56000, 43000, 100000, 55000, 78000)
for _k, _ub in enumerate(_lot_bounds, start=10):
    setattr(m, 'i%d' % _k, Var(within=Integers, bounds=(0, _ub), initialize=0))
# Objective: minimize the auxiliary variable x9; constraint c3 bounds x9
# from below by the portfolio standard-deviation term, so this minimizes risk.
m.obj = Objective(expr= m.x9, sense=minimize)
# c2: expected-return term (weighted 1+mean returns of x1..x8) minus the
# sample standard deviation — sqrt of (1/21 ≈ 0.047619...) times the sum of
# 21 squared deviation terms — must be at least 0.05.  Generated by GAMS
# Convert; coefficients left exactly as emitted.
m.c2 = Constraint(expr=1.07813636363636*m.x1 - sqrt(0.0476190476190476*(-0.00313636363636371*m.x1 - 0.150909090909091*
m.x2 - 0.267772727272727*m.x3 - 0.308636363636363*m.x4 - 0.423318181818182*m.x5 -
0.0687727272727274*m.x6 - 0.290227272727273*m.x7 + 0.548045454545455*m.x8)**2 +
0.0476190476190476*(0.0058636363636364*m.x1 - 0.0729090909090906*m.x2 - 0.384772727272727*m.x3 -
0.407636363636363*m.x4 - 0.459318181818182*m.x5 - 0.0897727272727273*m.x6 - 0.373227272727273*
m.x7 + 0.593045454545455*m.x8)**2 + 0.0476190476190476*(-0.0171363636363637*m.x1 -
0.0369090909090906*m.x2 + 0.251227272727273*m.x3 + 0.261363636363637*m.x4 + 0.196681818181818*
m.x5 + 0.0312272727272727*m.x6 + 0.212772727272727*m.x7 - 0.368954545454545*m.x8)**2 +
0.0476190476190476*(0.0820909090909094*m.x2 - 0.0261363636363636*m.x1 + 0.116227272727273*m.x3 +
0.142363636363637*m.x4 + 0.158681818181818*m.x5 + 0.0642272727272726*m.x6 - 0.116227272727273*
m.x7 - 0.168954545454545*m.x8)**2 + 0.0476190476190476*(-0.0231363636363637*m.x1 -
0.0909090909090906*m.x2 - 0.193772727272727*m.x3 - 0.149636363636363*m.x4 - 0.0283181818181817*
m.x5 - 0.0617727272727273*m.x6 + 0.0397727272727273*m.x7 + 0.0710454545454546*m.x8)**2 +
0.0476190476190476*(-0.00113636363636371*m.x1 - 0.110909090909091*m.x2 - 0.0557727272727273*m.x3
- 0.0306363636363634*m.x4 + 0.0246818181818182*m.x5 - 0.0797727272727273*m.x6 +
0.184772727272727*m.x7 + 0.166045454545455*m.x8)**2 + 0.0476190476190476*(0.0308636363636363*m.x1
- 0.114909090909091*m.x2 + 0.0642272727272726*m.x3 + 0.132363636363637*m.x4 + 0.185681818181818*
m.x5 - 0.0687727272727274*m.x6 - 0.0932272727272727*m.x7 + 1.08304545454545*m.x8)**2 +
0.0476190476190476*(0.0488636363636363*m.x1 - 0.145909090909091*m.x2 + 0.203227272727273*m.x3 +
0.213363636363637*m.x4 + 0.245681818181818*m.x5 - 0.0607727272727274*m.x6 + 0.0847727272727272*
m.x7 + 0.167045454545455*m.x8)**2 + 0.0476190476190476*(0.0778636363636362*m.x1 -
0.0899090909090907*m.x2 - 0.170772727272727*m.x3 - 0.160636363636363*m.x4 - 0.131318181818182*
m.x5 - 0.0187727272727274*m.x6 - 0.164227272727273*m.x7 - 0.440954545454545*m.x8)**2 +
0.0476190476190476*(0.0388636363636363*m.x1 + 0.372090909090909*m.x2 + 0.0952272727272727*m.x3 +
0.0633636363636367*m.x4 + 0.0916818181818184*m.x5 + 0.219227272727273*m.x6 - 0.160227272727273*
m.x7 - 0.0449545454545452*m.x8)**2 + 0.0476190476190476*(0.0138636363636364*m.x1 -
0.107909090909091*m.x2 + 0.104227272727273*m.x3 + 0.111363636363637*m.x4 + 0.0956818181818184*
m.x5 - 0.0117727272727273*m.x6 + 0.0957727272727273*m.x7 - 0.256954545454545*m.x8)**2 +
0.0476190476190476*(0.0248636363636363*m.x1 + 0.0660909090909094*m.x2 - 0.0587727272727274*m.x3
- 0.0936363636363633*m.x4 - 0.218318181818182*m.x5 + 0.0582272727272726*m.x6 -
0.0672272727272727*m.x7 - 0.303954545454545*m.x8)**2 + 0.0476190476190476*(0.0018636363636364*
m.x1 + 0.273090909090909*m.x2 + 0.196227272727273*m.x3 + 0.202363636363637*m.x4 +
0.211681818181818*m.x5 + 0.121227272727273*m.x6 + 0.420772727272727*m.x7 - 0.122954545454545*m.x8
)**2 + 0.0476190476190476*(0.216090909090909*m.x2 - 0.0151363636363637*m.x1 + 0.0662272727272726*
m.x3 + 0.0373636363636367*m.x4 - 0.0353181818181816*m.x5 + 0.0642272727272726*m.x6 +
0.552772727272727*m.x7 + 0.0870454545454546*m.x8)**2 + 0.0476190476190476*(-0.0171363636363637*
m.x1 - 0.167909090909091*m.x2 - 0.0677727272727273*m.x3 - 0.100636363636363*m.x4 -
0.162318181818182*m.x5 - 0.0687727272727274*m.x6 + 0.104772727272727*m.x7 + 0.115045454545455*
m.x8)**2 + 0.0476190476190476*(-0.00713636363636372*m.x1 - 0.00690909090909053*m.x2 +
0.0452272727272727*m.x3 + 0.0553636363636367*m.x4 + 0.0436818181818184*m.x5 - 0.0157727272727273*
m.x6 + 0.141772727272727*m.x7 - 0.267954545454545*m.x8)**2 + 0.0476190476190476*(
0.0088636363636363*m.x1 + 0.119090909090909*m.x2 + 0.196227272727273*m.x3 + 0.168363636363637*
m.x4 + 0.0826818181818183*m.x5 + 0.0502272727272726*m.x6 - 0.0362272727272728*m.x7 -
0.151954545454545*m.x8)**2 + 0.0476190476190476*(0.0018636363636364*m.x1 - 0.0389090909090906*
m.x2 - 0.151772727272727*m.x3 - 0.185636363636363*m.x4 - 0.291318181818182*m.x5 -
0.00877272727272738*m.x6 - 0.375227272727273*m.x7 - 0.206954545454545*m.x8)**2 +
0.0476190476190476*(0.100090909090909*m.x2 - 0.0211363636363637*m.x1 + 0.184227272727273*m.x3 +
0.218363636363637*m.x4 + 0.472681818181818*m.x5 + 0.0692272727272727*m.x6 - 0.0202272727272728*
m.x7 - 0.170954545454545*m.x8)**2 + 0.0476190476190476*(-0.0421363636363636*m.x1 -
0.0139090909090906*m.x2 - 0.0437727272727273*m.x3 - 0.0336363636363632*m.x4 + 0.0526818181818183*
m.x5 - 0.0157727272727273*m.x6 - 0.263227272727273*m.x7 - 0.202954545454545*m.x8)**2 +
0.0476190476190476*(0.124090909090909*m.x2 - 0.0471363636363638*m.x1 - 0.0197727272727273*m.x3 -
0.0106363636363633*m.x4 + 0.0406818181818183*m.x5 + 0.0182272727272728*m.x6 + 0.184772727272727*
m.x7 + 0.0170454545454546*m.x8)**2 + 0.0476190476190476*(-0.0331363636363637*m.x1 -
0.203909090909091*m.x2 - 0.107772727272727*m.x3 - 0.124636363636363*m.x4 - 0.153318181818182*m.x5
- 0.126772727272727*m.x6 - 0.0632272727272727*m.x7 - 0.138954545454545*m.x8)**2) +
1.09290909090909*m.x2 + 1.11977272727273*m.x3 + 1.12363636363636*m.x4 + 1.12131818181818*m.x5 +
1.09177272727273*m.x6 + 1.14122727272727*m.x7 + 1.12895454545455*m.x8 >= 0.05)
# c3: forces m.x9 >= sqrt(sample variance).  The 21-term variance expression
# under the sqrt appears identical to the one in c2 (GAMS reformulation note
# above: 1 variable / 1 equation removed) — TODO confirm term-by-term before
# refactoring them into a shared helper.
m.c3 = Constraint(expr=-sqrt(0.0476190476190476*(-0.00313636363636371*m.x1 - 0.150909090909091*m.x2 - 0.267772727272727*
m.x3 - 0.308636363636363*m.x4 - 0.423318181818182*m.x5 - 0.0687727272727274*m.x6 -
0.290227272727273*m.x7 + 0.548045454545455*m.x8)**2 + 0.0476190476190476*(0.0058636363636364*m.x1
- 0.0729090909090906*m.x2 - 0.384772727272727*m.x3 - 0.407636363636363*m.x4 - 0.459318181818182*
m.x5 - 0.0897727272727273*m.x6 - 0.373227272727273*m.x7 + 0.593045454545455*m.x8)**2 +
0.0476190476190476*(-0.0171363636363637*m.x1 - 0.0369090909090906*m.x2 + 0.251227272727273*m.x3
+ 0.261363636363637*m.x4 + 0.196681818181818*m.x5 + 0.0312272727272727*m.x6 + 0.212772727272727*
m.x7 - 0.368954545454545*m.x8)**2 + 0.0476190476190476*(0.0820909090909094*m.x2 -
0.0261363636363636*m.x1 + 0.116227272727273*m.x3 + 0.142363636363637*m.x4 + 0.158681818181818*
m.x5 + 0.0642272727272726*m.x6 - 0.116227272727273*m.x7 - 0.168954545454545*m.x8)**2 +
0.0476190476190476*(-0.0231363636363637*m.x1 - 0.0909090909090906*m.x2 - 0.193772727272727*m.x3
- 0.149636363636363*m.x4 - 0.0283181818181817*m.x5 - 0.0617727272727273*m.x6 +
0.0397727272727273*m.x7 + 0.0710454545454546*m.x8)**2 + 0.0476190476190476*(-0.00113636363636371*
m.x1 - 0.110909090909091*m.x2 - 0.0557727272727273*m.x3 - 0.0306363636363634*m.x4 +
0.0246818181818182*m.x5 - 0.0797727272727273*m.x6 + 0.184772727272727*m.x7 + 0.166045454545455*
m.x8)**2 + 0.0476190476190476*(0.0308636363636363*m.x1 - 0.114909090909091*m.x2 +
0.0642272727272726*m.x3 + 0.132363636363637*m.x4 + 0.185681818181818*m.x5 - 0.0687727272727274*
m.x6 - 0.0932272727272727*m.x7 + 1.08304545454545*m.x8)**2 + 0.0476190476190476*(
0.0488636363636363*m.x1 - 0.145909090909091*m.x2 + 0.203227272727273*m.x3 + 0.213363636363637*
m.x4 + 0.245681818181818*m.x5 - 0.0607727272727274*m.x6 + 0.0847727272727272*m.x7 +
0.167045454545455*m.x8)**2 + 0.0476190476190476*(0.0778636363636362*m.x1 - 0.0899090909090907*
m.x2 - 0.170772727272727*m.x3 - 0.160636363636363*m.x4 - 0.131318181818182*m.x5 -
0.0187727272727274*m.x6 - 0.164227272727273*m.x7 - 0.440954545454545*m.x8)**2 +
0.0476190476190476*(0.0388636363636363*m.x1 + 0.372090909090909*m.x2 + 0.0952272727272727*m.x3 +
0.0633636363636367*m.x4 + 0.0916818181818184*m.x5 + 0.219227272727273*m.x6 - 0.160227272727273*
m.x7 - 0.0449545454545452*m.x8)**2 + 0.0476190476190476*(0.0138636363636364*m.x1 -
0.107909090909091*m.x2 + 0.104227272727273*m.x3 + 0.111363636363637*m.x4 + 0.0956818181818184*
m.x5 - 0.0117727272727273*m.x6 + 0.0957727272727273*m.x7 - 0.256954545454545*m.x8)**2 +
0.0476190476190476*(0.0248636363636363*m.x1 + 0.0660909090909094*m.x2 - 0.0587727272727274*m.x3
- 0.0936363636363633*m.x4 - 0.218318181818182*m.x5 + 0.0582272727272726*m.x6 -
0.0672272727272727*m.x7 - 0.303954545454545*m.x8)**2 + 0.0476190476190476*(0.0018636363636364*
m.x1 + 0.273090909090909*m.x2 + 0.196227272727273*m.x3 + 0.202363636363637*m.x4 +
0.211681818181818*m.x5 + 0.121227272727273*m.x6 + 0.420772727272727*m.x7 - 0.122954545454545*m.x8
)**2 + 0.0476190476190476*(0.216090909090909*m.x2 - 0.0151363636363637*m.x1 + 0.0662272727272726*
m.x3 + 0.0373636363636367*m.x4 - 0.0353181818181816*m.x5 + 0.0642272727272726*m.x6 +
0.552772727272727*m.x7 + 0.0870454545454546*m.x8)**2 + 0.0476190476190476*(-0.0171363636363637*
m.x1 - 0.167909090909091*m.x2 - 0.0677727272727273*m.x3 - 0.100636363636363*m.x4 -
0.162318181818182*m.x5 - 0.0687727272727274*m.x6 + 0.104772727272727*m.x7 + 0.115045454545455*
m.x8)**2 + 0.0476190476190476*(-0.00713636363636372*m.x1 - 0.00690909090909053*m.x2 +
0.0452272727272727*m.x3 + 0.0553636363636367*m.x4 + 0.0436818181818184*m.x5 - 0.0157727272727273*
m.x6 + 0.141772727272727*m.x7 - 0.267954545454545*m.x8)**2 + 0.0476190476190476*(
0.0088636363636363*m.x1 + 0.119090909090909*m.x2 + 0.196227272727273*m.x3 + 0.168363636363637*
m.x4 + 0.0826818181818183*m.x5 + 0.0502272727272726*m.x6 - 0.0362272727272728*m.x7 -
0.151954545454545*m.x8)**2 + 0.0476190476190476*(0.0018636363636364*m.x1 - 0.0389090909090906*
m.x2 - 0.151772727272727*m.x3 - 0.185636363636363*m.x4 - 0.291318181818182*m.x5 -
0.00877272727272738*m.x6 - 0.375227272727273*m.x7 - 0.206954545454545*m.x8)**2 +
0.0476190476190476*(0.100090909090909*m.x2 - 0.0211363636363637*m.x1 + 0.184227272727273*m.x3 +
0.218363636363637*m.x4 + 0.472681818181818*m.x5 + 0.0692272727272727*m.x6 - 0.0202272727272728*
m.x7 - 0.170954545454545*m.x8)**2 + 0.0476190476190476*(-0.0421363636363636*m.x1 -
0.0139090909090906*m.x2 - 0.0437727272727273*m.x3 - 0.0336363636363632*m.x4 + 0.0526818181818183*
m.x5 - 0.0157727272727273*m.x6 - 0.263227272727273*m.x7 - 0.202954545454545*m.x8)**2 +
0.0476190476190476*(0.124090909090909*m.x2 - 0.0471363636363638*m.x1 - 0.0197727272727273*m.x3 -
0.0106363636363633*m.x4 + 0.0406818181818183*m.x5 + 0.0182272727272728*m.x6 + 0.184772727272727*
m.x7 + 0.0170454545454546*m.x8)**2 + 0.0476190476190476*(-0.0331363636363637*m.x1 -
0.203909090909091*m.x2 - 0.107772727272727*m.x3 - 0.124636363636363*m.x4 - 0.153318181818182*m.x5
- 0.126772727272727*m.x6 - 0.0632272727272727*m.x7 - 0.138954545454545*m.x8)**2) + m.x9 >= 0)
# c4: the eight asset weights must sum to one (fully-invested budget).
m.c4 = Constraint(expr=m.x1 + m.x2 + m.x3 + m.x4 + m.x5 + m.x6
                       + m.x7 + m.x8 == 1)

# c5..c12: round-lot linking — each weight x_j satisfies
# i_(j+9) == cap_j * x_j, where cap_j is that asset's lot bound.
_lot_caps = (100000, 32000, 78000, 56000, 43000, 100000, 55000, 78000)
for _j, _cap in enumerate(_lot_caps, start=1):
    setattr(m, 'c%d' % (_j + 4),
            Constraint(expr=-_cap * getattr(m, 'x%d' % _j)
                            + getattr(m, 'i%d' % (_j + 9)) == 0))
| 85.994382
| 120
| 0.598484
| 1,955
| 15,307
| 4.685934
| 0.144757
| 0.015719
| 0.086453
| 0.022923
| 0.903613
| 0.892588
| 0.892588
| 0.891715
| 0.891715
| 0.837245
| 0
| 0.652247
| 0.26452
| 15,307
| 177
| 121
| 86.480226
| 0.161485
| 0.044424
| 0
| 0.447552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.006993
| 0
| 0.006993
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
294dfa47382d90b975d926b796d5941d74e941e5
| 12,724
|
py
|
Python
|
template.py
|
1kastner/library
|
e0599dddb5cea3f56a7714ba5b15d4fe7e9388e2
|
[
"MIT"
] | null | null | null |
template.py
|
1kastner/library
|
e0599dddb5cea3f56a7714ba5b15d4fe7e9388e2
|
[
"MIT"
] | null | null | null |
template.py
|
1kastner/library
|
e0599dddb5cea3f56a7714ba5b15d4fe7e9388e2
|
[
"MIT"
] | 1
|
2018-07-25T19:30:14.000Z
|
2018-07-25T19:30:14.000Z
|
#!/usr/bin/python
#template collection
# Load the shared HTML page skeleton once at import time.  Use a context
# manager so the file handle is closed promptly (the original leaked it).
with open("basic_template.html") as _file:
    basic_template = _file.read()

# Static page fragments.  '<?= ... ?>' placeholders are substituted elsewhere
# by the (unseen) template engine.
index = "<br><p>Welcome to this beautiful library at this nice day.</p><p>Today it is the <i><?= date ?></i>. On the left you can see the interactive menu with all the functions you need. </p><p>If you need any assistance either contact the library master or don't hesitate to ask the programmer (murph@gmx.net) for assistance."
successful = "Your action has been done successfully!"
# Typo fix: "occured" -> "occurred" (user-visible error message).
error = "Sorry for the inconvenience, but an error occurred: <br><p><?= msg ?></p>"
###back from javascript should be included!
###########################################
###########################################
### Persons
# Form for registering a new student.
# NOTE(review): <input lable='submit'> recurs throughout this file — 'lable'
# is not a valid HTML attribute (probably meant value='submit').  Left
# unchanged because "fixing" it would alter the rendered button label.
call_add_person = """<p><table><tr><form action='/do_add_person'></tr>
<tr><td>Student ID</td><td><input name='person_id' type='text'></input></td></tr>
<tr><td>First Name</td><td><input name='firstname' type='text'></input></td></tr>
<tr><td>Surname</td><td><input name='surname' type='text'></input></td></tr>
<tr><td>Class</td><td><input name='_class' type='text'></input></td></tr>
<tr><td>Semester</td><td><input name='semester' type='text'></input></td></tr>
<tr><td>Cellphone Number</td><td><input name='cellphone' type='text'></input></td></tr>
<tr><td></td></tr>
<tr><td><input lable='submit' type='submit'></td></td></form></table></p>"""

# Step 1 of editing a student: ask for the ID.
call_edit_person = """<p>Please specify which student should be edited<br><table><tr><form action='/do_edit_person'></tr>
<tr><td>Student ID</td><td><input name='person_id' type='text'></input></td></tr>
<tr><td></td></tr>
<tr><td><input lable='submit' type='submit'></td></td></form></table></p>
<i>If the student's ID is supposed to be changed delete the student and add him/her again.</i>"""

# Step 2 of editing: pre-filled form (typo fix: "necissary" -> "necessary").
do_edit_person = """<table><tr><form action='/do_edit_person_2'></tr>
<tr><td>Student ID</td><td><?= person_id?><input name='person_id' type='hidden' value='<?= person_id ?>'></input></td></tr>
<tr><td>First Name</td><td><input name='firstname' type='text' value='<?= firstname ?>'></input></td></tr>
<tr><td>Surname</td><td><input name='surname' type='text' value='<?= surname ?>'></input></td></tr>
<tr><td>Cellphone Number</td><td><input name='cellphone' type='text' value='<?= cellphone ?>'></input></td></tr>
<tr><td>Class</td><td><input name='_class' type='text' value='<?= _class ?>'></input></td></tr>
<tr><td>Semester</td><td><input name='semester' type='text' value='<?= semester ?>'></input></td></tr>
<tr><td></td></tr>
<tr><td><input lable='submit' type='submit'></td></td></form></table></p><br>
The student ID cannot be changed. If that is necessary delete the student and add a new one."""

# Ask which student to delete.
call_delete_person = """<p>Please specify which student should be <b>deleted</b><br><table><tr><form action='/do_delete_person'></tr>
<tr><td>Student ID</td><td><input name='person_id' type='text'></input></td></tr>
<tr><td></td></tr>
<tr><td><input lable='submit' type='submit'></td></td></form></table></p>"""

# Empty-result variant of the student listing.
call_show_persons_none = """<p>In the Library System following students are registered:<br>
<p>No students are in the database</p>
"""

# Full student listing; '<? for ... ?>' / '<? end ?>' are template-engine loops.
call_show_persons = """<p>In the Library System following students are registered:<br><br></p>
<table border=1>
<tr><td><b>Student ID</b></td><td><b>First Name</b></td><td><b>Surname</b></td><td><b>Class</b></td><td><b>Semester</b></td><td><b>Cellphone</b></td></tr>
<? for student_set in res: ?>
<tr>
<? for el in student_set: ?>
<td WIDTH=27% HEIGHT=19><?= el ?></td>
<? end ?>
</tr>
<? end ?>
</table>
"""

# Search form for students.
call_search_person = """<p>Please enter some keywords<table><tr><form action='/do_search_person'></tr>
<tr><td>Student ID</td><td><input name='person_id' type='text'></input></td></tr>
<tr><td>First Name</td><td><input name='firstname' type='text' </input></td></tr>
<tr><td>Surname</td><td><input name='surname' type='text'></input></td></tr>
<tr><td>Class</td><td><input name='_class' type='text'></input></td></tr>
<tr><td>Semester</td><td><input name='semester' type='text'></input></td></tr>
<tr><td>Cellphone Number</td><td><input name='cellphone' type='text'></input></td></tr>
<tr><td></td></tr>
<tr><td><input lable='submit' type='submit'></td></td></form></table></p>
"""

# Search-result listing.
# NOTE(review): three '<? end ?>' markers for two '<? for ?>' loops —
# call_show_persons above uses only two.  Template-engine semantics are not
# visible from here, so the extra marker is flagged rather than removed.
do_search_person = """<p>The result of the search was as follows:<br><br></p>
<table border=1>
<tr><td><b>Student ID</b></td><td><b>First Name</b></td><td><b>Surname</b></td><td><b>Class</b></td><td><b>Semester</b></td><td><b>Cellphone</b></td></tr>
<? for student_set in res: ?>
<tr><? for el in student_set: ?>
<td WIDTH=27% HEIGHT=19><?= el ?></td>
<? end ?></tr>
<? end ?>
<? end ?></table>
"""

# Empty-result variant of the student search.
do_search_person_none = """<p>The result of the search was as follows:<br>
<p>No students could be found</p>
"""
####################################################
####################################################
### BOOKS ######################################
# Form for registering a new book.  Typo fixes in the user-visible hint:
# "seperate" -> "separate" and "colons" -> "commas" (the example shows ',').
call_add_book = """<p><table><tr><form action='/do_add_book'></tr>
<tr><td>Author</td><td><input name='author' type='text'></input></td></tr>
<tr><td>Title</td><td><input name='title' type='text'></input></td></tr>
<tr><td>Amount</td><td><input name='amount' type='text'></input></td></tr>
<tr><td>Tags</td><td><input name='tags' type='text'></input></td></tr>
<tr><td></td></tr>
<tr><td><input lable='submit' type='submit'></td></td></form></table></p><br>
Please separate the tags by commas (',')"""

# Step 1 of editing a book: ask for the ID.
call_edit_book = """<p>Please specify which book should be edited<br><table><tr><form action='/do_edit_book'></tr>
<tr><td>Book ID</td><td><input name='book_id' type='text'></input></td></tr>
<tr><td></td></tr>
<tr><td><input lable='submit' type='submit'></td></td></form></table></p>
"""

# Step 2 of editing: pre-filled form (typo fixes as above, plus
# "nessicary" -> "necessary").
do_edit_book = """<p><table><tr><form action='/do_edit_book_2'></tr>
<tr><td>Book ID</td><td><?= book_id?><input name='book_id' type='hidden' value='<?= book_id ?>'></td></tr>
<tr><td>Author</td><td><input name='author' type='text' value='<?= author ?>'></input></td></tr>
<tr><td>Title</td><td><input name='title' type='text' value='<?= title?>'></input></td></tr>
<tr><td>Amount</td><td><input name='amount' type='text' value='<?= amount ?>'></input></td></tr>
<tr><td>Tags</td><td><input name='tags' type='text' value='<?= tags ?>'></input></td></tr>
<tr><td></td></tr>
<tr><td><input lable='submit' type='submit'></td></td></form></table></p><br>
Please separate the tags by commas (',')<br>
The Book ID cannot be edited. Please delete the book and add it again if a new ID is necessary."""

# Ask which book to delete.
call_delete_book = """<p>Please specify which book should be <b>deleted</b><br><table><tr><form action='/do_delete_book'></tr>
<tr><td>Book ID</td><td><input name='book_id' type='text'></input></td></tr>
<tr><td></td></tr>
<tr><td><input lable='submit' type='submit'></td></td></form></table></p>"""

# Empty-result variant of the book listing.
call_show_books_none = """<p>In the Library System following books are registered:<br>
<p>No books are in the database</p>
"""

# Full book listing; renders <i>None</i> for empty cells.
# NOTE(review): extra '<? end ?>' compared to call_show_persons — see note
# on do_search_person.
call_show_books = """<p>In the Library System following books are registered:<br><br></p>
<table border=1>
<tr><td><b>Book ID</b></td>
<td><b>Author</b></td>
<td><b>Title</b></td>
<td><b>Amount</b></td>
<td><b>Tags</b></td></tr>
<? for student_set in res: ?>
<tr><? for el in student_set: ?>
<td WIDTH=24% HEIGHT=19><? if el: ?><?= el ?><? end ?><? if not el: ?><i>None</i><? end ?></td>
<? end ?></tr>
<? end ?>
<? end ?></table>
"""

# Search form for books.
call_search_book = """<p>Please enter some keywords<table><tr><form action='/do_search_book'></tr>
<tr><td>Book ID</td><td><input name='book_id' type='text'></input></td></tr>
<tr><td>Author</td><td><input name='author' type='text'></input></td></tr>
<tr><td>Title</td><td><input name='title' type='text'></input></td></tr>
<tr><td>Tags</td><td><input name='tags' type='text'></input></td></tr>
<tr><td></td></tr>
<tr><td><input lable='submit' type='submit'></td></td></form></table></p><br>
Please separate the tags by commas (',')
"""

# Search-result listing for books.
do_search_book = """<p>The result of the search was as follows:<br><br></p>
<table border=1>
<tr><td><b>Book ID</b></td>
<td><b>Author</b></td>
<td><b>Title</b></td>
<td><b>Amount</b></td>
<td><b>Tags</b></td></tr>
<? for row in res: ?>
<tr><? for el in row: ?>
<td WIDTH=27% HEIGHT=19><?= el ?></td>
<? end ?></tr>
<? end ?>
<? end ?></table>
"""

# Empty-result variant of the book search.
do_search_book_none = """<p>The result of the search was as follows:<br>
<p>No books could be found</p>
"""
############################################
############################################
###Library
# Form for lending a book to a student.
call_lend_book = """Please specify which book will be lent to whom:<br><br>
<table><form action='/do_lend_book'>
<tr><td>Student ID</td><td><input name='person_id' type='text'></input></td></tr>
<tr><td>Book ID</td><td><input name='book_id' type='text'></input></td></tr>
<tr><td>Amount</td><td><input name='amount' type='text'></input></td></tr>
<tr><td></td></tr>
<tr><td><input lable='submit' type='submit'></td></td></form></table></p>
"""

# Form for returning a lent book.
call_return_book = """Please specify who wants to return which book<br><br>
<table><form action='/do_return_book'>
<tr><td>Student ID</td><td><input name='person_id' type='text'></input></td></tr>
<tr><td>Book ID</td><td><input name='book_id' type='text'></input></td></tr>
<tr><td>Amount</td><td><input name='amount' type='text'></input></td></tr>
<tr><td></td></tr>
<tr><td><input lable='submit' type='submit'></td></td></form></table></p>
"""

# Listing of all currently lent books.
call_show_lent_books = """<p>The result of the search was as follows:<br><br></p>
<table border=1>
<tr><td><b>Lend ID</b></td>
<td><b>Student ID</b></td>
<td><b>First name</b></td>
<td><b>Surname</b></td>
<td><b>Book ID</b></td>
<td><b>Author</b></td>
<td><b>Title</b></td>
<td><b>Amount</b></td>
<td><b>Return Date</b></td></tr>
<? for student_set in res: ?>
<tr><? for el in student_set: ?>
<td WIDTH=16% HEIGHT=19><?= el ?></td>
<? end ?></tr>
<? end ?>
<? end ?></table>
"""

# Empty-result variant of the lent-books listing.
call_show_lent_books_none = """<p>The result of the search was as follows:<br>
<p>No lent books could be found</p>
"""

# Ask which student's lent books to list.
call_lent_books_to = """<p>Please specify for which student you want to have the list of the books to return<br><table><tr><form action='/do_lent_books_to'></tr>
<tr><td>Student ID</td><td><input name='person_id' type='text'></input></td></tr>
<tr><td></td></tr>
<tr><td><input lable='submit' type='submit'></td></td></form></table></p>
"""

# Per-student lent-books listing.
call_show_lent_books_to = """<p>The result for student '<?= person_id ?>' was as follows:<br><br></p>
<table border=1>
<tr><td><b>Lend ID</b></td>
<td><b>Book ID</b></td>
<td><b>Author</b></td>
<td><b>Title</b></td>
<td><b>Amount</b></td>
<td><b>Return Date</b></td></tr>
<? for student_set in res: ?>
<tr><? for el in student_set: ?>
<td WIDTH=16% HEIGHT=19><?= el ?></td>
<? end ?></tr>
<? end ?>
<? end ?></table>
"""

# Shown when deleting a student who still holds library books.
call_show_lent_books_to_when_deleting = """<p>The student '<?= person_id ?>' could not be deleted because s/he still possesses books of the library:<br><br></p>
<table border=1>
<tr><td><b>Lend ID</b></td><td><b>Book ID</b></td><td><b>Author</b></td><td><b>Title</b></td><td><b>Amount</b></td><td><b>Return Date</b></td></tr>
<? for student_set in res: ?>
<tr><? for el in student_set: ?>
<td WIDTH=16% HEIGHT=19><?= el ?></td>
<? end ?></tr>
<? end ?>
<? end ?></table>
"""

# Empty-result variants.
call_show_lent_books_to_none = """<p>The result of the search was as follows:<br>
<p>No lent books could be found</p>
"""
call_lent_books_none = """<p>The result of the search was as follows:<br>
<p>No lent books could be found</p>
"""

# Overdue-books listing.  Bug fix: the original string started with a stray
# '"' character that was rendered verbatim into the page; removed.
call_show_books_over_limit = """
<p>The result of the search was as follows:<br><br></p>
<table border=1>
<tr><td><b>Lend ID</b></td>
<td><b>Student ID</b></td>
<td><b>First name</b></td>
<td><b>Surname</b></td>
<td><b>Book ID</b></td>
<td><b>Author</b></td>
<td><b>Title</b></td></tr>
<? for student_set in res: ?>
<tr><? for el in student_set: ?>
<td WIDTH=16% HEIGHT=19><?= el ?></td>
<? end ?></tr>
<? end ?>
<? end ?></table>
"""

# Empty-result variant of the overdue listing.
call_show_books_over_limit_none = """<p>The result of the search was as follows:<br>
<p>No lent books over limit could be found</p>
"""

# Backup page: download link plus an upload form.
# NOTE(review): the upload <form> is never closed with </form> — most
# browsers tolerate this, so it is flagged rather than changed here.
call_backup = """<p>Welcome to the backup function</p><br>
<table>
<tr>
<td>
For creating a backup, please press:
</td>
<td>
<a href='/create_backup'>HERE</a></td>
</tr>
<tr>
<td>
For inserting an old backup:
</td>
<td>
<form action='/upload_backup' method="post" enctype="multipart/form-data"><input type="file" name="myFile" /><br />
<input type="submit" value="INSERT BACKUP" />
</td>
</tr>
</table>
"""
| 41.718033
| 328
| 0.592188
| 2,120
| 12,724
| 3.481132
| 0.095283
| 0.060705
| 0.054472
| 0.060705
| 0.78252
| 0.760976
| 0.750542
| 0.719919
| 0.70393
| 0.70393
| 0
| 0.003733
| 0.115765
| 12,724
| 304
| 329
| 41.855263
| 0.652209
| 0.007938
| 0
| 0.708661
| 0
| 0.271654
| 0.918565
| 0.316873
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
462d6b5a496ad24b30ff1d69c2cf0df37016e086
| 20,617
|
py
|
Python
|
sdk/python/pulumi_hcloud/floating_ip.py
|
pulumi/pulumi-hcloud
|
332962b39a3a9f23e466eb3b7abc1347af1a118f
|
[
"ECL-2.0",
"Apache-2.0"
] | 13
|
2020-08-06T18:30:45.000Z
|
2022-02-21T09:49:51.000Z
|
sdk/python/pulumi_hcloud/floating_ip.py
|
pulumi/pulumi-hcloud
|
332962b39a3a9f23e466eb3b7abc1347af1a118f
|
[
"ECL-2.0",
"Apache-2.0"
] | 71
|
2020-07-02T11:19:44.000Z
|
2022-03-25T19:34:21.000Z
|
sdk/python/pulumi_hcloud/floating_ip.py
|
pulumi/pulumi-hcloud
|
332962b39a3a9f23e466eb3b7abc1347af1a118f
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-07-21T19:46:49.000Z
|
2020-07-21T19:46:49.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
# Names exported by ``from <this module> import *``.
__all__ = ['FloatingIpArgs', 'FloatingIp']
@pulumi.input_type
class FloatingIpArgs:
    """Typed argument bundle for constructing a ``FloatingIp`` resource.

    Generated by the Pulumi Terraform Bridge: each argument is stored via
    ``pulumi.set`` and re-exposed through a matching getter/setter property
    pair below.
    """

    def __init__(__self__, *,
                 type: pulumi.Input[str],
                 delete_protection: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 home_location: Optional[pulumi.Input[str]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 server_id: Optional[pulumi.Input[int]] = None):
        """
        The set of arguments for constructing a FloatingIp resource.
        :param pulumi.Input[str] type: Type of the Floating IP. `ipv4` `ipv6`
        :param pulumi.Input[bool] delete_protection: Enable or disable delete protection.
        :param pulumi.Input[str] description: Description of the Floating IP.
        :param pulumi.Input[str] home_location: Home location (routing is optimized for that location). Optional if server_id argument is passed.
        :param pulumi.Input[Mapping[str, Any]] labels: User-defined labels (key-value pairs) should be created with.
        :param pulumi.Input[str] name: Name of the Floating IP.
        :param pulumi.Input[int] server_id: Server to assign the Floating IP to.
        """
        # 'type' is the only required argument.
        pulumi.set(__self__, "type", type)
        # Optional arguments are stored only when supplied so that "unset"
        # remains distinguishable from an explicit value.
        if delete_protection is not None:
            pulumi.set(__self__, "delete_protection", delete_protection)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if home_location is not None:
            pulumi.set(__self__, "home_location", home_location)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if server_id is not None:
            pulumi.set(__self__, "server_id", server_id)

    # Property pairs: snake_case Python names map to the camelCase wire
    # names given via @pulumi.getter(name=...).
    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        Type of the Floating IP. `ipv4` `ipv6`
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter(name="deleteProtection")
    def delete_protection(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable or disable delete protection.
        """
        return pulumi.get(self, "delete_protection")

    @delete_protection.setter
    def delete_protection(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "delete_protection", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description of the Floating IP.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="homeLocation")
    def home_location(self) -> Optional[pulumi.Input[str]]:
        """
        Home location (routing is optimized for that location). Optional if server_id argument is passed.
        """
        return pulumi.get(self, "home_location")

    @home_location.setter
    def home_location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "home_location", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        User-defined labels (key-value pairs) should be created with.
        """
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "labels", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the Floating IP.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="serverId")
    def server_id(self) -> Optional[pulumi.Input[int]]:
        """
        Server to assign the Floating IP to.
        """
        return pulumi.get(self, "server_id")

    @server_id.setter
    def server_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "server_id", value)
@pulumi.input_type
class _FloatingIpState:
    def __init__(__self__, *,
                 delete_protection: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 home_location: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 ip_network: Optional[pulumi.Input[str]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 server_id: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering FloatingIp resources.

        :param pulumi.Input[bool] delete_protection: Enable or disable delete protection.
        :param pulumi.Input[str] description: Description of the Floating IP.
        :param pulumi.Input[str] home_location: Home location (routing is optimized for that location). Optional if server_id argument is passed.
        :param pulumi.Input[str] ip_address: (string) IP Address of the Floating IP.
        :param pulumi.Input[str] ip_network: (string) IPv6 subnet. (Only set if `type` is `ipv6`)
        :param pulumi.Input[Mapping[str, Any]] labels: User-defined labels (key/value pairs) the Floating IP should be created with.
        :param pulumi.Input[str] name: Name of the Floating IP.
        :param pulumi.Input[int] server_id: Server to assign the Floating IP to.
        :param pulumi.Input[str] type: Type of the Floating IP. `ipv4` `ipv6`
        """
        # Record only the state fields the caller actually supplied; unset
        # fields stay absent so pulumi treats them as unknown.
        for prop_name, prop_value in (
                ('delete_protection', delete_protection),
                ('description', description),
                ('home_location', home_location),
                ('ip_address', ip_address),
                ('ip_network', ip_network),
                ('labels', labels),
                ('name', name),
                ('server_id', server_id),
                ('type', type)):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="deleteProtection")
    def delete_protection(self) -> Optional[pulumi.Input[bool]]:
        """Enable or disable delete protection."""
        return pulumi.get(self, "delete_protection")

    @delete_protection.setter
    def delete_protection(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "delete_protection", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """Description of the Floating IP."""
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="homeLocation")
    def home_location(self) -> Optional[pulumi.Input[str]]:
        """Home location (routing is optimized for that location). Optional if server_id argument is passed."""
        return pulumi.get(self, "home_location")

    @home_location.setter
    def home_location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "home_location", value)

    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[pulumi.Input[str]]:
        """(string) IP Address of the Floating IP."""
        return pulumi.get(self, "ip_address")

    @ip_address.setter
    def ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_address", value)

    @property
    @pulumi.getter(name="ipNetwork")
    def ip_network(self) -> Optional[pulumi.Input[str]]:
        """(string) IPv6 subnet. (Only set if `type` is `ipv6`)"""
        return pulumi.get(self, "ip_network")

    @ip_network.setter
    def ip_network(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_network", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """User-defined labels (key/value pairs) the Floating IP should be created with."""
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "labels", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """Name of the Floating IP."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="serverId")
    def server_id(self) -> Optional[pulumi.Input[int]]:
        """Server to assign the Floating IP to."""
        return pulumi.get(self, "server_id")

    @server_id.setter
    def server_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "server_id", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """Type of the Floating IP. `ipv4` `ipv6`"""
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
class FloatingIp(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 delete_protection: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 home_location: Optional[pulumi.Input[str]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 server_id: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a Hetzner Cloud Floating IP to represent a publicly-accessible static IP address that can be mapped to one of your servers.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_hcloud as hcloud

        node1 = hcloud.Server("node1",
            image="debian-9",
            server_type="cx11")
        master = hcloud.FloatingIp("master",
            type="ipv4",
            server_id=node1.id)
        ```

        ## Import

        Floating IPs can be imported using their `id`

        ```sh
        $ pulumi import hcloud:index/floatingIp:FloatingIp myip <id>
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] delete_protection: Enable or disable delete protection.
        :param pulumi.Input[str] description: Description of the Floating IP.
        :param pulumi.Input[str] home_location: Home location (routing is optimized for that location). Optional if server_id argument is passed.
        :param pulumi.Input[Mapping[str, Any]] labels: User-defined labels (key/value pairs) the Floating IP should be created with.
        :param pulumi.Input[str] name: Name of the Floating IP.
        :param pulumi.Input[int] server_id: Server to assign the Floating IP to.
        :param pulumi.Input[str] type: Type of the Floating IP. `ipv4` `ipv6`
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: FloatingIpArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Hetzner Cloud Floating IP to represent a publicly-accessible static IP address that can be mapped to one of your servers.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_hcloud as hcloud

        node1 = hcloud.Server("node1",
            image="debian-9",
            server_type="cx11")
        master = hcloud.FloatingIp("master",
            type="ipv4",
            server_id=node1.id)
        ```

        ## Import

        Floating IPs can be imported using their `id`

        ```sh
        $ pulumi import hcloud:index/floatingIp:FloatingIp myip <id>
        ```

        :param str resource_name: The name of the resource.
        :param FloatingIpArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: a FloatingIpArgs object
        # or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(FloatingIpArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       delete_protection: Optional[pulumi.Input[bool]] = None,
                       description: Optional[pulumi.Input[str]] = None,
                       home_location: Optional[pulumi.Input[str]] = None,
                       labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       server_id: Optional[pulumi.Input[int]] = None,
                       type: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Shared implementation behind both __init__ overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # A set opts.id means we are adopting an existing resource; input
        # properties are only valid when creating a new one.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = FloatingIpArgs.__new__(FloatingIpArgs)

            __props__.__dict__["delete_protection"] = delete_protection
            __props__.__dict__["description"] = description
            __props__.__dict__["home_location"] = home_location
            __props__.__dict__["labels"] = labels
            __props__.__dict__["name"] = name
            __props__.__dict__["server_id"] = server_id
            # `type` is the only required input; opts.urn bypasses the check
            # because the engine already knows the resource.
            if type is None and not opts.urn:
                raise TypeError("Missing required property 'type'")
            __props__.__dict__["type"] = type
            # Output-only properties start as None and are filled in by the
            # provider after creation.
            __props__.__dict__["ip_address"] = None
            __props__.__dict__["ip_network"] = None
        super(FloatingIp, __self__).__init__(
            'hcloud:index/floatingIp:FloatingIp',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            delete_protection: Optional[pulumi.Input[bool]] = None,
            description: Optional[pulumi.Input[str]] = None,
            home_location: Optional[pulumi.Input[str]] = None,
            ip_address: Optional[pulumi.Input[str]] = None,
            ip_network: Optional[pulumi.Input[str]] = None,
            labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
            name: Optional[pulumi.Input[str]] = None,
            server_id: Optional[pulumi.Input[int]] = None,
            type: Optional[pulumi.Input[str]] = None) -> 'FloatingIp':
        """
        Get an existing FloatingIp resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] delete_protection: Enable or disable delete protection.
        :param pulumi.Input[str] description: Description of the Floating IP.
        :param pulumi.Input[str] home_location: Home location (routing is optimized for that location). Optional if server_id argument is passed.
        :param pulumi.Input[str] ip_address: (string) IP Address of the Floating IP.
        :param pulumi.Input[str] ip_network: (string) IPv6 subnet. (Only set if `type` is `ipv6`)
        :param pulumi.Input[Mapping[str, Any]] labels: User-defined labels (key/value pairs) the Floating IP should be created with.
        :param pulumi.Input[str] name: Name of the Floating IP.
        :param pulumi.Input[int] server_id: Server to assign the Floating IP to.
        :param pulumi.Input[str] type: Type of the Floating IP. `ipv4` `ipv6`
        """
        # Merging the id into opts tells the engine to read existing state
        # rather than create a new resource.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _FloatingIpState.__new__(_FloatingIpState)

        __props__.__dict__["delete_protection"] = delete_protection
        __props__.__dict__["description"] = description
        __props__.__dict__["home_location"] = home_location
        __props__.__dict__["ip_address"] = ip_address
        __props__.__dict__["ip_network"] = ip_network
        __props__.__dict__["labels"] = labels
        __props__.__dict__["name"] = name
        __props__.__dict__["server_id"] = server_id
        __props__.__dict__["type"] = type
        return FloatingIp(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="deleteProtection")
    def delete_protection(self) -> pulumi.Output[Optional[bool]]:
        """
        Enable or disable delete protection.
        """
        return pulumi.get(self, "delete_protection")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        Description of the Floating IP.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="homeLocation")
    def home_location(self) -> pulumi.Output[str]:
        """
        Home location (routing is optimized for that location). Optional if server_id argument is passed.
        """
        return pulumi.get(self, "home_location")

    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> pulumi.Output[str]:
        """
        (string) IP Address of the Floating IP.
        """
        return pulumi.get(self, "ip_address")

    @property
    @pulumi.getter(name="ipNetwork")
    def ip_network(self) -> pulumi.Output[str]:
        """
        (string) IPv6 subnet. (Only set if `type` is `ipv6`)
        """
        return pulumi.get(self, "ip_network")

    @property
    @pulumi.getter
    def labels(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
        """
        User-defined labels (key/value pairs) the Floating IP should be created with.
        """
        return pulumi.get(self, "labels")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Name of the Floating IP.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="serverId")
    def server_id(self) -> pulumi.Output[int]:
        """
        Server to assign the Floating IP to.
        """
        return pulumi.get(self, "server_id")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Type of the Floating IP. `ipv4` `ipv6`
        """
        return pulumi.get(self, "type")
| 38.826742
| 145
| 0.621089
| 2,399
| 20,617
| 5.132138
| 0.078366
| 0.095598
| 0.104938
| 0.073262
| 0.845841
| 0.823343
| 0.80783
| 0.796215
| 0.788418
| 0.752355
| 0
| 0.002447
| 0.266722
| 20,617
| 530
| 146
| 38.9
| 0.811946
| 0.284377
| 0
| 0.745763
| 1
| 0
| 0.081404
| 0.002523
| 0
| 0
| 0
| 0
| 0
| 1
| 0.162712
| false
| 0.00339
| 0.016949
| 0
| 0.277966
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d3e42b5df74a355323950d246e223fe6a30a8f53
| 17
|
py
|
Python
|
examples/tuple.py
|
LayneInNL/py2flows
|
5ecb555c64350cb13c3885a78fe89a40994e9d0e
|
[
"Apache-2.0"
] | 3
|
2022-03-21T12:10:37.000Z
|
2022-03-24T13:31:19.000Z
|
examples/tuple.py
|
Robin199412/py2flows
|
52e5e5bdbd83ede4a994f2e429dac770a7926032
|
[
"Apache-2.0"
] | 1
|
2022-03-17T02:09:37.000Z
|
2022-03-17T10:08:14.000Z
|
examples/tuple.py
|
LayneInNL/py2flows
|
5ecb555c64350cb13c3885a78fe89a40994e9d0e
|
[
"Apache-2.0"
] | 1
|
2022-03-21T12:10:18.000Z
|
2022-03-21T12:10:18.000Z
|
(1, 2, 3, x + 1)
| 8.5
| 16
| 0.294118
| 5
| 17
| 1
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.363636
| 0.352941
| 17
| 1
| 17
| 17
| 0.090909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
312f942c0108e0d87b14de92b18864a7bfa0edf5
| 31,864
|
py
|
Python
|
dingtalk/python/alibabacloud_dingtalk/diot_1_0/client.py
|
aliyun/dingtalk-sdk
|
ab4f856b8cfe94f6b69f10a0730a2e5a7d4901c5
|
[
"Apache-2.0"
] | 15
|
2020-08-27T04:10:26.000Z
|
2022-03-07T06:25:42.000Z
|
dingtalk/python/alibabacloud_dingtalk/diot_1_0/client.py
|
aliyun/dingtalk-sdk
|
ab4f856b8cfe94f6b69f10a0730a2e5a7d4901c5
|
[
"Apache-2.0"
] | 1
|
2020-09-27T01:30:46.000Z
|
2021-12-29T09:15:34.000Z
|
dingtalk/python/alibabacloud_dingtalk/diot_1_0/client.py
|
aliyun/dingtalk-sdk
|
ab4f856b8cfe94f6b69f10a0730a2e5a7d4901c5
|
[
"Apache-2.0"
] | 5
|
2020-08-27T04:07:44.000Z
|
2021-12-03T02:55:20.000Z
|
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.core import TeaCore
from alibabacloud_tea_openapi.client import Client as OpenApiClient
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util.client import Client as UtilClient
from alibabacloud_dingtalk.diot_1_0 import models as dingtalkdiot__1__0_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_openapi_util.client import Client as OpenApiUtilClient
class Client(OpenApiClient):
"""
*\
"""
    def __init__(
        self,
        config: open_api_models.Config,
    ):
        """Create a diot 1.0 API client, defaulting the endpoint to the public DingTalk host."""
        super().__init__(config)
        self._endpoint_rule = ''
        # Only fall back to the default host when the caller did not configure one.
        if UtilClient.empty(self._endpoint):
            self._endpoint = 'api.dingtalk.com'
def batch_delete_device(
self,
request: dingtalkdiot__1__0_models.BatchDeleteDeviceRequest,
) -> dingtalkdiot__1__0_models.BatchDeleteDeviceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdiot__1__0_models.BatchDeleteDeviceHeaders()
return self.batch_delete_device_with_options(request, headers, runtime)
async def batch_delete_device_async(
self,
request: dingtalkdiot__1__0_models.BatchDeleteDeviceRequest,
) -> dingtalkdiot__1__0_models.BatchDeleteDeviceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdiot__1__0_models.BatchDeleteDeviceHeaders()
return await self.batch_delete_device_with_options_async(request, headers, runtime)
def batch_delete_device_with_options(
self,
request: dingtalkdiot__1__0_models.BatchDeleteDeviceRequest,
headers: dingtalkdiot__1__0_models.BatchDeleteDeviceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdiot__1__0_models.BatchDeleteDeviceResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.corp_id):
body['corpId'] = request.corp_id
if not UtilClient.is_unset(request.device_ids):
body['deviceIds'] = request.device_ids
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkdiot__1__0_models.BatchDeleteDeviceResponse(),
self.do_roarequest('BatchDeleteDevice', 'diot_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/diot/devices/remove', 'json', req, runtime)
)
async def batch_delete_device_with_options_async(
self,
request: dingtalkdiot__1__0_models.BatchDeleteDeviceRequest,
headers: dingtalkdiot__1__0_models.BatchDeleteDeviceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdiot__1__0_models.BatchDeleteDeviceResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.corp_id):
body['corpId'] = request.corp_id
if not UtilClient.is_unset(request.device_ids):
body['deviceIds'] = request.device_ids
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkdiot__1__0_models.BatchDeleteDeviceResponse(),
await self.do_roarequest_async('BatchDeleteDevice', 'diot_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/diot/devices/remove', 'json', req, runtime)
)
def push_event(
self,
request: dingtalkdiot__1__0_models.PushEventRequest,
) -> dingtalkdiot__1__0_models.PushEventResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdiot__1__0_models.PushEventHeaders()
return self.push_event_with_options(request, headers, runtime)
async def push_event_async(
self,
request: dingtalkdiot__1__0_models.PushEventRequest,
) -> dingtalkdiot__1__0_models.PushEventResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdiot__1__0_models.PushEventHeaders()
return await self.push_event_with_options_async(request, headers, runtime)
    def push_event_with_options(
        self,
        request: dingtalkdiot__1__0_models.PushEventRequest,
        headers: dingtalkdiot__1__0_models.PushEventHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdiot__1__0_models.PushEventResponse:
        """POST the set fields of *request* to /v1.0/diot/events/push and parse the reply into a PushEventResponse."""
        UtilClient.validate_model(request)
        # Serialize only fields the caller set, under their camelCase wire keys.
        body = {}
        if not UtilClient.is_unset(request.corp_id):
            body['corpId'] = request.corp_id
        if not UtilClient.is_unset(request.event_id):
            body['eventId'] = request.event_id
        if not UtilClient.is_unset(request.event_type):
            body['eventType'] = request.event_type
        if not UtilClient.is_unset(request.event_name):
            body['eventName'] = request.event_name
        if not UtilClient.is_unset(request.occurrence_time):
            body['occurrenceTime'] = request.occurrence_time
        if not UtilClient.is_unset(request.device_id):
            body['deviceId'] = request.device_id
        if not UtilClient.is_unset(request.location):
            body['location'] = request.location
        if not UtilClient.is_unset(request.msg):
            body['msg'] = request.msg
        if not UtilClient.is_unset(request.pic_urls):
            body['picUrls'] = request.pic_urls
        if not UtilClient.is_unset(request.extra_data):
            body['extraData'] = request.extra_data
        # Merge caller-provided common headers with the access token header.
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdiot__1__0_models.PushEventResponse(),
            self.do_roarequest('PushEvent', 'diot_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/diot/events/push', 'json', req, runtime)
        )
    async def push_event_with_options_async(
        self,
        request: dingtalkdiot__1__0_models.PushEventRequest,
        headers: dingtalkdiot__1__0_models.PushEventHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdiot__1__0_models.PushEventResponse:
        """Async POST of the set fields of *request* to /v1.0/diot/events/push."""
        UtilClient.validate_model(request)
        # Serialize only fields the caller set, under their camelCase wire keys.
        body = {}
        if not UtilClient.is_unset(request.corp_id):
            body['corpId'] = request.corp_id
        if not UtilClient.is_unset(request.event_id):
            body['eventId'] = request.event_id
        if not UtilClient.is_unset(request.event_type):
            body['eventType'] = request.event_type
        if not UtilClient.is_unset(request.event_name):
            body['eventName'] = request.event_name
        if not UtilClient.is_unset(request.occurrence_time):
            body['occurrenceTime'] = request.occurrence_time
        if not UtilClient.is_unset(request.device_id):
            body['deviceId'] = request.device_id
        if not UtilClient.is_unset(request.location):
            body['location'] = request.location
        if not UtilClient.is_unset(request.msg):
            body['msg'] = request.msg
        if not UtilClient.is_unset(request.pic_urls):
            body['picUrls'] = request.pic_urls
        if not UtilClient.is_unset(request.extra_data):
            body['extraData'] = request.extra_data
        # Merge caller-provided common headers with the access token header.
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdiot__1__0_models.PushEventResponse(),
            await self.do_roarequest_async('PushEvent', 'diot_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/diot/events/push', 'json', req, runtime)
        )
def device_conference(
self,
request: dingtalkdiot__1__0_models.DeviceConferenceRequest,
) -> dingtalkdiot__1__0_models.DeviceConferenceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdiot__1__0_models.DeviceConferenceHeaders()
return self.device_conference_with_options(request, headers, runtime)
async def device_conference_async(
self,
request: dingtalkdiot__1__0_models.DeviceConferenceRequest,
) -> dingtalkdiot__1__0_models.DeviceConferenceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdiot__1__0_models.DeviceConferenceHeaders()
return await self.device_conference_with_options_async(request, headers, runtime)
def device_conference_with_options(
self,
request: dingtalkdiot__1__0_models.DeviceConferenceRequest,
headers: dingtalkdiot__1__0_models.DeviceConferenceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdiot__1__0_models.DeviceConferenceResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.conf_title):
body['confTitle'] = request.conf_title
if not UtilClient.is_unset(request.conference_id):
body['conferenceId'] = request.conference_id
if not UtilClient.is_unset(request.conference_password):
body['conferencePassword'] = request.conference_password
if not UtilClient.is_unset(request.device_ids):
body['deviceIds'] = request.device_ids
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkdiot__1__0_models.DeviceConferenceResponse(),
self.do_roarequest('DeviceConference', 'diot_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/diot/deviceConferences/initiate', 'json', req, runtime)
)
async def device_conference_with_options_async(
self,
request: dingtalkdiot__1__0_models.DeviceConferenceRequest,
headers: dingtalkdiot__1__0_models.DeviceConferenceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdiot__1__0_models.DeviceConferenceResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.conf_title):
body['confTitle'] = request.conf_title
if not UtilClient.is_unset(request.conference_id):
body['conferenceId'] = request.conference_id
if not UtilClient.is_unset(request.conference_password):
body['conferencePassword'] = request.conference_password
if not UtilClient.is_unset(request.device_ids):
body['deviceIds'] = request.device_ids
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkdiot__1__0_models.DeviceConferenceResponse(),
await self.do_roarequest_async('DeviceConference', 'diot_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/diot/deviceConferences/initiate', 'json', req, runtime)
)
def register_device(
self,
request: dingtalkdiot__1__0_models.RegisterDeviceRequest,
) -> dingtalkdiot__1__0_models.RegisterDeviceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdiot__1__0_models.RegisterDeviceHeaders()
return self.register_device_with_options(request, headers, runtime)
async def register_device_async(
self,
request: dingtalkdiot__1__0_models.RegisterDeviceRequest,
) -> dingtalkdiot__1__0_models.RegisterDeviceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdiot__1__0_models.RegisterDeviceHeaders()
return await self.register_device_with_options_async(request, headers, runtime)
    def register_device_with_options(
        self,
        request: dingtalkdiot__1__0_models.RegisterDeviceRequest,
        headers: dingtalkdiot__1__0_models.RegisterDeviceHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdiot__1__0_models.RegisterDeviceResponse:
        """POST the set fields of *request* to /v1.0/diot/devices/register and parse the reply into a RegisterDeviceResponse."""
        UtilClient.validate_model(request)
        # Serialize only fields the caller set, under their camelCase wire keys.
        body = {}
        if not UtilClient.is_unset(request.corp_id):
            body['corpId'] = request.corp_id
        if not UtilClient.is_unset(request.id):
            body['id'] = request.id
        if not UtilClient.is_unset(request.device_name):
            body['deviceName'] = request.device_name
        if not UtilClient.is_unset(request.nick_name):
            body['nickName'] = request.nick_name
        if not UtilClient.is_unset(request.location):
            body['location'] = request.location
        if not UtilClient.is_unset(request.device_status):
            body['deviceStatus'] = request.device_status
        if not UtilClient.is_unset(request.device_type):
            body['deviceType'] = request.device_type
        if not UtilClient.is_unset(request.device_type_name):
            body['deviceTypeName'] = request.device_type_name
        if not UtilClient.is_unset(request.parent_id):
            body['parentId'] = request.parent_id
        if not UtilClient.is_unset(request.product_type):
            body['productType'] = request.product_type
        if not UtilClient.is_unset(request.live_url):
            body['liveUrl'] = request.live_url
        # Merge caller-provided common headers with the access token header.
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdiot__1__0_models.RegisterDeviceResponse(),
            self.do_roarequest('RegisterDevice', 'diot_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/diot/devices/register', 'json', req, runtime)
        )
    async def register_device_with_options_async(
        self,
        request: dingtalkdiot__1__0_models.RegisterDeviceRequest,
        headers: dingtalkdiot__1__0_models.RegisterDeviceHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdiot__1__0_models.RegisterDeviceResponse:
        """Async POST of the set fields of *request* to /v1.0/diot/devices/register."""
        UtilClient.validate_model(request)
        # Serialize only fields the caller set, under their camelCase wire keys.
        body = {}
        if not UtilClient.is_unset(request.corp_id):
            body['corpId'] = request.corp_id
        if not UtilClient.is_unset(request.id):
            body['id'] = request.id
        if not UtilClient.is_unset(request.device_name):
            body['deviceName'] = request.device_name
        if not UtilClient.is_unset(request.nick_name):
            body['nickName'] = request.nick_name
        if not UtilClient.is_unset(request.location):
            body['location'] = request.location
        if not UtilClient.is_unset(request.device_status):
            body['deviceStatus'] = request.device_status
        if not UtilClient.is_unset(request.device_type):
            body['deviceType'] = request.device_type
        if not UtilClient.is_unset(request.device_type_name):
            body['deviceTypeName'] = request.device_type_name
        if not UtilClient.is_unset(request.parent_id):
            body['parentId'] = request.parent_id
        if not UtilClient.is_unset(request.product_type):
            body['productType'] = request.product_type
        if not UtilClient.is_unset(request.live_url):
            body['liveUrl'] = request.live_url
        # Merge caller-provided common headers with the access token header.
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdiot__1__0_models.RegisterDeviceResponse(),
            await self.do_roarequest_async('RegisterDevice', 'diot_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/diot/devices/register', 'json', req, runtime)
        )
def batch_register_device(
self,
request: dingtalkdiot__1__0_models.BatchRegisterDeviceRequest,
) -> dingtalkdiot__1__0_models.BatchRegisterDeviceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdiot__1__0_models.BatchRegisterDeviceHeaders()
return self.batch_register_device_with_options(request, headers, runtime)
async def batch_register_device_async(
self,
request: dingtalkdiot__1__0_models.BatchRegisterDeviceRequest,
) -> dingtalkdiot__1__0_models.BatchRegisterDeviceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdiot__1__0_models.BatchRegisterDeviceHeaders()
return await self.batch_register_device_with_options_async(request, headers, runtime)
def batch_register_device_with_options(
self,
request: dingtalkdiot__1__0_models.BatchRegisterDeviceRequest,
headers: dingtalkdiot__1__0_models.BatchRegisterDeviceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdiot__1__0_models.BatchRegisterDeviceResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.corp_id):
body['corpId'] = request.corp_id
if not UtilClient.is_unset(request.devices):
body['devices'] = request.devices
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkdiot__1__0_models.BatchRegisterDeviceResponse(),
self.do_roarequest('BatchRegisterDevice', 'diot_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/diot/devices/registrations/batch', 'json', req, runtime)
)
async def batch_register_device_with_options_async(
self,
request: dingtalkdiot__1__0_models.BatchRegisterDeviceRequest,
headers: dingtalkdiot__1__0_models.BatchRegisterDeviceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdiot__1__0_models.BatchRegisterDeviceResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.corp_id):
body['corpId'] = request.corp_id
if not UtilClient.is_unset(request.devices):
body['devices'] = request.devices
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkdiot__1__0_models.BatchRegisterDeviceResponse(),
await self.do_roarequest_async('BatchRegisterDevice', 'diot_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/diot/devices/registrations/batch', 'json', req, runtime)
)
def batch_register_event_type(
self,
request: dingtalkdiot__1__0_models.BatchRegisterEventTypeRequest,
) -> dingtalkdiot__1__0_models.BatchRegisterEventTypeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdiot__1__0_models.BatchRegisterEventTypeHeaders()
return self.batch_register_event_type_with_options(request, headers, runtime)
async def batch_register_event_type_async(
self,
request: dingtalkdiot__1__0_models.BatchRegisterEventTypeRequest,
) -> dingtalkdiot__1__0_models.BatchRegisterEventTypeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdiot__1__0_models.BatchRegisterEventTypeHeaders()
return await self.batch_register_event_type_with_options_async(request, headers, runtime)
def batch_register_event_type_with_options(
    self,
    request: dingtalkdiot__1__0_models.BatchRegisterEventTypeRequest,
    headers: dingtalkdiot__1__0_models.BatchRegisterEventTypeHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkdiot__1__0_models.BatchRegisterEventTypeResponse:
    """Register a batch of event types, with explicit headers and runtime options.

    Validates ``request``, copies each set field into the JSON body, merges
    the caller's common headers with the access-token header, then issues
    the synchronous ROA request.
    """
    UtilClient.validate_model(request)
    body = {}
    for field, value in (
        ('corpId', request.corp_id),
        ('eventTypes', request.event_types),
    ):
        if not UtilClient.is_unset(value):
            body[field] = value
    real_headers = headers.common_headers if not UtilClient.is_unset(headers.common_headers) else {}
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        body=OpenApiUtilClient.parse_to_map(body),
    )
    return TeaCore.from_map(
        dingtalkdiot__1__0_models.BatchRegisterEventTypeResponse(),
        self.do_roarequest('BatchRegisterEventType', 'diot_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/diot/eventTypes/registrations/batch', 'json', req, runtime),
    )
async def batch_register_event_type_with_options_async(
    self,
    request: dingtalkdiot__1__0_models.BatchRegisterEventTypeRequest,
    headers: dingtalkdiot__1__0_models.BatchRegisterEventTypeHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkdiot__1__0_models.BatchRegisterEventTypeResponse:
    """Register a batch of event types (async), with explicit headers and runtime options.

    Async counterpart of ``batch_register_event_type_with_options``: same
    body/header preparation, but the ROA request is awaited.
    """
    UtilClient.validate_model(request)
    body = {}
    for field, value in (
        ('corpId', request.corp_id),
        ('eventTypes', request.event_types),
    ):
        if not UtilClient.is_unset(value):
            body[field] = value
    real_headers = headers.common_headers if not UtilClient.is_unset(headers.common_headers) else {}
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        body=OpenApiUtilClient.parse_to_map(body),
    )
    return TeaCore.from_map(
        dingtalkdiot__1__0_models.BatchRegisterEventTypeResponse(),
        await self.do_roarequest_async('BatchRegisterEventType', 'diot_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/diot/eventTypes/registrations/batch', 'json', req, runtime),
    )
def batch_update_device(
    self,
    request: dingtalkdiot__1__0_models.BatchUpdateDeviceRequest,
) -> dingtalkdiot__1__0_models.BatchUpdateDeviceResponse:
    """Update a batch of devices using default headers and runtime options."""
    default_runtime = util_models.RuntimeOptions()
    default_headers = dingtalkdiot__1__0_models.BatchUpdateDeviceHeaders()
    return self.batch_update_device_with_options(request, default_headers, default_runtime)
async def batch_update_device_async(
    self,
    request: dingtalkdiot__1__0_models.BatchUpdateDeviceRequest,
) -> dingtalkdiot__1__0_models.BatchUpdateDeviceResponse:
    """Update a batch of devices (async) using default headers and runtime options."""
    default_runtime = util_models.RuntimeOptions()
    default_headers = dingtalkdiot__1__0_models.BatchUpdateDeviceHeaders()
    return await self.batch_update_device_with_options_async(request, default_headers, default_runtime)
def batch_update_device_with_options(
    self,
    request: dingtalkdiot__1__0_models.BatchUpdateDeviceRequest,
    headers: dingtalkdiot__1__0_models.BatchUpdateDeviceHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkdiot__1__0_models.BatchUpdateDeviceResponse:
    """Update a batch of devices, with explicit headers and runtime options.

    Validates ``request``, copies each set field into the JSON body, merges
    the caller's common headers with the access-token header, then issues
    the synchronous ROA PUT request.
    """
    UtilClient.validate_model(request)
    body = {}
    for field, value in (
        ('corpId', request.corp_id),
        ('devices', request.devices),
    ):
        if not UtilClient.is_unset(value):
            body[field] = value
    real_headers = headers.common_headers if not UtilClient.is_unset(headers.common_headers) else {}
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        body=OpenApiUtilClient.parse_to_map(body),
    )
    return TeaCore.from_map(
        dingtalkdiot__1__0_models.BatchUpdateDeviceResponse(),
        self.do_roarequest('BatchUpdateDevice', 'diot_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/diot/devices/batch', 'json', req, runtime),
    )
async def batch_update_device_with_options_async(
    self,
    request: dingtalkdiot__1__0_models.BatchUpdateDeviceRequest,
    headers: dingtalkdiot__1__0_models.BatchUpdateDeviceHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkdiot__1__0_models.BatchUpdateDeviceResponse:
    """Update a batch of devices (async), with explicit headers and runtime options.

    Async counterpart of ``batch_update_device_with_options``: same
    body/header preparation, but the ROA PUT request is awaited.
    """
    UtilClient.validate_model(request)
    body = {}
    for field, value in (
        ('corpId', request.corp_id),
        ('devices', request.devices),
    ):
        if not UtilClient.is_unset(value):
            body[field] = value
    real_headers = headers.common_headers if not UtilClient.is_unset(headers.common_headers) else {}
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        body=OpenApiUtilClient.parse_to_map(body),
    )
    return TeaCore.from_map(
        dingtalkdiot__1__0_models.BatchUpdateDeviceResponse(),
        await self.do_roarequest_async('BatchUpdateDevice', 'diot_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/diot/devices/batch', 'json', req, runtime),
    )
def bind_system(
    self,
    request: dingtalkdiot__1__0_models.BindSystemRequest,
) -> dingtalkdiot__1__0_models.BindSystemResponse:
    """Bind an external system using default headers and runtime options."""
    default_runtime = util_models.RuntimeOptions()
    default_headers = dingtalkdiot__1__0_models.BindSystemHeaders()
    return self.bind_system_with_options(request, default_headers, default_runtime)
async def bind_system_async(
    self,
    request: dingtalkdiot__1__0_models.BindSystemRequest,
) -> dingtalkdiot__1__0_models.BindSystemResponse:
    """Bind an external system (async) using default headers and runtime options."""
    default_runtime = util_models.RuntimeOptions()
    default_headers = dingtalkdiot__1__0_models.BindSystemHeaders()
    return await self.bind_system_with_options_async(request, default_headers, default_runtime)
def bind_system_with_options(
    self,
    request: dingtalkdiot__1__0_models.BindSystemRequest,
    headers: dingtalkdiot__1__0_models.BindSystemHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkdiot__1__0_models.BindSystemResponse:
    """Bind an external system, with explicit headers and runtime options.

    Validates ``request``, copies each set field into the JSON body, merges
    the caller's common headers with the access-token header, then issues
    the synchronous ROA request.
    """
    UtilClient.validate_model(request)
    body = {}
    for field, value in (
        ('corpId', request.corp_id),
        ('authCode', request.auth_code),
        ('clientId', request.client_id),
        ('clientName', request.client_name),
        ('extraData', request.extra_data),
    ):
        if not UtilClient.is_unset(value):
            body[field] = value
    real_headers = headers.common_headers if not UtilClient.is_unset(headers.common_headers) else {}
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        body=OpenApiUtilClient.parse_to_map(body),
    )
    return TeaCore.from_map(
        dingtalkdiot__1__0_models.BindSystemResponse(),
        self.do_roarequest('BindSystem', 'diot_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/diot/systems/bind', 'json', req, runtime),
    )
async def bind_system_with_options_async(
    self,
    request: dingtalkdiot__1__0_models.BindSystemRequest,
    headers: dingtalkdiot__1__0_models.BindSystemHeaders,
    runtime: util_models.RuntimeOptions,
) -> dingtalkdiot__1__0_models.BindSystemResponse:
    """Bind an external system (async), with explicit headers and runtime options.

    Async counterpart of ``bind_system_with_options``: same body/header
    preparation, but the ROA request is awaited.
    """
    UtilClient.validate_model(request)
    body = {}
    for field, value in (
        ('corpId', request.corp_id),
        ('authCode', request.auth_code),
        ('clientId', request.client_id),
        ('clientName', request.client_name),
        ('extraData', request.extra_data),
    ):
        if not UtilClient.is_unset(value):
            body[field] = value
    real_headers = headers.common_headers if not UtilClient.is_unset(headers.common_headers) else {}
    if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
        real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
    req = open_api_models.OpenApiRequest(
        headers=real_headers,
        body=OpenApiUtilClient.parse_to_map(body),
    )
    return TeaCore.from_map(
        dingtalkdiot__1__0_models.BindSystemResponse(),
        await self.do_roarequest_async('BindSystem', 'diot_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/diot/systems/bind', 'json', req, runtime),
    )
| 48.499239
| 170
| 0.693196
| 3,522
| 31,864
| 5.875923
| 0.050256
| 0.012563
| 0.076444
| 0.109205
| 0.96748
| 0.940034
| 0.932689
| 0.915294
| 0.909785
| 0.87567
| 0
| 0.011779
| 0.21937
| 31,864
| 656
| 171
| 48.573171
| 0.820214
| 0.002511
| 0
| 0.819805
| 1
| 0
| 0.068115
| 0.029902
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027597
| false
| 0.006494
| 0.011364
| 0
| 0.092532
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
31365f39e65e8cf2c68110862600cfce569c1528
| 209,510
|
py
|
Python
|
xarray/core/_reductions.py
|
tovogt/xarray
|
95bb9ae4233c16639682a532c14b26a3ea2728f3
|
[
"Apache-2.0"
] | null | null | null |
xarray/core/_reductions.py
|
tovogt/xarray
|
95bb9ae4233c16639682a532c14b26a3ea2728f3
|
[
"Apache-2.0"
] | 3
|
2022-03-22T20:52:33.000Z
|
2022-03-22T20:52:36.000Z
|
xarray/core/_reductions.py
|
tovogt/xarray
|
95bb9ae4233c16639682a532c14b26a3ea2728f3
|
[
"Apache-2.0"
] | null | null | null |
"""Mixin classes with reduction operations."""
# This file was generated using xarray.util.generate_reductions. Do not edit manually.
from typing import TYPE_CHECKING, Any, Callable, Hashable, Optional, Sequence, Union
from . import duck_array_ops
if TYPE_CHECKING:
from .dataarray import DataArray
from .dataset import Dataset
class DatasetReductions:
__slots__ = ()
def reduce(
    self,
    func: Callable[..., Any],
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    axis: Union[None, int, Sequence[int]] = None,
    keep_attrs: bool = None,
    keepdims: bool = False,
    **kwargs: Any,
) -> "Dataset":
    """Abstract reduction hook: apply ``func`` over ``dim``/``axis``.

    Every aggregation in this mixin (``count``, ``sum``, ``mean``, ...)
    delegates to this method; the concrete implementation is provided by
    the class that mixes ``DatasetReductions`` in (presumably ``Dataset``
    — TODO confirm against the full file).
    """
    raise NotImplementedError()
def count(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    keep_attrs: bool = None,
    **kwargs,
) -> "Dataset":
    """
    Reduce this Dataset's data by applying ``count`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``count`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : Dataset
        New Dataset with ``count`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.count
    dask.array.count
    DataArray.count
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> ds = xr.Dataset(dict(da=da))
    >>> ds
    <xarray.Dataset>
    Dimensions:  (time: 6)
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
    Data variables:
        da       (time) float64 1.0 2.0 3.0 1.0 2.0 nan

    >>> ds.count()
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       int64 5
    """
    return self.reduce(
        duck_array_ops.count,
        dim=dim,
        numeric_only=False,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def all(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    keep_attrs: bool = None,
    **kwargs,
) -> "Dataset":
    """
    Reduce this Dataset's data by applying ``all`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``all`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : Dataset
        New Dataset with ``all`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.all
    dask.array.all
    DataArray.all
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([True, True, True, True, True, False], dtype=bool),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> ds = xr.Dataset(dict(da=da))
    >>> ds
    <xarray.Dataset>
    Dimensions:  (time: 6)
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
    Data variables:
        da       (time) bool True True True True True False

    >>> ds.all()
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       bool False
    """
    return self.reduce(
        duck_array_ops.array_all,
        dim=dim,
        numeric_only=False,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def any(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    keep_attrs: bool = None,
    **kwargs,
) -> "Dataset":
    """
    Reduce this Dataset's data by applying ``any`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``any`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : Dataset
        New Dataset with ``any`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.any
    dask.array.any
    DataArray.any
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([True, True, True, True, True, False], dtype=bool),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> ds = xr.Dataset(dict(da=da))
    >>> ds
    <xarray.Dataset>
    Dimensions:  (time: 6)
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
    Data variables:
        da       (time) bool True True True True True False

    >>> ds.any()
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       bool True
    """
    return self.reduce(
        duck_array_ops.array_any,
        dim=dim,
        numeric_only=False,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def max(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: bool = None,
    keep_attrs: bool = None,
    **kwargs,
) -> "Dataset":
    """
    Reduce this Dataset's data by applying ``max`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``max`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : Dataset
        New Dataset with ``max`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.max
    dask.array.max
    DataArray.max
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> ds = xr.Dataset(dict(da=da))
    >>> ds
    <xarray.Dataset>
    Dimensions:  (time: 6)
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
    Data variables:
        da       (time) float64 1.0 2.0 3.0 1.0 2.0 nan

    >>> ds.max()
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       float64 3.0

    Use ``skipna`` to control whether NaNs are ignored.

    >>> ds.max(skipna=False)
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       float64 nan
    """
    return self.reduce(
        duck_array_ops.max,
        dim=dim,
        skipna=skipna,
        numeric_only=False,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def min(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: bool = None,
    keep_attrs: bool = None,
    **kwargs,
) -> "Dataset":
    """
    Reduce this Dataset's data by applying ``min`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``min`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : Dataset
        New Dataset with ``min`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.min
    dask.array.min
    DataArray.min
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> ds = xr.Dataset(dict(da=da))
    >>> ds
    <xarray.Dataset>
    Dimensions:  (time: 6)
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
    Data variables:
        da       (time) float64 1.0 2.0 3.0 1.0 2.0 nan

    >>> ds.min()
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       float64 1.0

    Use ``skipna`` to control whether NaNs are ignored.

    >>> ds.min(skipna=False)
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       float64 nan
    """
    return self.reduce(
        duck_array_ops.min,
        dim=dim,
        skipna=skipna,
        numeric_only=False,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def mean(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: bool = None,
    keep_attrs: bool = None,
    **kwargs,
) -> "Dataset":
    """
    Reduce this Dataset's data by applying ``mean`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``mean`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : Dataset
        New Dataset with ``mean`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.mean
    dask.array.mean
    DataArray.mean
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> ds = xr.Dataset(dict(da=da))
    >>> ds
    <xarray.Dataset>
    Dimensions:  (time: 6)
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
    Data variables:
        da       (time) float64 1.0 2.0 3.0 1.0 2.0 nan

    >>> ds.mean()
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       float64 1.8

    Use ``skipna`` to control whether NaNs are ignored.

    >>> ds.mean(skipna=False)
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       float64 nan
    """
    return self.reduce(
        duck_array_ops.mean,
        dim=dim,
        skipna=skipna,
        numeric_only=True,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def prod(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: bool = None,
    min_count: Optional[int] = None,
    keep_attrs: bool = None,
    **kwargs,
) -> "Dataset":
    """
    Reduce this Dataset's data by applying ``prod`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    min_count : int, default: None
        The required number of valid values to perform the operation. If
        fewer than min_count non-NA values are present the result will be
        NA. Only used if skipna is set to True or defaults to True for the
        array's dtype. Changed in version 0.17.0: if specified on an integer
        array and skipna=True, the result will be a float array.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``prod`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : Dataset
        New Dataset with ``prod`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.prod
    dask.array.prod
    DataArray.prod
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> ds = xr.Dataset(dict(da=da))
    >>> ds
    <xarray.Dataset>
    Dimensions:  (time: 6)
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
    Data variables:
        da       (time) float64 1.0 2.0 3.0 1.0 2.0 nan

    >>> ds.prod()
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       float64 12.0

    Use ``skipna`` to control whether NaNs are ignored.

    >>> ds.prod(skipna=False)
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       float64 nan

    Specify ``min_count`` for finer control over when NaNs are ignored.

    >>> ds.prod(skipna=True, min_count=2)
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       float64 12.0
    """
    return self.reduce(
        duck_array_ops.prod,
        dim=dim,
        skipna=skipna,
        min_count=min_count,
        numeric_only=True,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def sum(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: bool = None,
    min_count: Optional[int] = None,
    keep_attrs: bool = None,
    **kwargs,
) -> "Dataset":
    """
    Reduce this Dataset's data by applying ``sum`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    min_count : int, default: None
        The required number of valid values to perform the operation. If
        fewer than min_count non-NA values are present the result will be
        NA. Only used if skipna is set to True or defaults to True for the
        array's dtype. Changed in version 0.17.0: if specified on an integer
        array and skipna=True, the result will be a float array.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``sum`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : Dataset
        New Dataset with ``sum`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.sum
    dask.array.sum
    DataArray.sum
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> ds = xr.Dataset(dict(da=da))
    >>> ds
    <xarray.Dataset>
    Dimensions:  (time: 6)
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
    Data variables:
        da       (time) float64 1.0 2.0 3.0 1.0 2.0 nan

    >>> ds.sum()
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       float64 9.0

    Use ``skipna`` to control whether NaNs are ignored.

    >>> ds.sum(skipna=False)
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       float64 nan

    Specify ``min_count`` for finer control over when NaNs are ignored.

    >>> ds.sum(skipna=True, min_count=2)
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       float64 9.0
    """
    return self.reduce(
        duck_array_ops.sum,
        dim=dim,
        skipna=skipna,
        min_count=min_count,
        numeric_only=True,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def std(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: bool = None,
    ddof: int = 0,
    keep_attrs: bool = None,
    **kwargs,
) -> "Dataset":
    """
    Reduce this Dataset's data by applying ``std`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    ddof : int, default: 0
        “Delta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``,
        where ``N`` represents the number of elements.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``std`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : Dataset
        New Dataset with ``std`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.std
    dask.array.std
    DataArray.std
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> ds = xr.Dataset(dict(da=da))
    >>> ds
    <xarray.Dataset>
    Dimensions:  (time: 6)
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
    Data variables:
        da       (time) float64 1.0 2.0 3.0 1.0 2.0 nan

    >>> ds.std()
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       float64 0.7483

    Use ``skipna`` to control whether NaNs are ignored.

    >>> ds.std(skipna=False)
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       float64 nan

    Specify ``ddof=1`` for an unbiased estimate.

    >>> ds.std(skipna=True, ddof=1)
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       float64 0.8367
    """
    return self.reduce(
        duck_array_ops.std,
        dim=dim,
        skipna=skipna,
        ddof=ddof,
        numeric_only=True,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def var(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: bool = None,
    ddof: int = 0,
    keep_attrs: bool = None,
    **kwargs,
) -> "Dataset":
    """
    Reduce this Dataset's data by applying ``var`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    ddof : int, default: 0
        “Delta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``,
        where ``N`` represents the number of elements.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``var`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : Dataset
        New Dataset with ``var`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.var
    dask.array.var
    DataArray.var
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> ds = xr.Dataset(dict(da=da))
    >>> ds
    <xarray.Dataset>
    Dimensions:  (time: 6)
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
    Data variables:
        da       (time) float64 1.0 2.0 3.0 1.0 2.0 nan

    >>> ds.var()
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       float64 0.56

    Use ``skipna`` to control whether NaNs are ignored.

    >>> ds.var(skipna=False)
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       float64 nan

    Specify ``ddof=1`` for an unbiased estimate.

    >>> ds.var(skipna=True, ddof=1)
    <xarray.Dataset>
    Dimensions:  ()
    Data variables:
        da       float64 0.7
    """
    return self.reduce(
        duck_array_ops.var,
        dim=dim,
        skipna=skipna,
        ddof=ddof,
        numeric_only=True,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def median(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: Optional[bool] = None,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "Dataset":
    """
    Reduce this Dataset's data by applying ``median`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``median`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : Dataset
        New Dataset with ``median`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.median
    dask.array.median
    DataArray.median
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> ds = xr.Dataset(dict(da=da))
    >>> ds
    <xarray.Dataset>
    Dimensions: (time: 6)
    Coordinates:
      * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
    Data variables:
        da (time) float64 1.0 2.0 3.0 1.0 2.0 nan

    >>> ds.median()
    <xarray.Dataset>
    Dimensions: ()
    Data variables:
        da float64 2.0

    Use ``skipna`` to control whether NaNs are ignored.

    >>> ds.median(skipna=False)
    <xarray.Dataset>
    Dimensions: ()
    Data variables:
        da float64 nan
    """
    # numeric_only=True: non-numeric variables are dropped before reducing.
    return self.reduce(
        duck_array_ops.median,
        dim=dim,
        skipna=skipna,
        numeric_only=True,
        keep_attrs=keep_attrs,
        **kwargs,
    )
class DataArrayReductions:
    """Mixin providing named aggregations (``count``, ``max``, ...) for DataArray."""

    __slots__ = ()

    def reduce(
        self,
        func: Callable[..., Any],
        dim: Union[None, Hashable, Sequence[Hashable]] = None,
        *,
        axis: Union[None, int, Sequence[int]] = None,
        keep_attrs: Optional[bool] = None,
        keepdims: bool = False,
        **kwargs: Any,
    ) -> "DataArray":
        """Reduce this object's data by applying ``func`` along dimension(s).

        Abstract hook: the concrete class mixing this in must override it.
        Every aggregation method on this mixin delegates to ``reduce``.
        """
        raise NotImplementedError()
def count(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``count`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``count`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``count`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.count
    dask.array.count
    Dataset.count
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1., 2., 3., 1., 2., nan])
    Coordinates:
      * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.count()
    <xarray.DataArray ()>
    array(5)
    """
    # Thin wrapper: all reduction mechanics live in ``self.reduce``.
    return self.reduce(
        duck_array_ops.count,
        dim=dim,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def all(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``all`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``all`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``all`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.all
    dask.array.all
    Dataset.all
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([True, True, True, True, True, False], dtype=bool),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> da
    <xarray.DataArray (time: 6)>
    array([ True, True, True, True, True, False])
    Coordinates:
      * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.all()
    <xarray.DataArray ()>
    array(False)
    """
    # Thin wrapper: all reduction mechanics live in ``self.reduce``.
    return self.reduce(
        duck_array_ops.array_all,
        dim=dim,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def any(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``any`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``any`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``any`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.any
    dask.array.any
    Dataset.any
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([True, True, True, True, True, False], dtype=bool),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> da
    <xarray.DataArray (time: 6)>
    array([ True, True, True, True, True, False])
    Coordinates:
      * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.any()
    <xarray.DataArray ()>
    array(True)
    """
    # Thin wrapper: all reduction mechanics live in ``self.reduce``.
    return self.reduce(
        duck_array_ops.array_any,
        dim=dim,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def max(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: Optional[bool] = None,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``max`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``max`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``max`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.max
    dask.array.max
    Dataset.max
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1., 2., 3., 1., 2., nan])
    Coordinates:
      * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.max()
    <xarray.DataArray ()>
    array(3.)

    Use ``skipna`` to control whether NaNs are ignored.

    >>> da.max(skipna=False)
    <xarray.DataArray ()>
    array(nan)
    """
    # Thin wrapper: all reduction mechanics live in ``self.reduce``.
    return self.reduce(
        duck_array_ops.max,
        dim=dim,
        skipna=skipna,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def min(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: Optional[bool] = None,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``min`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``min`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``min`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.min
    dask.array.min
    Dataset.min
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1., 2., 3., 1., 2., nan])
    Coordinates:
      * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.min()
    <xarray.DataArray ()>
    array(1.)

    Use ``skipna`` to control whether NaNs are ignored.

    >>> da.min(skipna=False)
    <xarray.DataArray ()>
    array(nan)
    """
    # Thin wrapper: all reduction mechanics live in ``self.reduce``.
    return self.reduce(
        duck_array_ops.min,
        dim=dim,
        skipna=skipna,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def mean(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: Optional[bool] = None,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``mean`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``mean`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``mean`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.mean
    dask.array.mean
    Dataset.mean
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1., 2., 3., 1., 2., nan])
    Coordinates:
      * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.mean()
    <xarray.DataArray ()>
    array(1.8)

    Use ``skipna`` to control whether NaNs are ignored.

    >>> da.mean(skipna=False)
    <xarray.DataArray ()>
    array(nan)
    """
    # Thin wrapper: all reduction mechanics live in ``self.reduce``.
    return self.reduce(
        duck_array_ops.mean,
        dim=dim,
        skipna=skipna,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def prod(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: Optional[bool] = None,
    min_count: Optional[int] = None,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``prod`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    min_count : int, default: None
        The required number of valid values to perform the operation. If
        fewer than min_count non-NA values are present the result will be
        NA. Only used if skipna is set to True or defaults to True for the
        array's dtype. Changed in version 0.17.0: if specified on an integer
        array and skipna=True, the result will be a float array.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``prod`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``prod`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.prod
    dask.array.prod
    Dataset.prod
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1., 2., 3., 1., 2., nan])
    Coordinates:
      * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.prod()
    <xarray.DataArray ()>
    array(12.)

    Use ``skipna`` to control whether NaNs are ignored.

    >>> da.prod(skipna=False)
    <xarray.DataArray ()>
    array(nan)

    Specify ``min_count`` for finer control over when NaNs are ignored.

    >>> da.prod(skipna=True, min_count=2)
    <xarray.DataArray ()>
    array(12.)
    """
    # Thin wrapper: all reduction mechanics live in ``self.reduce``.
    return self.reduce(
        duck_array_ops.prod,
        dim=dim,
        skipna=skipna,
        min_count=min_count,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def sum(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: Optional[bool] = None,
    min_count: Optional[int] = None,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``sum`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    min_count : int, default: None
        The required number of valid values to perform the operation. If
        fewer than min_count non-NA values are present the result will be
        NA. Only used if skipna is set to True or defaults to True for the
        array's dtype. Changed in version 0.17.0: if specified on an integer
        array and skipna=True, the result will be a float array.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``sum`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``sum`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.sum
    dask.array.sum
    Dataset.sum
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1., 2., 3., 1., 2., nan])
    Coordinates:
      * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.sum()
    <xarray.DataArray ()>
    array(9.)

    Use ``skipna`` to control whether NaNs are ignored.

    >>> da.sum(skipna=False)
    <xarray.DataArray ()>
    array(nan)

    Specify ``min_count`` for finer control over when NaNs are ignored.

    >>> da.sum(skipna=True, min_count=2)
    <xarray.DataArray ()>
    array(9.)
    """
    # Thin wrapper: all reduction mechanics live in ``self.reduce``.
    return self.reduce(
        duck_array_ops.sum,
        dim=dim,
        skipna=skipna,
        min_count=min_count,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def std(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: Optional[bool] = None,
    ddof: int = 0,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``std`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    ddof : int, default: 0
        “Delta Degrees of Freedom”: the divisor used in the calculation is
        ``N - ddof``, where ``N`` represents the number of elements.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``std`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``std`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.std
    dask.array.std
    Dataset.std
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1., 2., 3., 1., 2., nan])
    Coordinates:
      * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.std()
    <xarray.DataArray ()>
    array(0.74833148)

    Use ``skipna`` to control whether NaNs are ignored.

    >>> da.std(skipna=False)
    <xarray.DataArray ()>
    array(nan)

    Specify ``ddof=1`` for an unbiased estimate.

    >>> da.std(skipna=True, ddof=1)
    <xarray.DataArray ()>
    array(0.83666003)
    """
    # Thin wrapper: all reduction mechanics live in ``self.reduce``.
    return self.reduce(
        duck_array_ops.std,
        dim=dim,
        skipna=skipna,
        ddof=ddof,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def var(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: Optional[bool] = None,
    ddof: int = 0,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``var`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    ddof : int, default: 0
        “Delta Degrees of Freedom”: the divisor used in the calculation is
        ``N - ddof``, where ``N`` represents the number of elements.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``var`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``var`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.var
    dask.array.var
    Dataset.var
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1., 2., 3., 1., 2., nan])
    Coordinates:
      * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.var()
    <xarray.DataArray ()>
    array(0.56)

    Use ``skipna`` to control whether NaNs are ignored.

    >>> da.var(skipna=False)
    <xarray.DataArray ()>
    array(nan)

    Specify ``ddof=1`` for an unbiased estimate.

    >>> da.var(skipna=True, ddof=1)
    <xarray.DataArray ()>
    array(0.7)
    """
    # Thin wrapper: all reduction mechanics live in ``self.reduce``.
    return self.reduce(
        duck_array_ops.var,
        dim=dim,
        skipna=skipna,
        ddof=ddof,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def median(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: Optional[bool] = None,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``median`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``median`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``median`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.median
    dask.array.median
    Dataset.median
    :ref:`agg`
        User guide on reduction or aggregation operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1., 2., 3., 1., 2., nan])
    Coordinates:
      * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.median()
    <xarray.DataArray ()>
    array(2.)

    Use ``skipna`` to control whether NaNs are ignored.

    >>> da.median(skipna=False)
    <xarray.DataArray ()>
    array(nan)
    """
    # Thin wrapper: all reduction mechanics live in ``self.reduce``.
    return self.reduce(
        duck_array_ops.median,
        dim=dim,
        skipna=skipna,
        keep_attrs=keep_attrs,
        **kwargs,
    )
class DatasetGroupByReductions:
    """Mixin providing named aggregations for Dataset groupby objects."""

    __slots__ = ()

    def reduce(
        self,
        func: Callable[..., Any],
        dim: Union[None, Hashable, Sequence[Hashable]] = None,
        *,
        axis: Union[None, int, Sequence[int]] = None,
        keep_attrs: Optional[bool] = None,
        keepdims: bool = False,
        **kwargs: Any,
    ) -> "Dataset":
        """Reduce this object's data by applying ``func`` along dimension(s).

        Abstract hook: the concrete groupby class mixing this in must
        override it. Every aggregation method on this mixin delegates here.
        """
        raise NotImplementedError()
def count(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "Dataset":
    """
    Reduce this Dataset's data by applying ``count`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``count`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : Dataset
        New Dataset with ``count`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.count
    dask.array.count
    Dataset.count
    :ref:`groupby`
        User guide on groupby operations.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> ds = xr.Dataset(dict(da=da))
    >>> ds
    <xarray.Dataset>
    Dimensions: (time: 6)
    Coordinates:
      * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
    Data variables:
        da (time) float64 1.0 2.0 3.0 1.0 2.0 nan

    >>> ds.groupby("labels").count()
    <xarray.Dataset>
    Dimensions: (labels: 3)
    Coordinates:
      * labels (labels) object 'a' 'b' 'c'
    Data variables:
        da (labels) int64 1 2 2
    """
    # numeric_only=False: counting is defined for every dtype.
    return self.reduce(
        duck_array_ops.count,
        dim=dim,
        numeric_only=False,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def all(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "Dataset":
    """
    Reduce this Dataset's data by applying ``all`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``all`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : Dataset
        New Dataset with ``all`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.all
    dask.array.all
    Dataset.all
    :ref:`groupby`
        User guide on groupby operations.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([True, True, True, True, True, False], dtype=bool),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> ds = xr.Dataset(dict(da=da))
    >>> ds
    <xarray.Dataset>
    Dimensions: (time: 6)
    Coordinates:
      * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
    Data variables:
        da (time) bool True True True True True False

    >>> ds.groupby("labels").all()
    <xarray.Dataset>
    Dimensions: (labels: 3)
    Coordinates:
      * labels (labels) object 'a' 'b' 'c'
    Data variables:
        da (labels) bool False True True
    """
    # numeric_only=False: truth-testing is defined for every dtype.
    return self.reduce(
        duck_array_ops.array_all,
        dim=dim,
        numeric_only=False,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def any(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "Dataset":
    """
    Reduce this Dataset's data by applying ``any`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``any`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : Dataset
        New Dataset with ``any`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.any
    dask.array.any
    Dataset.any
    :ref:`groupby`
        User guide on groupby operations.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([True, True, True, True, True, False], dtype=bool),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> ds = xr.Dataset(dict(da=da))
    >>> ds
    <xarray.Dataset>
    Dimensions: (time: 6)
    Coordinates:
      * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
    Data variables:
        da (time) bool True True True True True False

    >>> ds.groupby("labels").any()
    <xarray.Dataset>
    Dimensions: (labels: 3)
    Coordinates:
      * labels (labels) object 'a' 'b' 'c'
    Data variables:
        da (labels) bool True True True
    """
    # numeric_only=False: truth-testing is defined for every dtype.
    return self.reduce(
        duck_array_ops.array_any,
        dim=dim,
        numeric_only=False,
        keep_attrs=keep_attrs,
        **kwargs,
    )
    def max(
        self,
        dim: Union[None, Hashable, Sequence[Hashable]] = None,
        *,
        skipna: Optional[bool] = None,
        keep_attrs: Optional[bool] = None,
        **kwargs,
    ) -> "Dataset":
        """
        Reduce this Dataset's data by applying ``max`` along some dimension(s).

        Parameters
        ----------
        dim : hashable or iterable of hashable, default: None
            Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"``
            or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
        skipna : bool, default: None
            If True, skip missing values (as marked by NaN). By default, only
            skips missing values for float dtypes; other dtypes either do not
            have a sentinel missing value (int) or ``skipna=True`` has not been
            implemented (object, datetime64 or timedelta64).
        keep_attrs : bool, optional
            If True, ``attrs`` will be copied from the original
            object to the new one. If False (default), the new object will be
            returned without attributes.
        **kwargs : dict
            Additional keyword arguments passed on to the appropriate array
            function for calculating ``max`` on this object's data.
            These could include dask-specific kwargs like ``split_every``.

        Returns
        -------
        reduced : Dataset
            New Dataset with ``max`` applied to its data and the
            indicated dimension(s) removed

        See Also
        --------
        numpy.max
        dask.array.max
        Dataset.max
        :ref:`groupby`
            User guide on groupby operations.

        Examples
        --------
        >>> da = xr.DataArray(
        ...     np.array([1, 2, 3, 1, 2, np.nan]),
        ...     dims="time",
        ...     coords=dict(
        ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
        ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
        ...     ),
        ... )
        >>> ds = xr.Dataset(dict(da=da))
        >>> ds
        <xarray.Dataset>
        Dimensions:  (time: 6)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
            labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
        Data variables:
            da       (time) float64 1.0 2.0 3.0 1.0 2.0 nan

        >>> ds.groupby("labels").max()
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 1.0 2.0 3.0

        Use ``skipna`` to control whether NaNs are ignored.

        >>> ds.groupby("labels").max(skipna=False)
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 nan 2.0 3.0
        """
        return self.reduce(
            duck_array_ops.max,
            dim=dim,
            skipna=skipna,
            numeric_only=False,
            keep_attrs=keep_attrs,
            **kwargs,
        )
    def min(
        self,
        dim: Union[None, Hashable, Sequence[Hashable]] = None,
        *,
        skipna: Optional[bool] = None,
        keep_attrs: Optional[bool] = None,
        **kwargs,
    ) -> "Dataset":
        """
        Reduce this Dataset's data by applying ``min`` along some dimension(s).

        Parameters
        ----------
        dim : hashable or iterable of hashable, default: None
            Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"``
            or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
        skipna : bool, default: None
            If True, skip missing values (as marked by NaN). By default, only
            skips missing values for float dtypes; other dtypes either do not
            have a sentinel missing value (int) or ``skipna=True`` has not been
            implemented (object, datetime64 or timedelta64).
        keep_attrs : bool, optional
            If True, ``attrs`` will be copied from the original
            object to the new one. If False (default), the new object will be
            returned without attributes.
        **kwargs : dict
            Additional keyword arguments passed on to the appropriate array
            function for calculating ``min`` on this object's data.
            These could include dask-specific kwargs like ``split_every``.

        Returns
        -------
        reduced : Dataset
            New Dataset with ``min`` applied to its data and the
            indicated dimension(s) removed

        See Also
        --------
        numpy.min
        dask.array.min
        Dataset.min
        :ref:`groupby`
            User guide on groupby operations.

        Examples
        --------
        >>> da = xr.DataArray(
        ...     np.array([1, 2, 3, 1, 2, np.nan]),
        ...     dims="time",
        ...     coords=dict(
        ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
        ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
        ...     ),
        ... )
        >>> ds = xr.Dataset(dict(da=da))
        >>> ds
        <xarray.Dataset>
        Dimensions:  (time: 6)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
            labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
        Data variables:
            da       (time) float64 1.0 2.0 3.0 1.0 2.0 nan

        >>> ds.groupby("labels").min()
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 1.0 2.0 1.0

        Use ``skipna`` to control whether NaNs are ignored.

        >>> ds.groupby("labels").min(skipna=False)
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 nan 2.0 1.0
        """
        return self.reduce(
            duck_array_ops.min,
            dim=dim,
            skipna=skipna,
            numeric_only=False,
            keep_attrs=keep_attrs,
            **kwargs,
        )
    def mean(
        self,
        dim: Union[None, Hashable, Sequence[Hashable]] = None,
        *,
        skipna: Optional[bool] = None,
        keep_attrs: Optional[bool] = None,
        **kwargs,
    ) -> "Dataset":
        """
        Reduce this Dataset's data by applying ``mean`` along some dimension(s).

        Parameters
        ----------
        dim : hashable or iterable of hashable, default: None
            Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"``
            or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
        skipna : bool, default: None
            If True, skip missing values (as marked by NaN). By default, only
            skips missing values for float dtypes; other dtypes either do not
            have a sentinel missing value (int) or ``skipna=True`` has not been
            implemented (object, datetime64 or timedelta64).
        keep_attrs : bool, optional
            If True, ``attrs`` will be copied from the original
            object to the new one. If False (default), the new object will be
            returned without attributes.
        **kwargs : dict
            Additional keyword arguments passed on to the appropriate array
            function for calculating ``mean`` on this object's data.
            These could include dask-specific kwargs like ``split_every``.

        Returns
        -------
        reduced : Dataset
            New Dataset with ``mean`` applied to its data and the
            indicated dimension(s) removed

        See Also
        --------
        numpy.mean
        dask.array.mean
        Dataset.mean
        :ref:`groupby`
            User guide on groupby operations.

        Notes
        -----
        Non-numeric variables will be removed prior to reducing.

        Examples
        --------
        >>> da = xr.DataArray(
        ...     np.array([1, 2, 3, 1, 2, np.nan]),
        ...     dims="time",
        ...     coords=dict(
        ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
        ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
        ...     ),
        ... )
        >>> ds = xr.Dataset(dict(da=da))
        >>> ds
        <xarray.Dataset>
        Dimensions:  (time: 6)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
            labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
        Data variables:
            da       (time) float64 1.0 2.0 3.0 1.0 2.0 nan

        >>> ds.groupby("labels").mean()
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 1.0 2.0 2.0

        Use ``skipna`` to control whether NaNs are ignored.

        >>> ds.groupby("labels").mean(skipna=False)
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 nan 2.0 2.0
        """
        return self.reduce(
            duck_array_ops.mean,
            dim=dim,
            skipna=skipna,
            numeric_only=True,
            keep_attrs=keep_attrs,
            **kwargs,
        )
    def prod(
        self,
        dim: Union[None, Hashable, Sequence[Hashable]] = None,
        *,
        skipna: Optional[bool] = None,
        min_count: Optional[int] = None,
        keep_attrs: Optional[bool] = None,
        **kwargs,
    ) -> "Dataset":
        """
        Reduce this Dataset's data by applying ``prod`` along some dimension(s).

        Parameters
        ----------
        dim : hashable or iterable of hashable, default: None
            Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"``
            or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
        skipna : bool, default: None
            If True, skip missing values (as marked by NaN). By default, only
            skips missing values for float dtypes; other dtypes either do not
            have a sentinel missing value (int) or ``skipna=True`` has not been
            implemented (object, datetime64 or timedelta64).
        min_count : int, default: None
            The required number of valid values to perform the operation. If
            fewer than min_count non-NA values are present the result will be
            NA. Only used if skipna is set to True or defaults to True for the
            array's dtype. Changed in version 0.17.0: if specified on an integer
            array and skipna=True, the result will be a float array.
        keep_attrs : bool, optional
            If True, ``attrs`` will be copied from the original
            object to the new one. If False (default), the new object will be
            returned without attributes.
        **kwargs : dict
            Additional keyword arguments passed on to the appropriate array
            function for calculating ``prod`` on this object's data.
            These could include dask-specific kwargs like ``split_every``.

        Returns
        -------
        reduced : Dataset
            New Dataset with ``prod`` applied to its data and the
            indicated dimension(s) removed

        See Also
        --------
        numpy.prod
        dask.array.prod
        Dataset.prod
        :ref:`groupby`
            User guide on groupby operations.

        Notes
        -----
        Non-numeric variables will be removed prior to reducing.

        Examples
        --------
        >>> da = xr.DataArray(
        ...     np.array([1, 2, 3, 1, 2, np.nan]),
        ...     dims="time",
        ...     coords=dict(
        ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
        ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
        ...     ),
        ... )
        >>> ds = xr.Dataset(dict(da=da))
        >>> ds
        <xarray.Dataset>
        Dimensions:  (time: 6)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
            labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
        Data variables:
            da       (time) float64 1.0 2.0 3.0 1.0 2.0 nan

        >>> ds.groupby("labels").prod()
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 1.0 4.0 3.0

        Use ``skipna`` to control whether NaNs are ignored.

        >>> ds.groupby("labels").prod(skipna=False)
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 nan 4.0 3.0

        Specify ``min_count`` for finer control over when NaNs are ignored.

        >>> ds.groupby("labels").prod(skipna=True, min_count=2)
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 nan 4.0 3.0
        """
        return self.reduce(
            duck_array_ops.prod,
            dim=dim,
            skipna=skipna,
            min_count=min_count,
            numeric_only=True,
            keep_attrs=keep_attrs,
            **kwargs,
        )
    def sum(
        self,
        dim: Union[None, Hashable, Sequence[Hashable]] = None,
        *,
        skipna: Optional[bool] = None,
        min_count: Optional[int] = None,
        keep_attrs: Optional[bool] = None,
        **kwargs,
    ) -> "Dataset":
        """
        Reduce this Dataset's data by applying ``sum`` along some dimension(s).

        Parameters
        ----------
        dim : hashable or iterable of hashable, default: None
            Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"``
            or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
        skipna : bool, default: None
            If True, skip missing values (as marked by NaN). By default, only
            skips missing values for float dtypes; other dtypes either do not
            have a sentinel missing value (int) or ``skipna=True`` has not been
            implemented (object, datetime64 or timedelta64).
        min_count : int, default: None
            The required number of valid values to perform the operation. If
            fewer than min_count non-NA values are present the result will be
            NA. Only used if skipna is set to True or defaults to True for the
            array's dtype. Changed in version 0.17.0: if specified on an integer
            array and skipna=True, the result will be a float array.
        keep_attrs : bool, optional
            If True, ``attrs`` will be copied from the original
            object to the new one. If False (default), the new object will be
            returned without attributes.
        **kwargs : dict
            Additional keyword arguments passed on to the appropriate array
            function for calculating ``sum`` on this object's data.
            These could include dask-specific kwargs like ``split_every``.

        Returns
        -------
        reduced : Dataset
            New Dataset with ``sum`` applied to its data and the
            indicated dimension(s) removed

        See Also
        --------
        numpy.sum
        dask.array.sum
        Dataset.sum
        :ref:`groupby`
            User guide on groupby operations.

        Notes
        -----
        Non-numeric variables will be removed prior to reducing.

        Examples
        --------
        >>> da = xr.DataArray(
        ...     np.array([1, 2, 3, 1, 2, np.nan]),
        ...     dims="time",
        ...     coords=dict(
        ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
        ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
        ...     ),
        ... )
        >>> ds = xr.Dataset(dict(da=da))
        >>> ds
        <xarray.Dataset>
        Dimensions:  (time: 6)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
            labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
        Data variables:
            da       (time) float64 1.0 2.0 3.0 1.0 2.0 nan

        >>> ds.groupby("labels").sum()
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 1.0 4.0 4.0

        Use ``skipna`` to control whether NaNs are ignored.

        >>> ds.groupby("labels").sum(skipna=False)
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 nan 4.0 4.0

        Specify ``min_count`` for finer control over when NaNs are ignored.

        >>> ds.groupby("labels").sum(skipna=True, min_count=2)
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 nan 4.0 4.0
        """
        return self.reduce(
            duck_array_ops.sum,
            dim=dim,
            skipna=skipna,
            min_count=min_count,
            numeric_only=True,
            keep_attrs=keep_attrs,
            **kwargs,
        )
    def std(
        self,
        dim: Union[None, Hashable, Sequence[Hashable]] = None,
        *,
        skipna: Optional[bool] = None,
        ddof: int = 0,
        keep_attrs: Optional[bool] = None,
        **kwargs,
    ) -> "Dataset":
        """
        Reduce this Dataset's data by applying ``std`` along some dimension(s).

        Parameters
        ----------
        dim : hashable or iterable of hashable, default: None
            Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"``
            or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
        skipna : bool, default: None
            If True, skip missing values (as marked by NaN). By default, only
            skips missing values for float dtypes; other dtypes either do not
            have a sentinel missing value (int) or ``skipna=True`` has not been
            implemented (object, datetime64 or timedelta64).
        ddof : int, default: 0
            “Delta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``,
            where ``N`` represents the number of elements.
        keep_attrs : bool, optional
            If True, ``attrs`` will be copied from the original
            object to the new one. If False (default), the new object will be
            returned without attributes.
        **kwargs : dict
            Additional keyword arguments passed on to the appropriate array
            function for calculating ``std`` on this object's data.
            These could include dask-specific kwargs like ``split_every``.

        Returns
        -------
        reduced : Dataset
            New Dataset with ``std`` applied to its data and the
            indicated dimension(s) removed

        See Also
        --------
        numpy.std
        dask.array.std
        Dataset.std
        :ref:`groupby`
            User guide on groupby operations.

        Notes
        -----
        Non-numeric variables will be removed prior to reducing.

        Examples
        --------
        >>> da = xr.DataArray(
        ...     np.array([1, 2, 3, 1, 2, np.nan]),
        ...     dims="time",
        ...     coords=dict(
        ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
        ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
        ...     ),
        ... )
        >>> ds = xr.Dataset(dict(da=da))
        >>> ds
        <xarray.Dataset>
        Dimensions:  (time: 6)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
            labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
        Data variables:
            da       (time) float64 1.0 2.0 3.0 1.0 2.0 nan

        >>> ds.groupby("labels").std()
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 0.0 0.0 1.0

        Use ``skipna`` to control whether NaNs are ignored.

        >>> ds.groupby("labels").std(skipna=False)
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 nan 0.0 1.0

        Specify ``ddof=1`` for an unbiased estimate.

        >>> ds.groupby("labels").std(skipna=True, ddof=1)
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 nan 0.0 1.414
        """
        return self.reduce(
            duck_array_ops.std,
            dim=dim,
            skipna=skipna,
            ddof=ddof,
            numeric_only=True,
            keep_attrs=keep_attrs,
            **kwargs,
        )
    def var(
        self,
        dim: Union[None, Hashable, Sequence[Hashable]] = None,
        *,
        skipna: Optional[bool] = None,
        ddof: int = 0,
        keep_attrs: Optional[bool] = None,
        **kwargs,
    ) -> "Dataset":
        """
        Reduce this Dataset's data by applying ``var`` along some dimension(s).

        Parameters
        ----------
        dim : hashable or iterable of hashable, default: None
            Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"``
            or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
        skipna : bool, default: None
            If True, skip missing values (as marked by NaN). By default, only
            skips missing values for float dtypes; other dtypes either do not
            have a sentinel missing value (int) or ``skipna=True`` has not been
            implemented (object, datetime64 or timedelta64).
        ddof : int, default: 0
            “Delta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``,
            where ``N`` represents the number of elements.
        keep_attrs : bool, optional
            If True, ``attrs`` will be copied from the original
            object to the new one. If False (default), the new object will be
            returned without attributes.
        **kwargs : dict
            Additional keyword arguments passed on to the appropriate array
            function for calculating ``var`` on this object's data.
            These could include dask-specific kwargs like ``split_every``.

        Returns
        -------
        reduced : Dataset
            New Dataset with ``var`` applied to its data and the
            indicated dimension(s) removed

        See Also
        --------
        numpy.var
        dask.array.var
        Dataset.var
        :ref:`groupby`
            User guide on groupby operations.

        Notes
        -----
        Non-numeric variables will be removed prior to reducing.

        Examples
        --------
        >>> da = xr.DataArray(
        ...     np.array([1, 2, 3, 1, 2, np.nan]),
        ...     dims="time",
        ...     coords=dict(
        ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
        ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
        ...     ),
        ... )
        >>> ds = xr.Dataset(dict(da=da))
        >>> ds
        <xarray.Dataset>
        Dimensions:  (time: 6)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
            labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
        Data variables:
            da       (time) float64 1.0 2.0 3.0 1.0 2.0 nan

        >>> ds.groupby("labels").var()
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 0.0 0.0 1.0

        Use ``skipna`` to control whether NaNs are ignored.

        >>> ds.groupby("labels").var(skipna=False)
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 nan 0.0 1.0

        Specify ``ddof=1`` for an unbiased estimate.

        >>> ds.groupby("labels").var(skipna=True, ddof=1)
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 nan 0.0 2.0
        """
        return self.reduce(
            duck_array_ops.var,
            dim=dim,
            skipna=skipna,
            ddof=ddof,
            numeric_only=True,
            keep_attrs=keep_attrs,
            **kwargs,
        )
    def median(
        self,
        dim: Union[None, Hashable, Sequence[Hashable]] = None,
        *,
        skipna: Optional[bool] = None,
        keep_attrs: Optional[bool] = None,
        **kwargs,
    ) -> "Dataset":
        """
        Reduce this Dataset's data by applying ``median`` along some dimension(s).

        Parameters
        ----------
        dim : hashable or iterable of hashable, default: None
            Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"``
            or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
        skipna : bool, default: None
            If True, skip missing values (as marked by NaN). By default, only
            skips missing values for float dtypes; other dtypes either do not
            have a sentinel missing value (int) or ``skipna=True`` has not been
            implemented (object, datetime64 or timedelta64).
        keep_attrs : bool, optional
            If True, ``attrs`` will be copied from the original
            object to the new one. If False (default), the new object will be
            returned without attributes.
        **kwargs : dict
            Additional keyword arguments passed on to the appropriate array
            function for calculating ``median`` on this object's data.
            These could include dask-specific kwargs like ``split_every``.

        Returns
        -------
        reduced : Dataset
            New Dataset with ``median`` applied to its data and the
            indicated dimension(s) removed

        See Also
        --------
        numpy.median
        dask.array.median
        Dataset.median
        :ref:`groupby`
            User guide on groupby operations.

        Notes
        -----
        Non-numeric variables will be removed prior to reducing.

        Examples
        --------
        >>> da = xr.DataArray(
        ...     np.array([1, 2, 3, 1, 2, np.nan]),
        ...     dims="time",
        ...     coords=dict(
        ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
        ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
        ...     ),
        ... )
        >>> ds = xr.Dataset(dict(da=da))
        >>> ds
        <xarray.Dataset>
        Dimensions:  (time: 6)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
            labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
        Data variables:
            da       (time) float64 1.0 2.0 3.0 1.0 2.0 nan

        >>> ds.groupby("labels").median()
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 1.0 2.0 2.0

        Use ``skipna`` to control whether NaNs are ignored.

        >>> ds.groupby("labels").median(skipna=False)
        <xarray.Dataset>
        Dimensions:  (labels: 3)
        Coordinates:
          * labels   (labels) object 'a' 'b' 'c'
        Data variables:
            da       (labels) float64 nan 2.0 2.0
        """
        return self.reduce(
            duck_array_ops.median,
            dim=dim,
            skipna=skipna,
            numeric_only=True,
            keep_attrs=keep_attrs,
            **kwargs,
        )
class DatasetResampleReductions:
__slots__ = ()
    def reduce(
        self,
        func: Callable[..., Any],
        dim: Union[None, Hashable, Sequence[Hashable]] = None,
        *,
        axis: Union[None, int, Sequence[int]] = None,
        keep_attrs: Optional[bool] = None,
        keepdims: bool = False,
        **kwargs: Any,
    ) -> "Dataset":
        """Reduce this object's data by applying ``func`` along dimension(s).

        Abstract hook: the concrete resample class provides the actual
        reduction machinery; every aggregation method below delegates to it.
        """
        raise NotImplementedError()
    def count(
        self,
        dim: Union[None, Hashable, Sequence[Hashable]] = None,
        *,
        keep_attrs: Optional[bool] = None,
        **kwargs,
    ) -> "Dataset":
        """
        Reduce this Dataset's data by applying ``count`` along some dimension(s).

        Parameters
        ----------
        dim : hashable or iterable of hashable, default: None
            Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"``
            or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
        keep_attrs : bool, optional
            If True, ``attrs`` will be copied from the original
            object to the new one. If False (default), the new object will be
            returned without attributes.
        **kwargs : dict
            Additional keyword arguments passed on to the appropriate array
            function for calculating ``count`` on this object's data.
            These could include dask-specific kwargs like ``split_every``.

        Returns
        -------
        reduced : Dataset
            New Dataset with ``count`` applied to its data and the
            indicated dimension(s) removed

        See Also
        --------
        numpy.count
        dask.array.count
        Dataset.count
        :ref:`resampling`
            User guide on resampling operations.

        Examples
        --------
        >>> da = xr.DataArray(
        ...     np.array([1, 2, 3, 1, 2, np.nan]),
        ...     dims="time",
        ...     coords=dict(
        ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
        ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
        ...     ),
        ... )
        >>> ds = xr.Dataset(dict(da=da))
        >>> ds
        <xarray.Dataset>
        Dimensions:  (time: 6)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
            labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
        Data variables:
            da       (time) float64 1.0 2.0 3.0 1.0 2.0 nan

        >>> ds.resample(time="3M").count()
        <xarray.Dataset>
        Dimensions:  (time: 3)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
        Data variables:
            da       (time) int64 1 3 1
        """
        return self.reduce(
            duck_array_ops.count,
            dim=dim,
            numeric_only=False,
            keep_attrs=keep_attrs,
            **kwargs,
        )
    def all(
        self,
        dim: Union[None, Hashable, Sequence[Hashable]] = None,
        *,
        keep_attrs: Optional[bool] = None,
        **kwargs,
    ) -> "Dataset":
        """
        Reduce this Dataset's data by applying ``all`` along some dimension(s).

        Parameters
        ----------
        dim : hashable or iterable of hashable, default: None
            Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"``
            or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
        keep_attrs : bool, optional
            If True, ``attrs`` will be copied from the original
            object to the new one. If False (default), the new object will be
            returned without attributes.
        **kwargs : dict
            Additional keyword arguments passed on to the appropriate array
            function for calculating ``all`` on this object's data.
            These could include dask-specific kwargs like ``split_every``.

        Returns
        -------
        reduced : Dataset
            New Dataset with ``all`` applied to its data and the
            indicated dimension(s) removed

        See Also
        --------
        numpy.all
        dask.array.all
        Dataset.all
        :ref:`resampling`
            User guide on resampling operations.

        Examples
        --------
        >>> da = xr.DataArray(
        ...     np.array([True, True, True, True, True, False], dtype=bool),
        ...     dims="time",
        ...     coords=dict(
        ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
        ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
        ...     ),
        ... )
        >>> ds = xr.Dataset(dict(da=da))
        >>> ds
        <xarray.Dataset>
        Dimensions:  (time: 6)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
            labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
        Data variables:
            da       (time) bool True True True True True False

        >>> ds.resample(time="3M").all()
        <xarray.Dataset>
        Dimensions:  (time: 3)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
        Data variables:
            da       (time) bool True True False
        """
        return self.reduce(
            duck_array_ops.array_all,
            dim=dim,
            numeric_only=False,
            keep_attrs=keep_attrs,
            **kwargs,
        )
    def any(
        self,
        dim: Union[None, Hashable, Sequence[Hashable]] = None,
        *,
        keep_attrs: Optional[bool] = None,
        **kwargs,
    ) -> "Dataset":
        """
        Reduce this Dataset's data by applying ``any`` along some dimension(s).

        Parameters
        ----------
        dim : hashable or iterable of hashable, default: None
            Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"``
            or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
        keep_attrs : bool, optional
            If True, ``attrs`` will be copied from the original
            object to the new one. If False (default), the new object will be
            returned without attributes.
        **kwargs : dict
            Additional keyword arguments passed on to the appropriate array
            function for calculating ``any`` on this object's data.
            These could include dask-specific kwargs like ``split_every``.

        Returns
        -------
        reduced : Dataset
            New Dataset with ``any`` applied to its data and the
            indicated dimension(s) removed

        See Also
        --------
        numpy.any
        dask.array.any
        Dataset.any
        :ref:`resampling`
            User guide on resampling operations.

        Examples
        --------
        >>> da = xr.DataArray(
        ...     np.array([True, True, True, True, True, False], dtype=bool),
        ...     dims="time",
        ...     coords=dict(
        ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
        ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
        ...     ),
        ... )
        >>> ds = xr.Dataset(dict(da=da))
        >>> ds
        <xarray.Dataset>
        Dimensions:  (time: 6)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
            labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
        Data variables:
            da       (time) bool True True True True True False

        >>> ds.resample(time="3M").any()
        <xarray.Dataset>
        Dimensions:  (time: 3)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
        Data variables:
            da       (time) bool True True True
        """
        return self.reduce(
            duck_array_ops.array_any,
            dim=dim,
            numeric_only=False,
            keep_attrs=keep_attrs,
            **kwargs,
        )
    def max(
        self,
        dim: Union[None, Hashable, Sequence[Hashable]] = None,
        *,
        skipna: Optional[bool] = None,
        keep_attrs: Optional[bool] = None,
        **kwargs,
    ) -> "Dataset":
        """
        Reduce this Dataset's data by applying ``max`` along some dimension(s).

        Parameters
        ----------
        dim : hashable or iterable of hashable, default: None
            Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"``
            or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
        skipna : bool, default: None
            If True, skip missing values (as marked by NaN). By default, only
            skips missing values for float dtypes; other dtypes either do not
            have a sentinel missing value (int) or ``skipna=True`` has not been
            implemented (object, datetime64 or timedelta64).
        keep_attrs : bool, optional
            If True, ``attrs`` will be copied from the original
            object to the new one. If False (default), the new object will be
            returned without attributes.
        **kwargs : dict
            Additional keyword arguments passed on to the appropriate array
            function for calculating ``max`` on this object's data.
            These could include dask-specific kwargs like ``split_every``.

        Returns
        -------
        reduced : Dataset
            New Dataset with ``max`` applied to its data and the
            indicated dimension(s) removed

        See Also
        --------
        numpy.max
        dask.array.max
        Dataset.max
        :ref:`resampling`
            User guide on resampling operations.

        Examples
        --------
        >>> da = xr.DataArray(
        ...     np.array([1, 2, 3, 1, 2, np.nan]),
        ...     dims="time",
        ...     coords=dict(
        ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
        ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
        ...     ),
        ... )
        >>> ds = xr.Dataset(dict(da=da))
        >>> ds
        <xarray.Dataset>
        Dimensions:  (time: 6)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
            labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
        Data variables:
            da       (time) float64 1.0 2.0 3.0 1.0 2.0 nan

        >>> ds.resample(time="3M").max()
        <xarray.Dataset>
        Dimensions:  (time: 3)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
        Data variables:
            da       (time) float64 1.0 3.0 2.0

        Use ``skipna`` to control whether NaNs are ignored.

        >>> ds.resample(time="3M").max(skipna=False)
        <xarray.Dataset>
        Dimensions:  (time: 3)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
        Data variables:
            da       (time) float64 1.0 3.0 nan
        """
        return self.reduce(
            duck_array_ops.max,
            dim=dim,
            skipna=skipna,
            numeric_only=False,
            keep_attrs=keep_attrs,
            **kwargs,
        )
    def min(
        self,
        dim: Union[None, Hashable, Sequence[Hashable]] = None,
        *,
        skipna: Optional[bool] = None,
        keep_attrs: Optional[bool] = None,
        **kwargs,
    ) -> "Dataset":
        """
        Reduce this Dataset's data by applying ``min`` along some dimension(s).

        Parameters
        ----------
        dim : hashable or iterable of hashable, default: None
            Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"``
            or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
        skipna : bool, default: None
            If True, skip missing values (as marked by NaN). By default, only
            skips missing values for float dtypes; other dtypes either do not
            have a sentinel missing value (int) or ``skipna=True`` has not been
            implemented (object, datetime64 or timedelta64).
        keep_attrs : bool, optional
            If True, ``attrs`` will be copied from the original
            object to the new one. If False (default), the new object will be
            returned without attributes.
        **kwargs : dict
            Additional keyword arguments passed on to the appropriate array
            function for calculating ``min`` on this object's data.
            These could include dask-specific kwargs like ``split_every``.

        Returns
        -------
        reduced : Dataset
            New Dataset with ``min`` applied to its data and the
            indicated dimension(s) removed

        See Also
        --------
        numpy.min
        dask.array.min
        Dataset.min
        :ref:`resampling`
            User guide on resampling operations.

        Examples
        --------
        >>> da = xr.DataArray(
        ...     np.array([1, 2, 3, 1, 2, np.nan]),
        ...     dims="time",
        ...     coords=dict(
        ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
        ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
        ...     ),
        ... )
        >>> ds = xr.Dataset(dict(da=da))
        >>> ds
        <xarray.Dataset>
        Dimensions:  (time: 6)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
            labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
        Data variables:
            da       (time) float64 1.0 2.0 3.0 1.0 2.0 nan

        >>> ds.resample(time="3M").min()
        <xarray.Dataset>
        Dimensions:  (time: 3)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
        Data variables:
            da       (time) float64 1.0 1.0 2.0

        Use ``skipna`` to control whether NaNs are ignored.

        >>> ds.resample(time="3M").min(skipna=False)
        <xarray.Dataset>
        Dimensions:  (time: 3)
        Coordinates:
          * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
        Data variables:
            da       (time) float64 1.0 1.0 nan
        """
        return self.reduce(
            duck_array_ops.min,
            dim=dim,
            skipna=skipna,
            numeric_only=False,
            keep_attrs=keep_attrs,
            **kwargs,
        )
def mean(
self,
dim: Union[None, Hashable, Sequence[Hashable]] = None,
*,
skipna: bool = None,
keep_attrs: bool = None,
**kwargs,
) -> "Dataset":
"""
Reduce this Dataset's data by applying ``mean`` along some dimension(s).
Parameters
----------
dim : hashable or iterable of hashable, default: None
Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
skipna : bool, default: None
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to the appropriate array
function for calculating ``mean`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``mean`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.mean
dask.array.mean
Dataset.mean
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 1, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset>
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 1.0 2.0 3.0 1.0 2.0 nan
>>> ds.resample(time="3M").mean()
<xarray.Dataset>
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 1.0 2.0 2.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.resample(time="3M").mean(skipna=False)
<xarray.Dataset>
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 1.0 2.0 nan
"""
return self.reduce(
duck_array_ops.mean,
dim=dim,
skipna=skipna,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
)
def prod(
self,
dim: Union[None, Hashable, Sequence[Hashable]] = None,
*,
skipna: bool = None,
min_count: Optional[int] = None,
keep_attrs: bool = None,
**kwargs,
) -> "Dataset":
"""
Reduce this Dataset's data by applying ``prod`` along some dimension(s).
Parameters
----------
dim : hashable or iterable of hashable, default: None
Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
skipna : bool, default: None
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
min_count : int, default: None
The required number of valid values to perform the operation. If
fewer than min_count non-NA values are present the result will be
NA. Only used if skipna is set to True or defaults to True for the
array's dtype. Changed in version 0.17.0: if specified on an integer
array and skipna=True, the result will be a float array.
keep_attrs : bool, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to the appropriate array
function for calculating ``prod`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``prod`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.prod
dask.array.prod
Dataset.prod
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 1, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset>
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 1.0 2.0 3.0 1.0 2.0 nan
>>> ds.resample(time="3M").prod()
<xarray.Dataset>
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 1.0 6.0 2.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.resample(time="3M").prod(skipna=False)
<xarray.Dataset>
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 1.0 6.0 nan
Specify ``min_count`` for finer control over when NaNs are ignored.
>>> ds.resample(time="3M").prod(skipna=True, min_count=2)
<xarray.Dataset>
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 nan 6.0 nan
"""
return self.reduce(
duck_array_ops.prod,
dim=dim,
skipna=skipna,
min_count=min_count,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
)
def sum(
self,
dim: Union[None, Hashable, Sequence[Hashable]] = None,
*,
skipna: bool = None,
min_count: Optional[int] = None,
keep_attrs: bool = None,
**kwargs,
) -> "Dataset":
"""
Reduce this Dataset's data by applying ``sum`` along some dimension(s).
Parameters
----------
dim : hashable or iterable of hashable, default: None
Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
skipna : bool, default: None
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
min_count : int, default: None
The required number of valid values to perform the operation. If
fewer than min_count non-NA values are present the result will be
NA. Only used if skipna is set to True or defaults to True for the
array's dtype. Changed in version 0.17.0: if specified on an integer
array and skipna=True, the result will be a float array.
keep_attrs : bool, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to the appropriate array
function for calculating ``sum`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``sum`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.sum
dask.array.sum
Dataset.sum
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 1, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset>
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 1.0 2.0 3.0 1.0 2.0 nan
>>> ds.resample(time="3M").sum()
<xarray.Dataset>
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 1.0 6.0 2.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.resample(time="3M").sum(skipna=False)
<xarray.Dataset>
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 1.0 6.0 nan
Specify ``min_count`` for finer control over when NaNs are ignored.
>>> ds.resample(time="3M").sum(skipna=True, min_count=2)
<xarray.Dataset>
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 nan 6.0 nan
"""
return self.reduce(
duck_array_ops.sum,
dim=dim,
skipna=skipna,
min_count=min_count,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
)
def std(
self,
dim: Union[None, Hashable, Sequence[Hashable]] = None,
*,
skipna: bool = None,
ddof: int = 0,
keep_attrs: bool = None,
**kwargs,
) -> "Dataset":
"""
Reduce this Dataset's data by applying ``std`` along some dimension(s).
Parameters
----------
dim : hashable or iterable of hashable, default: None
Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
skipna : bool, default: None
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
ddof : int, default: 0
“Delta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``,
where ``N`` represents the number of elements.
keep_attrs : bool, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to the appropriate array
function for calculating ``std`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``std`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.std
dask.array.std
Dataset.std
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 1, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset>
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 1.0 2.0 3.0 1.0 2.0 nan
>>> ds.resample(time="3M").std()
<xarray.Dataset>
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 0.0 0.8165 0.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.resample(time="3M").std(skipna=False)
<xarray.Dataset>
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 0.0 0.8165 nan
Specify ``ddof=1`` for an unbiased estimate.
>>> ds.resample(time="3M").std(skipna=True, ddof=1)
<xarray.Dataset>
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 nan 1.0 nan
"""
return self.reduce(
duck_array_ops.std,
dim=dim,
skipna=skipna,
ddof=ddof,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
)
def var(
self,
dim: Union[None, Hashable, Sequence[Hashable]] = None,
*,
skipna: bool = None,
ddof: int = 0,
keep_attrs: bool = None,
**kwargs,
) -> "Dataset":
"""
Reduce this Dataset's data by applying ``var`` along some dimension(s).
Parameters
----------
dim : hashable or iterable of hashable, default: None
Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
skipna : bool, default: None
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
ddof : int, default: 0
“Delta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``,
where ``N`` represents the number of elements.
keep_attrs : bool, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to the appropriate array
function for calculating ``var`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``var`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.var
dask.array.var
Dataset.var
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 1, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset>
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 1.0 2.0 3.0 1.0 2.0 nan
>>> ds.resample(time="3M").var()
<xarray.Dataset>
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 0.0 0.6667 0.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.resample(time="3M").var(skipna=False)
<xarray.Dataset>
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 0.0 0.6667 nan
Specify ``ddof=1`` for an unbiased estimate.
>>> ds.resample(time="3M").var(skipna=True, ddof=1)
<xarray.Dataset>
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 nan 1.0 nan
"""
return self.reduce(
duck_array_ops.var,
dim=dim,
skipna=skipna,
ddof=ddof,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
)
def median(
self,
dim: Union[None, Hashable, Sequence[Hashable]] = None,
*,
skipna: bool = None,
keep_attrs: bool = None,
**kwargs,
) -> "Dataset":
"""
Reduce this Dataset's data by applying ``median`` along some dimension(s).
Parameters
----------
dim : hashable or iterable of hashable, default: None
Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
skipna : bool, default: None
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to the appropriate array
function for calculating ``median`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``median`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.median
dask.array.median
Dataset.median
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 1, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset>
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 1.0 2.0 3.0 1.0 2.0 nan
>>> ds.resample(time="3M").median()
<xarray.Dataset>
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 1.0 2.0 2.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.resample(time="3M").median(skipna=False)
<xarray.Dataset>
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 1.0 2.0 nan
"""
return self.reduce(
duck_array_ops.median,
dim=dim,
skipna=skipna,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
)
class DataArrayGroupByReductions:
__slots__ = ()
def reduce(
self,
func: Callable[..., Any],
dim: Union[None, Hashable, Sequence[Hashable]] = None,
*,
axis: Union[None, int, Sequence[int]] = None,
keep_attrs: bool = None,
keepdims: bool = False,
**kwargs: Any,
) -> "DataArray":
raise NotImplementedError()
def count(
self,
dim: Union[None, Hashable, Sequence[Hashable]] = None,
*,
keep_attrs: bool = None,
**kwargs,
) -> "DataArray":
"""
Reduce this DataArray's data by applying ``count`` along some dimension(s).
Parameters
----------
dim : hashable or iterable of hashable, default: None
Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
keep_attrs : bool, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to the appropriate array
function for calculating ``count`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``count`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.count
dask.array.count
DataArray.count
:ref:`groupby`
User guide on groupby operations.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 1, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)>
array([ 1., 2., 3., 1., 2., nan])
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.groupby("labels").count()
<xarray.DataArray (labels: 3)>
array([1, 2, 2])
Coordinates:
* labels (labels) object 'a' 'b' 'c'
"""
return self.reduce(
duck_array_ops.count,
dim=dim,
keep_attrs=keep_attrs,
**kwargs,
)
def all(
self,
dim: Union[None, Hashable, Sequence[Hashable]] = None,
*,
keep_attrs: bool = None,
**kwargs,
) -> "DataArray":
"""
Reduce this DataArray's data by applying ``all`` along some dimension(s).
Parameters
----------
dim : hashable or iterable of hashable, default: None
Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
keep_attrs : bool, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to the appropriate array
function for calculating ``all`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``all`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.all
dask.array.all
DataArray.all
:ref:`groupby`
User guide on groupby operations.
Examples
--------
>>> da = xr.DataArray(
... np.array([True, True, True, True, True, False], dtype=bool),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)>
array([ True, True, True, True, True, False])
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.groupby("labels").all()
<xarray.DataArray (labels: 3)>
array([False, True, True])
Coordinates:
* labels (labels) object 'a' 'b' 'c'
"""
return self.reduce(
duck_array_ops.array_all,
dim=dim,
keep_attrs=keep_attrs,
**kwargs,
)
def any(
self,
dim: Union[None, Hashable, Sequence[Hashable]] = None,
*,
keep_attrs: bool = None,
**kwargs,
) -> "DataArray":
"""
Reduce this DataArray's data by applying ``any`` along some dimension(s).
Parameters
----------
dim : hashable or iterable of hashable, default: None
Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
keep_attrs : bool, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to the appropriate array
function for calculating ``any`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``any`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.any
dask.array.any
DataArray.any
:ref:`groupby`
User guide on groupby operations.
Examples
--------
>>> da = xr.DataArray(
... np.array([True, True, True, True, True, False], dtype=bool),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)>
array([ True, True, True, True, True, False])
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.groupby("labels").any()
<xarray.DataArray (labels: 3)>
array([ True, True, True])
Coordinates:
* labels (labels) object 'a' 'b' 'c'
"""
return self.reduce(
duck_array_ops.array_any,
dim=dim,
keep_attrs=keep_attrs,
**kwargs,
)
def max(
self,
dim: Union[None, Hashable, Sequence[Hashable]] = None,
*,
skipna: bool = None,
keep_attrs: bool = None,
**kwargs,
) -> "DataArray":
"""
Reduce this DataArray's data by applying ``max`` along some dimension(s).
Parameters
----------
dim : hashable or iterable of hashable, default: None
Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
skipna : bool, default: None
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to the appropriate array
function for calculating ``max`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``max`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.max
dask.array.max
DataArray.max
:ref:`groupby`
User guide on groupby operations.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 1, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)>
array([ 1., 2., 3., 1., 2., nan])
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.groupby("labels").max()
<xarray.DataArray (labels: 3)>
array([1., 2., 3.])
Coordinates:
* labels (labels) object 'a' 'b' 'c'
Use ``skipna`` to control whether NaNs are ignored.
>>> da.groupby("labels").max(skipna=False)
<xarray.DataArray (labels: 3)>
array([nan, 2., 3.])
Coordinates:
* labels (labels) object 'a' 'b' 'c'
"""
return self.reduce(
duck_array_ops.max,
dim=dim,
skipna=skipna,
keep_attrs=keep_attrs,
**kwargs,
)
def min(
self,
dim: Union[None, Hashable, Sequence[Hashable]] = None,
*,
skipna: bool = None,
keep_attrs: bool = None,
**kwargs,
) -> "DataArray":
"""
Reduce this DataArray's data by applying ``min`` along some dimension(s).
Parameters
----------
dim : hashable or iterable of hashable, default: None
Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
skipna : bool, default: None
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to the appropriate array
function for calculating ``min`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``min`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.min
dask.array.min
DataArray.min
:ref:`groupby`
User guide on groupby operations.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 1, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)>
array([ 1., 2., 3., 1., 2., nan])
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.groupby("labels").min()
<xarray.DataArray (labels: 3)>
array([1., 2., 1.])
Coordinates:
* labels (labels) object 'a' 'b' 'c'
Use ``skipna`` to control whether NaNs are ignored.
>>> da.groupby("labels").min(skipna=False)
<xarray.DataArray (labels: 3)>
array([nan, 2., 1.])
Coordinates:
* labels (labels) object 'a' 'b' 'c'
"""
return self.reduce(
duck_array_ops.min,
dim=dim,
skipna=skipna,
keep_attrs=keep_attrs,
**kwargs,
)
def mean(
self,
dim: Union[None, Hashable, Sequence[Hashable]] = None,
*,
skipna: bool = None,
keep_attrs: bool = None,
**kwargs,
) -> "DataArray":
"""
Reduce this DataArray's data by applying ``mean`` along some dimension(s).
Parameters
----------
dim : hashable or iterable of hashable, default: None
Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
skipna : bool, default: None
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to the appropriate array
function for calculating ``mean`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``mean`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.mean
dask.array.mean
DataArray.mean
:ref:`groupby`
User guide on groupby operations.
Notes
-----
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 1, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)>
array([ 1., 2., 3., 1., 2., nan])
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.groupby("labels").mean()
<xarray.DataArray (labels: 3)>
array([1., 2., 2.])
Coordinates:
* labels (labels) object 'a' 'b' 'c'
Use ``skipna`` to control whether NaNs are ignored.
>>> da.groupby("labels").mean(skipna=False)
<xarray.DataArray (labels: 3)>
array([nan, 2., 2.])
Coordinates:
* labels (labels) object 'a' 'b' 'c'
"""
return self.reduce(
duck_array_ops.mean,
dim=dim,
skipna=skipna,
keep_attrs=keep_attrs,
**kwargs,
)
def prod(
self,
dim: Union[None, Hashable, Sequence[Hashable]] = None,
*,
skipna: bool = None,
min_count: Optional[int] = None,
keep_attrs: bool = None,
**kwargs,
) -> "DataArray":
"""
Reduce this DataArray's data by applying ``prod`` along some dimension(s).
Parameters
----------
dim : hashable or iterable of hashable, default: None
Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
skipna : bool, default: None
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
min_count : int, default: None
The required number of valid values to perform the operation. If
fewer than min_count non-NA values are present the result will be
NA. Only used if skipna is set to True or defaults to True for the
array's dtype. Changed in version 0.17.0: if specified on an integer
array and skipna=True, the result will be a float array.
keep_attrs : bool, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to the appropriate array
function for calculating ``prod`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``prod`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.prod
dask.array.prod
DataArray.prod
:ref:`groupby`
User guide on groupby operations.
Notes
-----
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 1, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)>
array([ 1., 2., 3., 1., 2., nan])
Coordinates:
* time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.groupby("labels").prod()
<xarray.DataArray (labels: 3)>
array([1., 4., 3.])
Coordinates:
* labels (labels) object 'a' 'b' 'c'
Use ``skipna`` to control whether NaNs are ignored.
>>> da.groupby("labels").prod(skipna=False)
<xarray.DataArray (labels: 3)>
array([nan, 4., 3.])
Coordinates:
* labels (labels) object 'a' 'b' 'c'
Specify ``min_count`` for finer control over when NaNs are ignored.
>>> da.groupby("labels").prod(skipna=True, min_count=2)
<xarray.DataArray (labels: 3)>
array([nan, 4., 3.])
Coordinates:
* labels (labels) object 'a' 'b' 'c'
"""
return self.reduce(
duck_array_ops.prod,
dim=dim,
skipna=skipna,
min_count=min_count,
keep_attrs=keep_attrs,
**kwargs,
)
def sum(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: Optional[bool] = None,
    min_count: Optional[int] = None,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``sum`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    min_count : int, default: None
        The required number of valid values to perform the operation. If
        fewer than min_count non-NA values are present the result will be
        NA. Only used if skipna is set to True or defaults to True for the
        array's dtype. Changed in version 0.17.0: if specified on an integer
        array and skipna=True, the result will be a float array.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``sum`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``sum`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.sum
    dask.array.sum
    DataArray.sum
    :ref:`groupby`
        User guide on groupby operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )

    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1.,  2.,  3.,  1.,  2., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.groupby("labels").sum()
    <xarray.DataArray (labels: 3)>
    array([1., 4., 4.])
    Coordinates:
      * labels   (labels) object 'a' 'b' 'c'

    Use ``skipna`` to control whether NaNs are ignored.

    >>> da.groupby("labels").sum(skipna=False)
    <xarray.DataArray (labels: 3)>
    array([nan, 4., 4.])
    Coordinates:
      * labels   (labels) object 'a' 'b' 'c'

    Specify ``min_count`` for finer control over when NaNs are ignored.

    >>> da.groupby("labels").sum(skipna=True, min_count=2)
    <xarray.DataArray (labels: 3)>
    array([nan, 4., 4.])
    Coordinates:
      * labels   (labels) object 'a' 'b' 'c'
    """
    # Delegate to the grouped-reduce machinery with the duck-typed sum.
    return self.reduce(
        duck_array_ops.sum,
        dim=dim,
        skipna=skipna,
        min_count=min_count,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def std(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: Optional[bool] = None,
    ddof: int = 0,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``std`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    ddof : int, default: 0
        “Delta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``,
        where ``N`` represents the number of elements.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``std`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``std`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.std
    dask.array.std
    DataArray.std
    :ref:`groupby`
        User guide on groupby operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )

    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1.,  2.,  3.,  1.,  2., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.groupby("labels").std()
    <xarray.DataArray (labels: 3)>
    array([0., 0., 1.])
    Coordinates:
      * labels   (labels) object 'a' 'b' 'c'

    Use ``skipna`` to control whether NaNs are ignored.

    >>> da.groupby("labels").std(skipna=False)
    <xarray.DataArray (labels: 3)>
    array([nan, 0., 1.])
    Coordinates:
      * labels   (labels) object 'a' 'b' 'c'

    Specify ``ddof=1`` for an unbiased estimate.

    >>> da.groupby("labels").std(skipna=True, ddof=1)
    <xarray.DataArray (labels: 3)>
    array([       nan, 0.        , 1.41421356])
    Coordinates:
      * labels   (labels) object 'a' 'b' 'c'
    """
    # Delegate to the grouped-reduce machinery with the duck-typed std.
    return self.reduce(
        duck_array_ops.std,
        dim=dim,
        skipna=skipna,
        ddof=ddof,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def var(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: Optional[bool] = None,
    ddof: int = 0,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``var`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    ddof : int, default: 0
        “Delta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``,
        where ``N`` represents the number of elements.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``var`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``var`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.var
    dask.array.var
    DataArray.var
    :ref:`groupby`
        User guide on groupby operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )

    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1.,  2.,  3.,  1.,  2., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.groupby("labels").var()
    <xarray.DataArray (labels: 3)>
    array([0., 0., 1.])
    Coordinates:
      * labels   (labels) object 'a' 'b' 'c'

    Use ``skipna`` to control whether NaNs are ignored.

    >>> da.groupby("labels").var(skipna=False)
    <xarray.DataArray (labels: 3)>
    array([nan, 0., 1.])
    Coordinates:
      * labels   (labels) object 'a' 'b' 'c'

    Specify ``ddof=1`` for an unbiased estimate.

    >>> da.groupby("labels").var(skipna=True, ddof=1)
    <xarray.DataArray (labels: 3)>
    array([nan, 0., 2.])
    Coordinates:
      * labels   (labels) object 'a' 'b' 'c'
    """
    # Delegate to the grouped-reduce machinery with the duck-typed var.
    return self.reduce(
        duck_array_ops.var,
        dim=dim,
        skipna=skipna,
        ddof=ddof,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def median(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: Optional[bool] = None,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``median`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``median`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``median`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.median
    dask.array.median
    DataArray.median
    :ref:`groupby`
        User guide on groupby operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )

    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1.,  2.,  3.,  1.,  2., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.groupby("labels").median()
    <xarray.DataArray (labels: 3)>
    array([1., 2., 2.])
    Coordinates:
      * labels   (labels) object 'a' 'b' 'c'

    Use ``skipna`` to control whether NaNs are ignored.

    >>> da.groupby("labels").median(skipna=False)
    <xarray.DataArray (labels: 3)>
    array([nan, 2., 2.])
    Coordinates:
      * labels   (labels) object 'a' 'b' 'c'
    """
    # Delegate to the grouped-reduce machinery with the duck-typed median.
    return self.reduce(
        duck_array_ops.median,
        dim=dim,
        skipna=skipna,
        keep_attrs=keep_attrs,
        **kwargs,
    )
class DataArrayResampleReductions:
__slots__ = ()
def reduce(
    self,
    func: Callable[..., Any],
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    axis: Union[None, int, Sequence[int]] = None,
    keep_attrs: Optional[bool] = None,
    keepdims: bool = False,
    **kwargs: Any,
) -> "DataArray":
    # Abstract reduction hook: the concrete reduction machinery is supplied
    # by the class this mixin is combined with, so this base implementation
    # always raises.  Every named reduction method below funnels through it.
    raise NotImplementedError()
def count(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``count`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``count`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``count`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.count
    dask.array.count
    DataArray.count
    :ref:`resampling`
        User guide on resampling operations.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )

    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1.,  2.,  3.,  1.,  2., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.resample(time="3M").count()
    <xarray.DataArray (time: 3)>
    array([1, 3, 1])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
    """
    # Delegate to the resample-reduce machinery with the duck-typed count.
    return self.reduce(
        duck_array_ops.count,
        dim=dim,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def all(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``all`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``all`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``all`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.all
    dask.array.all
    DataArray.all
    :ref:`resampling`
        User guide on resampling operations.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([True, True, True, True, True, False], dtype=bool),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )

    >>> da
    <xarray.DataArray (time: 6)>
    array([ True,  True,  True,  True,  True, False])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.resample(time="3M").all()
    <xarray.DataArray (time: 3)>
    array([ True,  True, False])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
    """
    # Delegate to the resample-reduce machinery with the duck-typed all.
    return self.reduce(
        duck_array_ops.array_all,
        dim=dim,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def any(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``any`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``any`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``any`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.any
    dask.array.any
    DataArray.any
    :ref:`resampling`
        User guide on resampling operations.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([True, True, True, True, True, False], dtype=bool),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )

    >>> da
    <xarray.DataArray (time: 6)>
    array([ True,  True,  True,  True,  True, False])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.resample(time="3M").any()
    <xarray.DataArray (time: 3)>
    array([ True,  True,  True])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
    """
    # Delegate to the resample-reduce machinery with the duck-typed any.
    return self.reduce(
        duck_array_ops.array_any,
        dim=dim,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def max(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: Optional[bool] = None,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``max`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``max`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``max`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.max
    dask.array.max
    DataArray.max
    :ref:`resampling`
        User guide on resampling operations.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )

    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1.,  2.,  3.,  1.,  2., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.resample(time="3M").max()
    <xarray.DataArray (time: 3)>
    array([1., 3., 2.])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31

    Use ``skipna`` to control whether NaNs are ignored.

    >>> da.resample(time="3M").max(skipna=False)
    <xarray.DataArray (time: 3)>
    array([ 1.,  3., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
    """
    # Delegate to the resample-reduce machinery with the duck-typed max.
    return self.reduce(
        duck_array_ops.max,
        dim=dim,
        skipna=skipna,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def min(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: Optional[bool] = None,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``min`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``min`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``min`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.min
    dask.array.min
    DataArray.min
    :ref:`resampling`
        User guide on resampling operations.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )

    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1.,  2.,  3.,  1.,  2., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.resample(time="3M").min()
    <xarray.DataArray (time: 3)>
    array([1., 1., 2.])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31

    Use ``skipna`` to control whether NaNs are ignored.

    >>> da.resample(time="3M").min(skipna=False)
    <xarray.DataArray (time: 3)>
    array([ 1.,  1., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
    """
    # Delegate to the resample-reduce machinery with the duck-typed min.
    return self.reduce(
        duck_array_ops.min,
        dim=dim,
        skipna=skipna,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def mean(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: Optional[bool] = None,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``mean`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``mean`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``mean`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.mean
    dask.array.mean
    DataArray.mean
    :ref:`resampling`
        User guide on resampling operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )

    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1.,  2.,  3.,  1.,  2., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.resample(time="3M").mean()
    <xarray.DataArray (time: 3)>
    array([1., 2., 2.])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31

    Use ``skipna`` to control whether NaNs are ignored.

    >>> da.resample(time="3M").mean(skipna=False)
    <xarray.DataArray (time: 3)>
    array([ 1.,  2., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
    """
    # Delegate to the resample-reduce machinery with the duck-typed mean.
    return self.reduce(
        duck_array_ops.mean,
        dim=dim,
        skipna=skipna,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def prod(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: Optional[bool] = None,
    min_count: Optional[int] = None,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``prod`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    min_count : int, default: None
        The required number of valid values to perform the operation. If
        fewer than min_count non-NA values are present the result will be
        NA. Only used if skipna is set to True or defaults to True for the
        array's dtype. Changed in version 0.17.0: if specified on an integer
        array and skipna=True, the result will be a float array.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``prod`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``prod`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.prod
    dask.array.prod
    DataArray.prod
    :ref:`resampling`
        User guide on resampling operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )

    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1.,  2.,  3.,  1.,  2., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.resample(time="3M").prod()
    <xarray.DataArray (time: 3)>
    array([1., 6., 2.])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31

    Use ``skipna`` to control whether NaNs are ignored.

    >>> da.resample(time="3M").prod(skipna=False)
    <xarray.DataArray (time: 3)>
    array([ 1.,  6., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31

    Specify ``min_count`` for finer control over when NaNs are ignored.

    >>> da.resample(time="3M").prod(skipna=True, min_count=2)
    <xarray.DataArray (time: 3)>
    array([nan,  6., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
    """
    # Delegate to the resample-reduce machinery with the duck-typed prod.
    return self.reduce(
        duck_array_ops.prod,
        dim=dim,
        skipna=skipna,
        min_count=min_count,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def sum(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: Optional[bool] = None,
    min_count: Optional[int] = None,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``sum`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    min_count : int, default: None
        The required number of valid values to perform the operation. If
        fewer than min_count non-NA values are present the result will be
        NA. Only used if skipna is set to True or defaults to True for the
        array's dtype. Changed in version 0.17.0: if specified on an integer
        array and skipna=True, the result will be a float array.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``sum`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``sum`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.sum
    dask.array.sum
    DataArray.sum
    :ref:`resampling`
        User guide on resampling operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )

    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1.,  2.,  3.,  1.,  2., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.resample(time="3M").sum()
    <xarray.DataArray (time: 3)>
    array([1., 6., 2.])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31

    Use ``skipna`` to control whether NaNs are ignored.

    >>> da.resample(time="3M").sum(skipna=False)
    <xarray.DataArray (time: 3)>
    array([ 1.,  6., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31

    Specify ``min_count`` for finer control over when NaNs are ignored.

    >>> da.resample(time="3M").sum(skipna=True, min_count=2)
    <xarray.DataArray (time: 3)>
    array([nan,  6., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
    """
    # Delegate to the resample-reduce machinery with the duck-typed sum.
    return self.reduce(
        duck_array_ops.sum,
        dim=dim,
        skipna=skipna,
        min_count=min_count,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def std(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: Optional[bool] = None,
    ddof: int = 0,
    keep_attrs: Optional[bool] = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``std`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, will reduce over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, only
        skips missing values for float dtypes; other dtypes either do not
        have a sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    ddof : int, default: 0
        “Delta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``,
        where ``N`` represents the number of elements.
    keep_attrs : bool, optional
        If True, ``attrs`` will be copied from the original
        object to the new one. If False (default), the new object will be
        returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``std`` on this object's data.
        These could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``std`` applied to its data and the
        indicated dimension(s) removed

    See Also
    --------
    numpy.std
    dask.array.std
    DataArray.std
    :ref:`resampling`
        User guide on resampling operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )

    >>> da
    <xarray.DataArray (time: 6)>
    array([ 1.,  2.,  3.,  1.,  2., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30
        labels   (time) <U1 'a' 'b' 'c' 'c' 'b' 'a'

    >>> da.resample(time="3M").std()
    <xarray.DataArray (time: 3)>
    array([0.        , 0.81649658, 0.        ])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31

    Use ``skipna`` to control whether NaNs are ignored.

    >>> da.resample(time="3M").std(skipna=False)
    <xarray.DataArray (time: 3)>
    array([0.        , 0.81649658,        nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31

    Specify ``ddof=1`` for an unbiased estimate.

    >>> da.resample(time="3M").std(skipna=True, ddof=1)
    <xarray.DataArray (time: 3)>
    array([nan,  1., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
    """
    # Delegate to the resample-reduce machinery with the duck-typed std.
    return self.reduce(
        duck_array_ops.std,
        dim=dim,
        skipna=skipna,
        ddof=ddof,
        keep_attrs=keep_attrs,
        **kwargs,
    )
def var(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: bool = None,
    ddof: int = 0,
    keep_attrs: bool = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``var`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``var``, e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, reduces over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, missing
        values are only skipped for float dtypes; other dtypes either have no
        sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    ddof : int, default: 0
        "Delta Degrees of Freedom": the divisor used in the calculation is
        ``N - ddof``, where ``N`` represents the number of elements.
    keep_attrs : bool, optional
        If True, ``attrs`` are copied from the original object to the new
        one. If False (default), the new object is returned without
        attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``var`` on this object's data. These could
        include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``var`` applied to its data and the indicated
        dimension(s) removed.

    See Also
    --------
    numpy.var
    dask.array.var
    DataArray.var
    :ref:`resampling`
        User guide on resampling operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> da.resample(time="3M").var()
    <xarray.DataArray (time: 3)>
    array([0.        , 0.66666667, 0.        ])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31

    Use ``skipna`` to control whether NaNs are ignored; specify ``ddof=1``
    for an unbiased estimate:

    >>> da.resample(time="3M").var(skipna=True, ddof=1)
    <xarray.DataArray (time: 3)>
    array([nan,  1., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
    """
    # Forward everything to the shared reduce() machinery; duck_array_ops
    # dispatches to numpy or dask depending on the underlying data.
    reduce_kwargs = dict(dim=dim, skipna=skipna, ddof=ddof, keep_attrs=keep_attrs)
    return self.reduce(duck_array_ops.var, **reduce_kwargs, **kwargs)
def median(
    self,
    dim: Union[None, Hashable, Sequence[Hashable]] = None,
    *,
    skipna: bool = None,
    keep_attrs: bool = None,
    **kwargs,
) -> "DataArray":
    """
    Reduce this DataArray's data by applying ``median`` along some dimension(s).

    Parameters
    ----------
    dim : hashable or iterable of hashable, default: None
        Name of dimension[s] along which to apply ``median``, e.g. ``dim="x"``
        or ``dim=["x", "y"]``. If None, reduces over all dimensions.
    skipna : bool, default: None
        If True, skip missing values (as marked by NaN). By default, missing
        values are only skipped for float dtypes; other dtypes either have no
        sentinel missing value (int) or ``skipna=True`` has not been
        implemented (object, datetime64 or timedelta64).
    keep_attrs : bool, optional
        If True, ``attrs`` are copied from the original object to the new
        one. If False (default), the new object is returned without
        attributes.
    **kwargs : dict
        Additional keyword arguments passed on to the appropriate array
        function for calculating ``median`` on this object's data. These
        could include dask-specific kwargs like ``split_every``.

    Returns
    -------
    reduced : DataArray
        New DataArray with ``median`` applied to its data and the indicated
        dimension(s) removed.

    See Also
    --------
    numpy.median
    dask.array.median
    DataArray.median
    :ref:`resampling`
        User guide on resampling operations.

    Notes
    -----
    Non-numeric variables will be removed prior to reducing.

    Examples
    --------
    >>> da = xr.DataArray(
    ...     np.array([1, 2, 3, 1, 2, np.nan]),
    ...     dims="time",
    ...     coords=dict(
    ...         time=("time", pd.date_range("01-01-2001", freq="M", periods=6)),
    ...         labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ...     ),
    ... )
    >>> da.resample(time="3M").median()
    <xarray.DataArray (time: 3)>
    array([1., 2., 2.])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31

    Use ``skipna`` to control whether NaNs are ignored:

    >>> da.resample(time="3M").median(skipna=False)
    <xarray.DataArray (time: 3)>
    array([ 1.,  2., nan])
    Coordinates:
      * time     (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31
    """
    # Forward everything to the shared reduce() machinery; duck_array_ops
    # dispatches to numpy or dask depending on the underlying data.
    reduce_kwargs = dict(dim=dim, skipna=skipna, keep_attrs=keep_attrs)
    return self.reduce(duck_array_ops.median, **reduce_kwargs, **kwargs)
| 35.005848
| 92
| 0.511298
| 24,454
| 209,510
| 4.352253
| 0.010837
| 0.022832
| 0.005017
| 0.004961
| 0.991891
| 0.990482
| 0.989242
| 0.98476
| 0.97978
| 0.973786
| 0
| 0.039714
| 0.357964
| 209,510
| 5,984
| 93
| 35.011698
| 0.751511
| 0.695523
| 0
| 0.931127
| 1
| 0
| 0.019384
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064401
| false
| 0
| 0.003578
| 0
| 0.137746
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
316f33673207a0ae4a0b7b2dd77f61583358eea6
| 127
|
py
|
Python
|
lib/clients/channels/__init__.py
|
cookieisland/cabernet
|
9f429fe7a75707da97133b7ec4b3cf6b7aaec6cd
|
[
"MIT"
] | 16
|
2021-08-30T07:05:28.000Z
|
2022-03-04T06:46:42.000Z
|
lib/clients/channels/__init__.py
|
cookieisland/cabernet
|
9f429fe7a75707da97133b7ec4b3cf6b7aaec6cd
|
[
"MIT"
] | 14
|
2021-02-20T22:24:49.000Z
|
2021-08-30T01:24:02.000Z
|
lib/clients/channels/__init__.py
|
cookieisland/cabernet
|
9f429fe7a75707da97133b7ec4b3cf6b7aaec6cd
|
[
"MIT"
] | 10
|
2021-03-17T22:53:03.000Z
|
2021-08-29T19:35:28.000Z
|
import lib.clients.channels.channels
import lib.clients.channels.channels_html
import lib.clients.channels.channels_form_html
| 25.4
| 46
| 0.874016
| 18
| 127
| 6
| 0.333333
| 0.25
| 0.444444
| 0.666667
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055118
| 127
| 4
| 47
| 31.75
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
31ae1bd906af1e5234916a43825b3e6f73640735
| 3,300
|
py
|
Python
|
kuyruk/signals.py
|
BatuAksoy/kuyruk
|
1052334e804c137245ddbbed31c75fa0aac46a71
|
[
"MIT"
] | 154
|
2015-01-08T11:06:17.000Z
|
2022-03-27T11:44:30.000Z
|
kuyruk/signals.py
|
BatuAksoy/kuyruk
|
1052334e804c137245ddbbed31c75fa0aac46a71
|
[
"MIT"
] | 39
|
2015-01-28T11:29:17.000Z
|
2022-01-04T14:14:06.000Z
|
kuyruk/signals.py
|
BatuAksoy/kuyruk
|
1052334e804c137245ddbbed31c75fa0aac46a71
|
[
"MIT"
] | 12
|
2015-05-26T17:08:50.000Z
|
2021-12-23T13:37:19.000Z
|
from blinker import Signal

# Blinker signals emitted at the various stages of a task's and a worker's
# lifecycle. Receivers subscribe with ``<signal>.connect(handler)`` and are
# called with the keyword arguments documented on each signal below.

#: Sent when the task decorator is applied.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
task_init = Signal()

#: Sent before the task is applied.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
task_preapply = Signal()

#: Sent after the task is applied.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
task_postapply = Signal()

#: Sent before the wrapped function is executed.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
task_prerun = Signal()

#: Sent after the wrapped function is executed.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
task_postrun = Signal()

#: Sent when the wrapped function is returned.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
task_success = Signal()

#: Sent when the wrapped function raises an exception.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
#: * exc_info: Return value of ``sys.exc_info()``
task_error = Signal()

#: Sent when the task fails after all retries(if any).
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
#: * exc_info: Return value of ``sys.exc_info()``
task_failure = Signal()

#: Sent before the task is sent to queue.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
#: * description: dict representation of the task
task_presend = Signal()

#: Sent after the task is sent to queue.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
#: * description: dict representation of the task
task_postsend = Signal()

#: Sent when the task fails.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * worker: The Worker object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
#: * description: dict representation of the task
#: * exc_info: Return value of ``sys.exc_info()``
worker_failure = Signal()

#: Sent when the worker is initialized.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * worker: The Worker object
worker_init = Signal()

#: Sent when the worker is started.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * worker: The Worker object
worker_start = Signal()

#: Sent when the worker shuts down.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * worker: The Worker object
worker_shutdown = Signal()
| 25.984252
| 54
| 0.658182
| 410
| 3,300
| 5.24878
| 0.14878
| 0.097584
| 0.130112
| 0.167286
| 0.888941
| 0.868494
| 0.762546
| 0.762546
| 0.74303
| 0.644517
| 0
| 0
| 0.221515
| 3,300
| 126
| 55
| 26.190476
| 0.83768
| 0.826364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
31d3a82c5924ed049b7fbe4276a2ebbac4208fc8
| 1,247
|
py
|
Python
|
pytools/src/oldsrc/evalwin.py
|
selentd/pythontools
|
ab3158dca1c3f6ef0f6d6678070da4a6551fa334
|
[
"Apache-2.0"
] | null | null | null |
pytools/src/oldsrc/evalwin.py
|
selentd/pythontools
|
ab3158dca1c3f6ef0f6d6678070da4a6551fa334
|
[
"Apache-2.0"
] | null | null | null |
pytools/src/oldsrc/evalwin.py
|
selentd/pythontools
|
ab3158dca1c3f6ef0f6d6678070da4a6551fa334
|
[
"Apache-2.0"
] | null | null | null |
from evalindexdata import EvalIndexDataSell
class EvalWinSellCall(EvalIndexDataSell):
    """Sell-side evaluator for call positions.

    Signals a sell once the percentage change of the current close,
    relative to the close of the last buy, drops below ``maxLoss``.
    """

    def __init__(self, maxLoss):
        # Threshold (in percent) below which a sell is signalled.
        self.maxLoss = maxLoss
        self.lastClose = 0.0

    def updateState(self, indexData, lastBuy):
        # Remember the close of the most recent buy as the reference price.
        self.lastClose = lastBuy.close

    def evaluateMaxLoss(self, close):
        # Percentage change of `close` relative to the reference close.
        change_pct = (close / self.lastClose - 1.0) * 100.0
        return change_pct < self.maxLoss

    def evaluateSell(self, indexData):
        """Return True when the current close breaches the loss threshold."""
        return self.evaluateMaxLoss(indexData.close)
class EvalWinSellPut(EvalIndexDataSell):
    """Sell-side evaluator for put positions.

    Mirror image of :class:`EvalWinSellCall`: the ratio is inverted
    (reference close over current close), so a *rising* price produces
    the loss for a put.
    """

    def __init__(self, maxLoss):
        # Threshold (in percent) below which a sell is signalled.
        self.maxLoss = maxLoss
        self.lastClose = 0.0

    def updateState(self, indexData, lastBuy):
        # Remember the close of the most recent buy as the reference price.
        self.lastClose = lastBuy.close

    def evaluateMaxLoss(self, close):
        # Percentage change of the reference close relative to `close`.
        change_pct = (self.lastClose / close - 1.0) * 100.0
        return change_pct < self.maxLoss

    def evaluateSell(self, indexData):
        """Return True when the current close breaches the loss threshold."""
        return self.evaluateMaxLoss(indexData.close)
| 28.340909
| 52
| 0.589415
| 116
| 1,247
| 6.267241
| 0.232759
| 0.090784
| 0.066025
| 0.077029
| 0.841816
| 0.841816
| 0.841816
| 0.841816
| 0.841816
| 0.841816
| 0
| 0.019347
| 0.336808
| 1,247
| 44
| 53
| 28.340909
| 0.859734
| 0
| 0
| 0.848485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.242424
| false
| 0
| 0.030303
| 0.060606
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b42e9d2fd9e836be8dce4861724be13f74c95da3
| 12,188
|
py
|
Python
|
skdecide/builders/scheduling/task_duration.py
|
galleon/bug-free-invention
|
37bcea112da39d1390ff2b30951b36ee5dbc0e6d
|
[
"MIT"
] | null | null | null |
skdecide/builders/scheduling/task_duration.py
|
galleon/bug-free-invention
|
37bcea112da39d1390ff2b30951b36ee5dbc0e6d
|
[
"MIT"
] | null | null | null |
skdecide/builders/scheduling/task_duration.py
|
galleon/bug-free-invention
|
37bcea112da39d1390ff2b30951b36ee5dbc0e6d
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import Optional, Dict
from skdecide.core import DiscreteDistribution, Distribution
__all__ = ['SimulatedTaskDuration', 'UncertainMultivariateTaskDuration', 'UncertainUnivariateTaskDuration', 'UncertainBoundedTaskDuration', 'UniformBoundedTaskDuration', 'EnumerableTaskDuration', 'DeterministicTaskDuration']
class SimulatedTaskDuration:
    """A domain must inherit this class if the task duration requires
    sampling from a simulation.

    Sampled values are memoized per ``(task, mode, progress_from)`` in
    ``self.sampled_durations``, a nested dict the inheriting domain is
    expected to provide.
    """

    # NOTE(review): for uncertain domains a fresh value should be drawn on
    # every call, which is why subclasses override sample_task_duration.

    def sample_task_duration(self, task: int, mode: Optional[int] = 1,
                             progress_from: Optional[float] = 0.) -> int:
        """Sample, store and return task duration for the given task/mode."""
        cache = self.sampled_durations.setdefault(task, {}).setdefault(mode, {})
        if progress_from not in cache:
            cache[progress_from] = self._sample_task_duration(task, mode, progress_from)
        return cache[progress_from]

    def _sample_task_duration(self, task: int, mode: Optional[int] = 1,
                              progress_from: Optional[float] = 0.) -> int:
        """Return a task duration for the given task in the given mode."""
        raise NotImplementedError

    def get_latest_sampled_duration(self, task: int, mode: Optional[int] = 1,
                                    progress_from: Optional[float] = 0.):
        """Return the cached duration if one exists, otherwise sample one."""
        try:
            return self.sampled_durations[task][mode][progress_from]
        except KeyError:
            return self.sample_task_duration(task, mode, progress_from)
# TODO: Can we currently model multivariate distribution with the Distribution object ?
class UncertainMultivariateTaskDuration(SimulatedTaskDuration):
    """A domain must inherit this class if the task duration is uncertain
    and follows a known multivariate distribution."""

    def sample_task_duration(self, task: int, mode: Optional[int] = 1,
                             progress_from: Optional[float] = 0.) -> int:
        """Draw a duration from the underlying multivariate distribution.

        Unlike the simulated base class, no memoization is done: a fresh
        value is sampled on every call.
        """
        return self._sample_task_duration(task=task, mode=mode, progress_from=progress_from)

    def _sample_task_duration(self, task: int, mode: Optional[int] = 1,
                              progress_from: Optional[float] = 0.) -> int:
        """Sample one duration from the task's distribution."""
        return self.get_task_duration_distribution(task, mode).sample()

    def get_task_duration_distribution(
            self, task: int, mode: Optional[int] = 1,
            progress_from: Optional[float] = 0.,
            multivariate_settings: Optional[Dict[str, int]] = None) -> Distribution:
        """Return the multivariate Distribution of the duration of the given
        task in the given mode. Multivariate settings need to be provided."""
        return self._get_task_duration_distribution(task, mode, progress_from, multivariate_settings)

    def _get_task_duration_distribution(
            self, task: int, mode: Optional[int] = 1,
            progress_from: Optional[float] = 0.,
            multivariate_settings: Optional[Dict[str, int]] = None) -> Distribution:
        """Domain-provided distribution; must be overridden."""
        raise NotImplementedError
class UncertainUnivariateTaskDuration(UncertainMultivariateTaskDuration):
    """A domain must inherit this class if the task duration is uncertain
    and follows a known univariate distribution."""

    def _sample_task_duration(self, task: int, mode: Optional[int] = 1,
                              progress_from: Optional[float] = 0.) -> int:
        """Draw one duration from the task's univariate distribution."""
        return self.get_task_duration_distribution(task, mode).sample()

    # NOTE(review): the signature keeps the multivariate_settings parameter
    # of the base class even though it is meaningless for a univariate
    # distribution — flagged as a problem in the original ("TODO, problem
    # here i think").
    def _get_task_duration_distribution(
            self, task: int, mode: Optional[int] = 1,
            progress_from: Optional[float] = 0.,
            multivariate_settings: Optional[Dict[str, int]] = None) -> Distribution:
        """Return the univariate Distribution of the duration of the given
        task in the given mode."""
        raise NotImplementedError
class UncertainBoundedTaskDuration(UncertainUnivariateTaskDuration):
    """A domain must inherit this class if the task duration is known to lie
    between a lower and an upper bound and follows a known distribution
    between these bounds."""

    def _sample_task_duration(self, task: int, mode: Optional[int] = 1,
                              progress_from: Optional[float] = 0.) -> int:
        """Draw one duration from the bounded distribution."""
        return self.get_task_duration_distribution(task, mode).sample()

    def _get_task_duration_distribution(
            self, task: int, mode: Optional[int] = 1,
            progress_from: Optional[float] = 0.,
            multivariate_settings: Optional[Dict[str, int]] = None) -> DiscreteDistribution:
        """Return the Distribution of the duration of the given task in the
        given mode; it yields values between the defined lower and upper
        bounds."""
        raise NotImplementedError

    def get_task_duration_upper_bound(self, task: int, mode: Optional[int] = 1,
                                      progress_from: Optional[float] = 0.) -> int:
        """Return the upper bound for the task duration in the given mode."""
        return self._get_task_duration_upper_bound(task, mode, progress_from)

    def _get_task_duration_upper_bound(self, task: int, mode: Optional[int] = 1,
                                       progress_from: Optional[float] = 0.) -> int:
        """Domain-provided upper bound; must be overridden."""
        raise NotImplementedError

    def get_task_duration_lower_bound(self, task: int, mode: Optional[int] = 1,
                                      progress_from: Optional[float] = 0.) -> int:
        """Return the lower bound for the task duration in the given mode."""
        return self._get_task_duration_lower_bound(task, mode, progress_from)

    def _get_task_duration_lower_bound(self, task: int, mode: Optional[int] = 1,
                                       progress_from: Optional[float] = 0.) -> int:
        """Domain-provided lower bound; must be overridden."""
        raise NotImplementedError
class UniformBoundedTaskDuration(UncertainBoundedTaskDuration):
    """A domain must inherit this class if the task duration is known to lie
    between a lower and an upper bound and follows a uniform distribution
    between these bounds."""

    def _sample_task_duration(self, task: int, mode: Optional[int] = 1,
                              progress_from: Optional[float] = 0.) -> int:
        """Draw one duration from the uniform bounded distribution."""
        return self.get_task_duration_distribution(task, mode).sample()

    def _get_task_duration_distribution(
            self, task: int, mode: Optional[int] = 1,
            progress_from: Optional[float] = 0.,
            multivariate_settings: Optional[Dict[str, int]] = None) -> DiscreteDistribution:
        """Build a discrete uniform distribution over the integer range
        [lower bound, upper bound], each value with equal probability."""
        lb = self.get_task_duration_lower_bound(task, mode)
        ub = self.get_task_duration_upper_bound(task, mode)
        weight = 1.0 / float(ub - lb + 1)
        return DiscreteDistribution([(value, weight) for value in range(lb, ub + 1)])

    def _get_task_duration_upper_bound(self, task: int, mode: Optional[int] = 1,
                                       progress_from: Optional[float] = 0.) -> int:
        """Domain-provided upper bound; must be overridden."""
        raise NotImplementedError

    def _get_task_duration_lower_bound(self, task: int, mode: Optional[int] = 1,
                                       progress_from: Optional[float] = 0.) -> int:
        """Domain-provided lower bound; must be overridden."""
        raise NotImplementedError
class EnumerableTaskDuration(UncertainBoundedTaskDuration):
    """A domain must inherit this class if the task duration for each task
    is enumerable."""

    def _sample_task_duration(self, task: int, mode: Optional[int] = 1,
                              progress_from: Optional[float] = 0.) -> int:
        """Draw one duration from the enumerable distribution."""
        return self.get_task_duration_distribution(task, mode).sample()

    def _get_task_duration_distribution(
            self, task: int, mode: Optional[int] = 1,
            progress_from: Optional[float] = 0.,
            multivariate_settings: Optional[Dict[str, int]] = None) -> DiscreteDistribution:
        """Return the duration distribution as an enumerable; must be
        overridden by the domain."""
        raise NotImplementedError

    def _get_task_duration_upper_bound(self, task: int, mode: Optional[int] = 1,
                                       progress_from: Optional[float] = 0.) -> int:
        """Upper bound: the largest enumerated duration value."""
        pairs = self.get_task_duration_distribution(task, mode).get_values()
        return max(pair[0] for pair in pairs)

    def _get_task_duration_lower_bound(self, task: int, mode: Optional[int] = 1,
                                       progress_from: Optional[float] = 0.) -> int:
        """Lower bound: the smallest enumerated duration value."""
        pairs = self.get_task_duration_distribution(task, mode).get_values()
        return min(pair[0] for pair in pairs)
class DeterministicTaskDuration(EnumerableTaskDuration):
    """A domain must inherit this class if the task durations are known and
    deterministic."""

    def _sample_task_duration(self, task: int, mode: Optional[int] = 1,
                              progress_from: Optional[float] = 0.) -> int:
        """Sampling a deterministic duration just returns the fixed value."""
        return self.get_task_duration(task, mode, progress_from)

    def get_task_duration(self, task: int, mode: Optional[int] = 1,
                          progress_from: Optional[float] = 0.) -> int:
        """Return the fixed deterministic task duration of the given task in
        the given mode."""
        return self._get_task_duration(task, mode, progress_from)

    def _get_task_duration(self, task: int, mode: Optional[int] = 1,
                           progress_from: Optional[float] = 0.) -> int:
        """Domain-provided fixed duration; must be overridden."""
        raise NotImplementedError

    def _get_task_duration_distribution(
            self, task: int, mode: Optional[int] = 1,
            progress_from: Optional[float] = 0.,
            multivariate_settings: Optional[Dict[str, int]] = None):
        """Degenerate distribution: the fixed duration with probability 1."""
        return DiscreteDistribution([(self.get_task_duration(task, mode), 1)])

    def _get_task_duration_upper_bound(self, task: int, mode: Optional[int] = 1,
                                       progress_from: Optional[float] = 0.) -> int:
        """Upper bound coincides with the deterministic duration."""
        return self.get_task_duration(task, mode)

    def _get_task_duration_lower_bound(self, task: int, mode: Optional[int] = 1,
                                       progress_from: Optional[float] = 0.) -> int:
        """Lower bound coincides with the deterministic duration."""
        return self.get_task_duration(task, mode)
| 64.147368
| 224
| 0.704135
| 1,606
| 12,188
| 5.188045
| 0.079078
| 0.109458
| 0.06481
| 0.052208
| 0.831493
| 0.818651
| 0.794047
| 0.789246
| 0.754081
| 0.743759
| 0
| 0.006708
| 0.204956
| 12,188
| 189
| 225
| 64.486772
| 0.853148
| 0.316541
| 0
| 0.521277
| 0
| 0
| 0.023108
| 0.023108
| 0
| 0
| 0
| 0.010582
| 0
| 1
| 0.308511
| false
| 0
| 0.031915
| 0
| 0.62766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
b43ef27de9ee9c6d1de632c9a01a4d6f003fbdb9
| 292
|
py
|
Python
|
tests/Unit/PointwiseFunctions/GeneralRelativity/WeylElectricScalar.py
|
nilsvu/spectre
|
1455b9a8d7e92db8ad600c66f54795c29c3052ee
|
[
"MIT"
] | 117
|
2017-04-08T22:52:48.000Z
|
2022-03-25T07:23:36.000Z
|
tests/Unit/PointwiseFunctions/GeneralRelativity/WeylElectricScalar.py
|
GitHimanshuc/spectre
|
4de4033ba36547113293fe4dbdd77591485a4aee
|
[
"MIT"
] | 3,177
|
2017-04-07T21:10:18.000Z
|
2022-03-31T23:55:59.000Z
|
tests/Unit/PointwiseFunctions/GeneralRelativity/WeylElectricScalar.py
|
geoffrey4444/spectre
|
9350d61830b360e2d5b273fdd176dcc841dbefb0
|
[
"MIT"
] | 85
|
2017-04-07T19:36:13.000Z
|
2022-03-01T10:21:00.000Z
|
# Distributed under the MIT License.
# See LICENSE.txt for details.
import numpy as np
def weyl_electric_scalar(weyl_electric, inverse_spatial_metric):
    """Return the scalar E_ij E^ij built from the Weyl electric tensor.

    Both indices of one copy of ``weyl_electric`` are raised with the
    inverse spatial metric, then fully contracted against the second copy.
    """
    # Raise both indices: E^{ij} = g^{ik} g^{jl} E_{kl}.
    raised = np.einsum("ik,jl,kl->ij",
                       inverse_spatial_metric, inverse_spatial_metric,
                       weyl_electric)
    # Full contraction with the lowered tensor gives the scalar.
    return np.einsum("ij,ij", raised, weyl_electric)
| 32.444444
| 70
| 0.743151
| 40
| 292
| 5.15
| 0.65
| 0.23301
| 0.291262
| 0.252427
| 0.31068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178082
| 292
| 8
| 71
| 36.5
| 0.858333
| 0.215753
| 0
| 0
| 0
| 0
| 0.048673
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
c3316874b6915a62c4f5cac85704c698c65d6c64
| 18,116
|
py
|
Python
|
scripts/sarcasm_classifiers.py
|
ararifbd/sarcasm_detection_in_tweets_package
|
c0bc4cf56afee4cc601ff68da87a8dadc25da475
|
[
"MIT"
] | 1
|
2021-01-29T07:55:59.000Z
|
2021-01-29T07:55:59.000Z
|
scripts/sarcasm_classifiers.py
|
ararifbd/sarcasm_detection_in_tweets_package
|
c0bc4cf56afee4cc601ff68da87a8dadc25da475
|
[
"MIT"
] | null | null | null |
scripts/sarcasm_classifiers.py
|
ararifbd/sarcasm_detection_in_tweets_package
|
c0bc4cf56afee4cc601ff68da87a8dadc25da475
|
[
"MIT"
] | 1
|
2021-01-23T13:25:44.000Z
|
2021-01-23T13:25:44.000Z
|
# Import packages
import time
start_time = time.time()
import pandas as pd, os, numpy as np, csv, sys
from sklearn import metrics, model_selection
#from sklearn.model_selection import validation_curve
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
#from sklearn.model_selection import KFold
#from sklearn.metrics import accuracy_score
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestRegressor
#import matplotlib.pyplot as plt
# Read feature list in a dataframe.
# NOTE(review): Windows-style relative path; presumably the script is run
# from its own directory — confirm against the project layout.
FEATURE_LIST_CSV_FILE_PATH = os.curdir + "\\..\\features\\features.csv"
df = pd.read_csv(FEATURE_LIST_CSV_FILE_PATH)
# Alias consumed by the classifier functions below.
data = df
# Logistic Regression Model
def LR(data):
    """Evaluate a logistic-regression classifier with 10-fold CV.

    `data` is a DataFrame with feature columns plus a 'label' column of
    ground-truth classes. Returns (accuracy, F1, precision, recall), each
    expressed as a percentage.
    """
    # NOTE(review): C between 1e8 and 1e-8 was tuned to match reported
    # accuracy; C=1e-6 means very strong L2 regularization.
    model = LogisticRegression(C=1e-6, multi_class='ovr', penalty='l2', random_state=0)
    X = data.drop(['label'], axis=1)   # all feature columns
    Y = data['label']                  # ground-truth labels
    predict = model_selection.cross_val_predict(model, X, Y, cv=10)
    # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html
    acc = metrics.accuracy_score(Y, predict)
    F1 = metrics.f1_score(Y, predict, zero_division=0)
    P = metrics.precision_score(Y, predict, zero_division=0)
    R = metrics.recall_score(Y, predict, zero_division=0)
    return acc * 100, F1 * 100, P * 100, R * 100
# SVM Model
def SVM(data):
    """Evaluate a linear-kernel SVM classifier with 10-fold CV.

    `data` is a DataFrame with feature columns plus a 'label' column of
    ground-truth classes. Returns (accuracy, F1, precision, recall), each
    expressed as a percentage.
    """
    # https://stats.stackexchange.com/questions/31066/what-is-the-influence-of-c-in-svms-with-linear-kernel
    classifier = SVC(C=0.1, kernel='linear')
    X = data.drop(['label'], axis=1)   # all feature columns
    Y = data['label']                  # ground-truth labels
    predict = model_selection.cross_val_predict(classifier, X, Y, cv=10)
    acc = metrics.accuracy_score(Y, predict)
    F1 = metrics.f1_score(Y, predict, zero_division=0)
    P = metrics.precision_score(Y, predict, zero_division=0)
    R = metrics.recall_score(Y, predict, zero_division=0)
    return acc * 100, F1 * 100, P * 100, R * 100
# Decision Tree model
def DT(data):
    """Evaluate a decision-tree classifier with 10-fold CV.

    `data` is a DataFrame with feature columns plus a 'label' column of
    ground-truth classes. Returns (accuracy, F1, precision, recall), each
    expressed as a percentage.
    """
    classifier = DecisionTreeClassifier()
    X = data.drop(['label'], axis=1)   # all feature columns
    Y = data['label']                  # ground-truth labels
    predict = model_selection.cross_val_predict(classifier, X, Y, cv=10)
    acc = metrics.accuracy_score(Y, predict)
    F1 = metrics.f1_score(Y, predict, zero_division=0)
    P = metrics.precision_score(Y, predict, zero_division=0)
    R = metrics.recall_score(Y, predict, zero_division=0)
    return acc * 100, F1 * 100, P * 100, R * 100
# Naive Bayes Model
def NB(data):
    """Evaluate a Gaussian naive-Bayes classifier with 10-fold CV.

    `data` is a DataFrame with feature columns plus a 'label' column of
    ground-truth classes. Returns (accuracy, F1, precision, recall), each
    expressed as a percentage.
    """
    classifier = GaussianNB()
    X = data.drop(['label'], axis=1)   # all feature columns
    Y = data['label']                  # ground-truth labels
    predict = model_selection.cross_val_predict(classifier, X, Y, cv=10)
    acc = metrics.accuracy_score(Y, predict)
    F1 = metrics.f1_score(Y, predict, zero_division=0)
    P = metrics.precision_score(Y, predict, zero_division=0)
    R = metrics.recall_score(Y, predict, zero_division=0)
    return acc * 100, F1 * 100, P * 100, R * 100
# Random Forest Model
def RF(data):
    """Evaluate a random-forest model (regressor, rounded) with 10-fold CV.

    `data` is a DataFrame with feature columns plus a 'label' column of
    ground-truth classes. Returns (accuracy, F1, precision, recall), each
    expressed as a percentage.
    """
    regressor = RandomForestRegressor(n_estimators=1000, random_state=42)
    X = data.drop(['label'], axis=1)   # all feature columns
    Y = data['label']                  # ground-truth labels
    predict = model_selection.cross_val_predict(regressor, X, Y, cv=10)
    # The regressor's outputs are continuous; round to the nearest label
    # before computing classification metrics.
    predict = predict.round()
    acc = metrics.accuracy_score(Y, predict)
    F1 = metrics.f1_score(Y, predict, zero_division=0)
    P = metrics.precision_score(Y, predict, zero_division=0)
    R = metrics.recall_score(Y, predict, zero_division=0)
    return acc * 100, F1 * 100, P * 100, R * 100
# Full list of engineered features, grouped by category. Every name must
# match a column of ``data``; the result loops below select columns by name.
features = [
    # Lexical features
    "Noun count",
    "Verb count",
    "Adverb count",
    "Adjective count",
    "Positive intensifier",
    "Negative intensifier",
    "Sentiment score",
    # Sarcastic features
    "Exclamation",
    "Question marks",
    "Ellipsis",
    "Interjections",
    "Repeat letters",
    "Vowel repetition count",
    "Uppercase",
    "Repeat upper case segment",
    "Emoji sentiment",
    "Laughter count",
    "Common sarcastic unigram count",
    "Rare sarcastic unigram count",
    "Sarcastic slang count",
    "Repeated quote count",
    "Hashtag sentiment score",
    "Bigrams",
    "Trigrams",
    # Contrast-based features
    "Emoji tweet polarity flip",
    "PWC after removing negation upto next word",
    "NWC after removing negation upto next word",
    "polarity flip after removing negation upto next word",
    # Context-based features
    "User mentions",
    "Hash tag count"
]
# Features grouped by category; each value is the list of column names that
# make up that category.
# NOTE: the category-wise result loop below appends "label" to these lists
# in place (``value.append("label")``) before selecting the columns.
feature_category = {
    "Lexical":
    [
        "Noun count",
        "Verb count",
        "Adverb count",
        "Adjective count",
        "Positive intensifier",
        "Negative intensifier",
        "Sentiment score"
    ],
    "Sarcastic":
    [
        "Exclamation",
        "Question marks",
        "Ellipsis",
        "Interjections",
        "Repeat letters",
        "Vowel repetition count",
        "Uppercase",
        "Repeat upper case segment",
        "Emoji sentiment",
        "Laughter count",
        "Common sarcastic unigram count",
        "Rare sarcastic unigram count",
        "Sarcastic slang count",
        "Repeated quote count",
        "Hashtag sentiment score",
        "Bigrams",
        "Trigrams"
    ],
    "Contrast":
    [
        "Emoji tweet polarity flip",
        "PWC after removing negation upto next word",
        "NWC after removing negation upto next word",
        "polarity flip after removing negation upto next word"
    ],
    "Context": [
        "User mentions",
        "Hash tag count"
    ]
}
# Pairwise category combinations, derived from ``feature_category`` instead of
# repeating every feature name by hand (the previous literals duplicated ~90
# lines and could silently drift out of sync with the category definitions).
# List concatenation builds fresh lists, so the in-place ``append("label")``
# performed by the result loops below never touches ``feature_category``.
# Key names and feature order are identical to the original literals.
different_combinations = {
    "sarcastic_lexical_features":
        feature_category["Sarcastic"] + feature_category["Lexical"],
    "Sarcastic_contrast_features":
        feature_category["Sarcastic"] + feature_category["Contrast"],
    "Sarcastic_context_features":
        feature_category["Sarcastic"] + feature_category["Context"],
    "contrast_context_features":
        feature_category["Contrast"] + feature_category["Context"],
}
# Incrementally growing feature sets (Sarcastic -> +Contrast -> +Context ->
# +Lexical), derived from ``feature_category`` instead of hand-duplicated
# literals to keep the two structures consistent.
# ``list(...)`` / concatenation always builds a fresh list, so the in-place
# ``append("label")`` performed by the result loops below never mutates
# ``feature_category``. Key names and feature order match the originals.
adding_features_incrementally = {
    "sarcastic_features":
        list(feature_category["Sarcastic"]),
    "Sarcastic_contrast_features":
        feature_category["Sarcastic"] + feature_category["Contrast"],
    "sarcastic_contrast_context_features":
        feature_category["Sarcastic"] + feature_category["Contrast"]
        + feature_category["Context"],
    "all_features":
        feature_category["Sarcastic"] + feature_category["Contrast"]
        + feature_category["Context"] + feature_category["Lexical"],
}
# Create results for the chosen algorithm; generation may take several hours
# depending on the algorithm.
ML_Algorithms = {"DT": "DT", "LR": "LR", "NB": "NB", "SVM": "SVM", "RF": "RF"}
# Change the index in ML_Algorithms["DT"] to get results for another algorithm.
ML_Algorithm = ML_Algorithms["DT"]
print("Model: " + ML_Algorithm)
# Resolve the algorithm name to the evaluation function defined above.
# A globals() lookup replaces the previous eval() calls: eval of a constructed
# string is needlessly risky and harder to read/debug.
model_fn = globals()[ML_Algorithm]
headers = ["Feature", "P", "R", "F1", "Acc"]

def _result_path(suffix):
    """Return a portable path to ../results/<algorithm><suffix>.

    os.path.join replaces the previous hard-coded backslash separators, which
    only worked on Windows.
    """
    return os.path.join(os.curdir, "..", "results", ML_Algorithm + suffix)

def _write_results(path, named_column_sets):
    """Evaluate ``model_fn`` on each named feature subset; write one CSV row each.

    ``named_column_sets`` yields (row_name, feature_columns) pairs. The
    'label' column is appended here on a fresh list, so the caller's feature
    lists are never mutated (the old code appended 'label' to the shared
    category lists in place). The file is opened once in 'w' mode instead of
    re-opening it in append mode for every section.
    """
    with open(path, "w", newline='') as result_csv:
        writer = csv.writer(result_csv)
        writer.writerow(headers)
        for name, columns in named_column_sets:
            tiny_data = data[list(columns) + ['label']]
            Acc, F1, P, R = model_fn(tiny_data)
            writer.writerow([name, "%.2f" % P, "%.2f" % R, "%.2f" % F1, "%.2f" % Acc])

# result per individual feature
_write_results(_result_path("_Feature_Wise_Result.csv"),
               ((feature, [feature]) for feature in features))
# result per feature category
_write_results(_result_path("_Category_Wise_Result.csv"),
               feature_category.items())
# result for incrementally added categories
_write_results(_result_path("_Incrementally_Added_Category_Result.csv"),
               adding_features_incrementally.items())
# result for different category combinations
_write_results(_result_path("_Category_Combination_Result.csv"),
               different_combinations.items())
print("Result has been created successfully.")
# calculate and report total execution time
elapsed = int(time.time() - start_time)
hours, remainder = divmod(elapsed, 3600)
minutes, seconds = divmod(remainder, 60)
print("--- %d Hours %d Minutes %d Seconds ---" % (hours, minutes, seconds))
| 37.047035
| 116
| 0.594005
| 1,980
| 18,116
| 5.316667
| 0.133333
| 0.017099
| 0.030873
| 0.049872
| 0.816757
| 0.804978
| 0.804978
| 0.802128
| 0.797948
| 0.795098
| 0
| 0.012126
| 0.253422
| 18,116
| 489
| 117
| 37.047035
| 0.766211
| 0.226816
| 0
| 0.782723
| 0
| 0
| 0.358617
| 0.022647
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013089
| false
| 0
| 0.02356
| 0
| 0.049738
| 0.007853
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c37ad561fa05fac7919f23f952ecf7c338763a00
| 9,064
|
py
|
Python
|
default_user_agent.py
|
thedataincubator/scrapy-random-useragent
|
2af6ccf19d5131bceedfb6fff676cf316da9abda
|
[
"MIT"
] | 2
|
2016-12-16T18:10:31.000Z
|
2021-04-27T16:02:02.000Z
|
default_user_agent.py
|
thedataincubator/scrapy-random-useragent
|
2af6ccf19d5131bceedfb6fff676cf316da9abda
|
[
"MIT"
] | null | null | null |
default_user_agent.py
|
thedataincubator/scrapy-random-useragent
|
2af6ccf19d5131bceedfb6fff676cf316da9abda
|
[
"MIT"
] | 1
|
2017-02-03T15:31:21.000Z
|
2017-02-03T15:31:21.000Z
|
# the most common user agents by https://techblog.willshouse.com/2012/01/03/most-common-user-agents/
# Static snapshot of desktop/tablet User-Agent strings (Chrome, Firefox,
# Safari, IE/Edge circa 2016) used as the default rotation pool when the
# user supplies no list of their own.
DEFAULT_USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/601.6.17 (KHTML, like Gecko) Version/9.1.1 Safari/601.6.17",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/601.5.17 (KHTML, like Gecko) Version/9.1 Safari/601.5.17",
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:46.0) Gecko/20100101 Firefox/46.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:46.0) Gecko/20100101 Firefox/46.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.94 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.94 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.94 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2486.0 Safari/537.36 Edge/13.10586",
    "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:46.0) Gecko/20100101 Firefox/46.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; rv:46.0) Gecko/20100101 Firefox/46.0",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.94 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/601.6.17 (KHTML, like Gecko) Version/9.1.1 Safari/601.6.17",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64; rv:46.0) Gecko/20100101 Firefox/46.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/601.4.4 (KHTML, like Gecko) Version/9.0.3 Safari/601.4.4",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.63 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.94 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.94 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/50.0.2661.102 Chrome/50.0.2661.102 Safari/537.36",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; Trident/5.0)",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; Trident/5.0)",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:46.0) Gecko/20100101 Firefox/46.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/601.5.17 (KHTML, like Gecko) Version/9.1 Safari/601.5.17",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko",
    "Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E238 Safari/601.1",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.63 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.63 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36",
    "Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13F69 Safari/601.1",
    "Mozilla/5.0 (Windows NT 5.1; rv:46.0) Gecko/20100101 Firefox/46.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.94 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.94 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:46.0) Gecko/20100101 Firefox/46.0",
    "Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0",
    "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:46.0) Gecko/20100101 Firefox/46.0",
    "Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.63 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.86 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:45.0) Gecko/20100101 Firefox/45.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/600.5.17 (KHTML, like Gecko) Version/8.0.5 Safari/600.5.17",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 (KHTML, like Gecko) Version/8.0.8 Safari/600.8.9",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9",
    "Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Firefox/38.0",
    "Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:46.0) Gecko/20100101 Firefox/46.0",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36",
]
| 103
| 140
| 0.710613
| 1,798
| 9,064
| 3.546162
| 0.051168
| 0.072146
| 0.11857
| 0.151506
| 0.954517
| 0.952321
| 0.932716
| 0.930521
| 0.920326
| 0.891311
| 0
| 0.247181
| 0.119373
| 9,064
| 88
| 141
| 103
| 0.551616
| 0.010812
| 0
| 0
| 0
| 0.976744
| 0.940658
| 0.004908
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
5ef75e73d26ef6eb897802766a377f556627b48c
| 6,257
|
py
|
Python
|
sdk/python/pulumi_gcp/assuredworkloads/_inputs.py
|
pjbizon/pulumi-gcp
|
0d09cbc1dcf50093a177531f7596c27db11a2e58
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/assuredworkloads/_inputs.py
|
pjbizon/pulumi-gcp
|
0d09cbc1dcf50093a177531f7596c27db11a2e58
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/assuredworkloads/_inputs.py
|
pjbizon/pulumi-gcp
|
0d09cbc1dcf50093a177531f7596c27db11a2e58
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'WorkloadKmsSettingsArgs',
'WorkloadResourceArgs',
'WorkloadResourceSettingArgs',
]
@pulumi.input_type
class WorkloadKmsSettingsArgs:
    """Input arguments for the KMS settings of an Assured Workloads workload.

    Generated by the Pulumi Terraform Bridge (tfgen) -- see the warning at the
    top of this file; do not edit by hand.
    """
    def __init__(__self__, *,
                 next_rotation_time: pulumi.Input[str],
                 rotation_period: pulumi.Input[str]):
        """
        :param pulumi.Input[str] next_rotation_time: Required. Input only. Immutable. The time at which the Key Management Service will automatically create a new version of the crypto key and mark it as the primary.
        :param pulumi.Input[str] rotation_period: Required. Input only. Immutable. will be advanced by this period when the Key Management Service automatically rotates a key. Must be at least 24 hours and at most 876,000 hours.
        """
        pulumi.set(__self__, "next_rotation_time", next_rotation_time)
        pulumi.set(__self__, "rotation_period", rotation_period)

    @property
    @pulumi.getter(name="nextRotationTime")
    def next_rotation_time(self) -> pulumi.Input[str]:
        """
        Required. Input only. Immutable. The time at which the Key Management Service will automatically create a new version of the crypto key and mark it as the primary.
        """
        return pulumi.get(self, "next_rotation_time")

    @next_rotation_time.setter
    def next_rotation_time(self, value: pulumi.Input[str]):
        pulumi.set(self, "next_rotation_time", value)

    @property
    @pulumi.getter(name="rotationPeriod")
    def rotation_period(self) -> pulumi.Input[str]:
        """
        Required. Input only. Immutable. will be advanced by this period when the Key Management Service automatically rotates a key. Must be at least 24 hours and at most 876,000 hours.
        """
        return pulumi.get(self, "rotation_period")

    @rotation_period.setter
    def rotation_period(self, value: pulumi.Input[str]):
        pulumi.set(self, "rotation_period", value)
@pulumi.input_type
class WorkloadResourceArgs:
    """Input arguments describing a resource associated with a workload
    (resource id is an ``int`` here, unlike WorkloadResourceSettingArgs).

    Generated by the Pulumi Terraform Bridge (tfgen) -- see the warning at the
    top of this file; do not edit by hand.
    """
    def __init__(__self__, *,
                 resource_id: Optional[pulumi.Input[int]] = None,
                 resource_type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[int] resource_id: Resource identifier. For a project this represents project_number. If the project is already taken, the workload creation will fail.
        :param pulumi.Input[str] resource_type: Indicates the type of resource. This field should be specified to correspond the id to the right project type (CONSUMER_PROJECT or ENCRYPTION_KEYS_PROJECT) Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER
        """
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)
        if resource_type is not None:
            pulumi.set(__self__, "resource_type", resource_type)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[int]]:
        """
        Resource identifier. For a project this represents project_number. If the project is already taken, the workload creation will fail.
        """
        return pulumi.get(self, "resource_id")

    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "resource_id", value)

    @property
    @pulumi.getter(name="resourceType")
    def resource_type(self) -> Optional[pulumi.Input[str]]:
        """
        Indicates the type of resource. This field should be specified to correspond the id to the right project type (CONSUMER_PROJECT or ENCRYPTION_KEYS_PROJECT) Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER
        """
        return pulumi.get(self, "resource_type")

    @resource_type.setter
    def resource_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_type", value)
@pulumi.input_type
class WorkloadResourceSettingArgs:
    """Input arguments for a workload resource setting (resource id is a
    ``str`` here, unlike WorkloadResourceArgs).

    Generated by the Pulumi Terraform Bridge (tfgen) -- see the warning at the
    top of this file; do not edit by hand.
    """
    def __init__(__self__, *,
                 resource_id: Optional[pulumi.Input[str]] = None,
                 resource_type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] resource_id: Resource identifier. For a project this represents project_number. If the project is already taken, the workload creation will fail.
        :param pulumi.Input[str] resource_type: Indicates the type of resource. This field should be specified to correspond the id to the right project type (CONSUMER_PROJECT or ENCRYPTION_KEYS_PROJECT) Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER
        """
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)
        if resource_type is not None:
            pulumi.set(__self__, "resource_type", resource_type)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource identifier. For a project this represents project_number. If the project is already taken, the workload creation will fail.
        """
        return pulumi.get(self, "resource_id")

    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", value)

    @property
    @pulumi.getter(name="resourceType")
    def resource_type(self) -> Optional[pulumi.Input[str]]:
        """
        Indicates the type of resource. This field should be specified to correspond the id to the right project type (CONSUMER_PROJECT or ENCRYPTION_KEYS_PROJECT) Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER
        """
        return pulumi.get(self, "resource_type")

    @resource_type.setter
    def resource_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_type", value)
| 47.401515
| 315
| 0.706249
| 794
| 6,257
| 5.360202
| 0.164987
| 0.069784
| 0.065789
| 0.046523
| 0.851504
| 0.789474
| 0.7836
| 0.766682
| 0.722509
| 0.722509
| 0
| 0.003416
| 0.204731
| 6,257
| 131
| 316
| 47.763359
| 0.851889
| 0.433275
| 0
| 0.545455
| 1
| 0
| 0.116918
| 0.015106
| 0
| 0
| 0
| 0
| 0
| 1
| 0.194805
| false
| 0
| 0.064935
| 0
| 0.376623
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6f092dcbc7d7cc2599693994c915e1ea2805c601
| 1,375
|
py
|
Python
|
octicons16px/thumbsdown.py
|
andrewp-as-is/octicons16px.py
|
1272dc9f290619d83bd881e87dbd723b0c48844c
|
[
"Unlicense"
] | 1
|
2021-01-28T06:47:39.000Z
|
2021-01-28T06:47:39.000Z
|
octicons16px/thumbsdown.py
|
andrewp-as-is/octicons16px.py
|
1272dc9f290619d83bd881e87dbd723b0c48844c
|
[
"Unlicense"
] | null | null | null |
octicons16px/thumbsdown.py
|
andrewp-as-is/octicons16px.py
|
1272dc9f290619d83bd881e87dbd723b0c48844c
|
[
"Unlicense"
] | null | null | null |
# Inline SVG markup for the 16px Octicons "thumbsdown" icon.
OCTICON_THUMBSDOWN = """
<svg class="octicon octicon-thumbsdown" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" width="16" height="16"><path fill-rule="evenodd" d="M7.083 15.986c1.34.153 2.334-.982 2.334-2.183v-.5c0-1.329.646-2.123 1.317-2.614.329-.24.66-.403.919-.508a1.75 1.75 0 001.514.872h1a1.75 1.75 0 001.75-1.75v-7.5a1.75 1.75 0 00-1.75-1.75h-1a1.75 1.75 0 00-1.662 1.2c-.525-.074-1.068-.228-1.726-.415L9.305.705C8.151.385 6.765.053 4.917.053c-1.706 0-2.97.152-3.722 1.139-.353.463-.537 1.042-.669 1.672C.41 3.424.32 4.108.214 4.897l-.04.306c-.25 1.869-.266 3.318.188 4.316.244.537.622.943 1.136 1.2.495.248 1.066.334 1.669.334h1.422l-.015.112c-.07.518-.157 1.17-.157 1.638 0 .921.151 1.718.655 2.299.512.589 1.248.797 2.011.884zm4.334-13.232c-.706-.089-1.39-.284-2.072-.479a63.914 63.914 0 00-.441-.125c-1.096-.304-2.335-.597-3.987-.597-1.794 0-2.28.222-2.529.548-.147.193-.275.505-.393 1.07-.105.502-.188 1.124-.295 1.93l-.04.3c-.25 1.882-.19 2.933.067 3.497a.921.921 0 00.443.48c.208.104.52.175.997.175h1.75c.685 0 1.295.577 1.205 1.335-.022.192-.049.39-.075.586-.066.488-.13.97-.13 1.329 0 .808.144 1.15.288 1.316.137.157.401.303 1.048.377.307.035.664-.237.664-.693v-.5c0-1.922.978-3.127 1.932-3.825a5.862 5.862 0 011.568-.809V2.754zm1.75 6.798a.25.25 0 01-.25-.25v-7.5a.25.25 0 01.25-.25h1a.25.25 0 01.25.25v7.5a.25.25 0 01-.25.25h-1z"></path></svg>
"""
| 275
| 1,344
| 0.674909
| 361
| 1,375
| 2.567867
| 0.573407
| 0.019417
| 0.021575
| 0.02589
| 0.081985
| 0.04315
| 0
| 0
| 0
| 0
| 0
| 0.598284
| 0.067636
| 1,375
| 4
| 1,345
| 343.75
| 0.124805
| 0
| 0
| 0
| 0
| 0.333333
| 0.979622
| 0.44687
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6f0ce0507b75e48d4352993d2bb797d05e498f57
| 3,551
|
py
|
Python
|
feel_it/feel_it.py
|
MilaNLProc/feel-
|
15e27ad52d7932e42c2aeb4d3cb6926584d5b02f
|
[
"MIT"
] | 30
|
2021-03-17T14:59:01.000Z
|
2022-03-22T15:38:45.000Z
|
feel_it/feel_it.py
|
MilaNLProc/feel-
|
15e27ad52d7932e42c2aeb4d3cb6926584d5b02f
|
[
"MIT"
] | 3
|
2021-06-12T08:04:03.000Z
|
2021-09-11T07:24:37.000Z
|
feel_it/feel_it.py
|
MilaNLProc/feel-
|
15e27ad52d7932e42c2aeb4d3cb6926584d5b02f
|
[
"MIT"
] | 4
|
2021-04-08T03:46:15.000Z
|
2021-11-23T18:10:55.000Z
|
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
from feel_it.dataset import TextDataset
class SentimentClassifier:
    """Italian sentiment (negative/positive) classifier built on the
    MilaNLProc/feel-it-italian-sentiment model from the Hugging Face hub."""

    def __init__(self):
        """
        Simple class initialization for the sentiment classifier,
        the sentiment classification model is going to be downloaded
        directly from huggingface
        """
        # Output-head index -> human-readable label.
        self.sentiment_map = {0: "negative", 1: "positive"}
        self.tokenizer = AutoTokenizer.from_pretrained("MilaNLProc/feel-it-italian-sentiment")
        self.model = AutoModelForSequenceClassification.from_pretrained("MilaNLProc/feel-it-italian-sentiment")
        self.model.eval()
        self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        # FIX: self.device was computed but never used, so inference always ran
        # on CPU even when a GPU was available. Move the model once, up front.
        self.model.to(self.device)

    def predict(self, sentences, batch_size=32):
        """
        Predicts the sentiment for the sentences in input

        @param sentences: sentences to be classified with the sentiment classifier
        @param batch_size: batch size for the network
        @return: list with one "negative"/"positive" label per input sentence
        """
        train_encodings = self.tokenizer(sentences,
                                         truncation=True,
                                         padding=True,
                                         max_length=500)
        train_dataset = TextDataset(train_encodings)
        loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size)
        collect_outputs = []
        with torch.no_grad():
            for batch in loader:
                # Batches must live on the same device as the model.
                input_ids = batch['input_ids'].to(self.device)
                attention_mask = batch['attention_mask'].to(self.device)
                outputs = self.model(input_ids, attention_mask=attention_mask)
                collect_outputs.extend(torch.argmax(outputs["logits"], axis=1).cpu().numpy().tolist())
        return [self.sentiment_map[k] for k in collect_outputs]
class EmotionClassifier:
    """Italian emotion (anger/fear/joy/sadness) classifier built on the
    MilaNLProc/feel-it-italian-emotion model from the Hugging Face hub."""

    def __init__(self):
        """
        Simple class initialization for the emotion classifier,
        the emotion classification model is going to be downloaded
        directly from huggingface
        """
        # Output-head index -> human-readable label.
        self.emotion_map = {0: "anger", 1: "fear", 2: "joy", 3: "sadness"}
        self.tokenizer = AutoTokenizer.from_pretrained("MilaNLProc/feel-it-italian-emotion")
        self.model = AutoModelForSequenceClassification.from_pretrained("MilaNLProc/feel-it-italian-emotion")
        self.model.eval()
        self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        # FIX: self.device was computed but never used, so inference always ran
        # on CPU even when a GPU was available. Move the model once, up front.
        self.model.to(self.device)

    def predict(self, sentences, batch_size=32):
        """
        Predicts the emotion for the sentences in input

        @param sentences: sentences to be classified with the emotion classifier
        @param batch_size: batch size for the network
        @return: list with one emotion label per input sentence
        """
        train_encodings = self.tokenizer(sentences,
                                         truncation=True,
                                         padding=True,
                                         max_length=500)
        train_dataset = TextDataset(train_encodings)
        loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size)
        collect_outputs = []
        with torch.no_grad():
            for batch in loader:
                # Batches must live on the same device as the model.
                input_ids = batch['input_ids'].to(self.device)
                attention_mask = batch['attention_mask'].to(self.device)
                outputs = self.model(input_ids, attention_mask=attention_mask)
                collect_outputs.extend(torch.argmax(outputs["logits"], axis=1).cpu().numpy().tolist())
        return [self.emotion_map[k] for k in collect_outputs]
| 41.776471
| 111
| 0.629119
| 379
| 3,551
| 5.738786
| 0.245383
| 0.041379
| 0.044138
| 0.051494
| 0.848736
| 0.848736
| 0.848736
| 0.826667
| 0.788046
| 0.641839
| 0
| 0.007059
| 0.281892
| 3,551
| 84
| 112
| 42.27381
| 0.845882
| 0.180513
| 0
| 0.723404
| 0
| 0
| 0.090245
| 0.051151
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0
| 0.06383
| 0
| 0.234043
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6f558c30dbde227e7f84763f190434764075c24c
| 239
|
py
|
Python
|
arrayfiles/__init__.py
|
codacy-badger/arrayfiles
|
8d1f583e9a8a9fcae77912048cc1cf9a2590efef
|
[
"MIT"
] | null | null | null |
arrayfiles/__init__.py
|
codacy-badger/arrayfiles
|
8d1f583e9a8a9fcae77912048cc1cf9a2590efef
|
[
"MIT"
] | null | null | null |
arrayfiles/__init__.py
|
codacy-badger/arrayfiles
|
8d1f583e9a8a9fcae77912048cc1cf9a2590efef
|
[
"MIT"
] | null | null | null |
from arrayfiles.core import TextFile # NOQA
from arrayfiles.core import CsvFile # NOQA
from arrayfiles.core import CustomNewlineTextFile # NOQA
from arrayfiles.core import read_text # NOQA
from arrayfiles.core import read_csv # NOQA
| 34.142857
| 57
| 0.807531
| 32
| 239
| 5.96875
| 0.34375
| 0.366492
| 0.471204
| 0.628272
| 0.628272
| 0.335079
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150628
| 239
| 6
| 58
| 39.833333
| 0.940887
| 0.100418
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
48916fd0fa9a5220347b432fdf3d848bc6098721
| 1,198
|
py
|
Python
|
temboo/core/Library/Dropbox/Datastore/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/Dropbox/Datastore/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/Dropbox/Datastore/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
from temboo.Library.Dropbox.Datastore.Await import Await, AwaitInputSet, AwaitResultSet, AwaitChoreographyExecution
from temboo.Library.Dropbox.Datastore.DeleteDatastore import DeleteDatastore, DeleteDatastoreInputSet, DeleteDatastoreResultSet, DeleteDatastoreChoreographyExecution
from temboo.Library.Dropbox.Datastore.GetDeltas import GetDeltas, GetDeltasInputSet, GetDeltasResultSet, GetDeltasChoreographyExecution
from temboo.Library.Dropbox.Datastore.GetOrCreateDatastore import GetOrCreateDatastore, GetOrCreateDatastoreInputSet, GetOrCreateDatastoreResultSet, GetOrCreateDatastoreChoreographyExecution
from temboo.Library.Dropbox.Datastore.GetSnapshot import GetSnapshot, GetSnapshotInputSet, GetSnapshotResultSet, GetSnapshotChoreographyExecution
from temboo.Library.Dropbox.Datastore.InsertRecord import InsertRecord, InsertRecordInputSet, InsertRecordResultSet, InsertRecordChoreographyExecution
from temboo.Library.Dropbox.Datastore.ListDatastores import ListDatastores, ListDatastoresInputSet, ListDatastoresResultSet, ListDatastoresChoreographyExecution
from temboo.Library.Dropbox.Datastore.PutDelta import PutDelta, PutDeltaInputSet, PutDeltaResultSet, PutDeltaChoreographyExecution
| 133.111111
| 190
| 0.906511
| 88
| 1,198
| 12.340909
| 0.431818
| 0.073665
| 0.12523
| 0.176796
| 0.243094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046745
| 1,198
| 8
| 191
| 149.75
| 0.950963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
48926854eecac0cf262a33c6c112f6b623b0e927
| 10,197
|
py
|
Python
|
kratos/tests/test_levelset_convection.py
|
lkusch/Kratos
|
e8072d8e24ab6f312765185b19d439f01ab7b27b
|
[
"BSD-4-Clause"
] | 778
|
2017-01-27T16:29:17.000Z
|
2022-03-30T03:01:51.000Z
|
kratos/tests/test_levelset_convection.py
|
lkusch/Kratos
|
e8072d8e24ab6f312765185b19d439f01ab7b27b
|
[
"BSD-4-Clause"
] | 6,634
|
2017-01-15T22:56:13.000Z
|
2022-03-31T15:03:36.000Z
|
kratos/tests/test_levelset_convection.py
|
lkusch/Kratos
|
e8072d8e24ab6f312765185b19d439f01ab7b27b
|
[
"BSD-4-Clause"
] | 224
|
2017-02-07T14:12:49.000Z
|
2022-03-06T23:09:34.000Z
|
import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as KratosUnittest
import os
# from KratosMultiphysics.gid_output_process import GiDOutputProcess
def GetFilePath(fileName):
    """Return the absolute path of *fileName* relative to this script's folder."""
    base_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(base_dir, fileName)
def BaseDistance(x, y, z):
    """Parabolic distance profile along x: 0.8*x - 0.16*x**2 for x <= 5, else 0."""
    if x > 5.0:
        return 0.0
    return 0.8*x + -0.16*x**2
def BaseJumpedDistance(x, y, z):
    """Unit step that is 1.0 on the closed interval [5, 15] and 0.0 elsewhere."""
    return 1.0 if 5.0 <= x <= 15.0 else 0.0
def ConvectionVelocity(x, y, z):
    """Uniform unit convection velocity pointing along the x axis."""
    velocity = KratosMultiphysics.Vector(3, 0.0)
    velocity[0] = 1.0
    return velocity
class TestLevelSetConvection(KratosUnittest.TestCase):
    """Regression tests for KratosMultiphysics.LevelSetConvectionProcess2D.

    Each test reads the same 2D mesh, seeds a distance field, convects it with
    a uniform unit velocity and checks the resulting extrema against
    previously recorded reference values.
    """

    def tearDown(self):
        # Remove the .time file
        try:
            os.remove('levelset_convection_process_mesh.time')
        except :
            pass

    def test_levelset_convection(self):
        """Convect a smooth parabolic distance field with the SUPG element
        (no BFECC error compensation) and check the field extrema."""
        current_model = KratosMultiphysics.Model()
        model_part = current_model.CreateModelPart("Main")
        model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DISTANCE)
        model_part.AddNodalSolutionStepVariable(KratosMultiphysics.VELOCITY)
        KratosMultiphysics.ModelPartIO(GetFilePath("auxiliar_files_for_python_unittest/mdpa_files/levelset_convection_process_mesh")).ReadModelPart(model_part)
        model_part.SetBufferSize(2)

        # Seed the initial distance field and the (uniform) convection velocity.
        for node in model_part.Nodes:
            node.SetSolutionStepValue(KratosMultiphysics.DISTANCE, 0, BaseDistance(node.X,node.Y,node.Z))
            node.SetSolutionStepValue(KratosMultiphysics.VELOCITY, 0, ConvectionVelocity(node.X,node.Y,node.Z))

        # Fix the distance on the inflow boundary (x ~ 0).
        for node in model_part.Nodes:
            if node.X < 0.001:
                node.Fix(KratosMultiphysics.DISTANCE)

        from KratosMultiphysics import python_linear_solver_factory as linear_solver_factory
        linear_solver = linear_solver_factory.ConstructSolver(
            KratosMultiphysics.Parameters("""{"solver_type" : "skyline_lu_factorization"}"""))

        model_part.CloneTimeStep(40.0)

        levelset_convection_settings = KratosMultiphysics.Parameters("""{
            "max_CFL" : 1.0,
            "max_substeps" : 0,
            "eulerian_error_compensation" : false,
            "element_type" : "levelset_convection_supg"
        }""")
        KratosMultiphysics.LevelSetConvectionProcess2D(
            model_part,
            linear_solver,
            levelset_convection_settings).Execute()

        # Scan the convected field for its extrema.
        max_distance = -1.0
        min_distance = +1.0
        for node in model_part.Nodes:
            d = node.GetSolutionStepValue(KratosMultiphysics.DISTANCE)
            max_distance = max(max_distance, d)
            min_distance = min(min_distance, d)

        # Reference values recorded from a previous run of this test.
        self.assertAlmostEqual(max_distance, 0.733304104543163)
        self.assertAlmostEqual(min_distance,-0.06371359024393097)

    def test_levelset_convection_BFECC(self):
        """Convect a discontinuous (jumped) distance field with the SUPG
        element plus BFECC error compensation and check the field extrema."""
        current_model = KratosMultiphysics.Model()
        model_part = current_model.CreateModelPart("Main")
        model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DISTANCE)
        model_part.AddNodalSolutionStepVariable(KratosMultiphysics.VELOCITY)
        KratosMultiphysics.ModelPartIO(GetFilePath("auxiliar_files_for_python_unittest/mdpa_files/levelset_convection_process_mesh")).ReadModelPart(model_part)
        model_part.SetBufferSize(2)
        model_part.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, 2)

        # Seed the initial (discontinuous) distance field and velocity.
        for node in model_part.Nodes:
            node.SetSolutionStepValue(KratosMultiphysics.DISTANCE, BaseJumpedDistance(node.X,node.Y,node.Z))
            node.SetSolutionStepValue(KratosMultiphysics.VELOCITY, ConvectionVelocity(node.X,node.Y,node.Z))

        # Fix the distance on the inflow boundary (x ~ 0).
        for node in model_part.Nodes:
            if node.X < 0.001:
                node.Fix(KratosMultiphysics.DISTANCE)

        from KratosMultiphysics import python_linear_solver_factory as linear_solver_factory
        linear_solver = linear_solver_factory.ConstructSolver(
            KratosMultiphysics.Parameters("""{"solver_type" : "skyline_lu_factorization"}"""))

        model_part.CloneTimeStep(30.0)

        # BFECC requires the nodal neighbours to be available.
        KratosMultiphysics.FindGlobalNodalNeighboursProcess(model_part).Execute()

        levelset_convection_settings = KratosMultiphysics.Parameters("""{
            "max_CFL" : 1.0,
            "max_substeps" : 0,
            "eulerian_error_compensation" : true,
            "element_type" : "levelset_convection_supg"
        }""")
        KratosMultiphysics.LevelSetConvectionProcess2D(
            model_part,
            linear_solver,
            levelset_convection_settings).Execute()

        # Scan the convected field for its extrema.
        max_distance = -1.0
        min_distance = +1.0
        for node in model_part.Nodes:
            d = node.GetSolutionStepValue(KratosMultiphysics.DISTANCE)
            max_distance = max(max_distance, d)
            min_distance = min(min_distance, d)

        # Debug visualization helper kept for convenience (disabled).
        # gid_output = GiDOutputProcess(model_part,
        #     "levelset_test_2D_supg",
        #     KratosMultiphysics.Parameters("""
        #     {
        #         "result_file_configuration" : {
        #             "gidpost_flags": {
        #                 "GiDPostMode": "GiD_PostBinary",
        #                 "WriteDeformedMeshFlag": "WriteUndeformed",
        #                 "WriteConditionsFlag": "WriteConditions",
        #                 "MultiFileFlag": "SingleFile"
        #             },
        #             "nodal_results" : ["DISTANCE","VELOCITY"]
        #         }
        #     }
        #     """)
        # )
        # gid_output.ExecuteInitialize()
        # gid_output.ExecuteBeforeSolutionLoop()
        # gid_output.ExecuteInitializeSolutionStep()
        # gid_output.PrintOutput()
        # gid_output.ExecuteFinalizeSolutionStep()
        # gid_output.ExecuteFinalize()

        # Reference values recorded from a previous run of this test.
        self.assertAlmostEqual(max_distance, 1.0634680107706003)
        self.assertAlmostEqual(min_distance, -0.06361967738862996)

    def test_levelset_convection_BFECC_algebraic(self):
        """Convect a discontinuous distance field with the algebraically
        stabilized element, BFECC compensation and anti-diffusivity terms."""
        current_model = KratosMultiphysics.Model()
        model_part = current_model.CreateModelPart("Main")
        model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DISTANCE)
        model_part.AddNodalSolutionStepVariable(KratosMultiphysics.VELOCITY)
        KratosMultiphysics.ModelPartIO(GetFilePath("auxiliar_files_for_python_unittest/mdpa_files/levelset_convection_process_mesh")).ReadModelPart(model_part)
        model_part.SetBufferSize(2)
        model_part.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, 2)

        # Seed the initial (discontinuous) distance field and velocity.
        for node in model_part.Nodes:
            node.SetSolutionStepValue(KratosMultiphysics.DISTANCE, BaseJumpedDistance(node.X,node.Y,node.Z))
            node.SetSolutionStepValue(KratosMultiphysics.VELOCITY, ConvectionVelocity(node.X,node.Y,node.Z))

        # Fix the distance on the inflow boundary (x ~ 0).
        for node in model_part.Nodes:
            if node.X < 0.001:
                node.Fix(KratosMultiphysics.DISTANCE)

        from KratosMultiphysics import python_linear_solver_factory as linear_solver_factory
        linear_solver = linear_solver_factory.ConstructSolver(
            KratosMultiphysics.Parameters("""{"solver_type" : "skyline_lu_factorization"}"""))

        model_part.CloneTimeStep(10.0)

        # BFECC requires the nodal neighbours to be available.
        KratosMultiphysics.FindGlobalNodalNeighboursProcess(model_part).Execute()

        levelset_convection_settings = KratosMultiphysics.Parameters("""{
            "max_CFL" : 0.2,
            "max_substeps" : 0,
            "eulerian_error_compensation" : true,
            "element_type" : "levelset_convection_algebraic_stabilization",
            "element_settings" : {
                "include_anti_diffusivity_terms" : true
            }
        }""")
        KratosMultiphysics.LevelSetConvectionProcess2D(
            model_part,
            linear_solver,
            levelset_convection_settings).Execute()

        # Scan the convected field for its extrema.
        max_distance = -1.0
        min_distance = +1.0
        for node in model_part.Nodes:
            d = node.GetSolutionStepValue(KratosMultiphysics.DISTANCE)
            max_distance = max(max_distance, d)
            min_distance = min(min_distance, d)

        # Debug visualization helper kept for convenience (disabled).
        # gid_output = GiDOutputProcess(model_part,
        #     "levelset_test_2D_algebraic_new",
        #     KratosMultiphysics.Parameters("""
        #     {
        #         "result_file_configuration" : {
        #             "gidpost_flags": {
        #                 "GiDPostMode": "GiD_PostBinary",
        #                 "WriteDeformedMeshFlag": "WriteUndeformed",
        #                 "WriteConditionsFlag": "WriteConditions",
        #                 "MultiFileFlag": "SingleFile"
        #             },
        #             "nodal_results" : ["DISTANCE","VELOCITY"]
        #         }
        #     }
        #     """)
        # )
        # gid_output.ExecuteInitialize()
        # gid_output.ExecuteBeforeSolutionLoop()
        # gid_output.ExecuteInitializeSolutionStep()
        # gid_output.PrintOutput()
        # gid_output.ExecuteFinalizeSolutionStep()
        # gid_output.ExecuteFinalize()

        # Reference values recorded from a previous run of this test.
        self.assertAlmostEqual(max_distance, 1.0001864678812689)
        self.assertAlmostEqual(min_distance, -0.00023748611723155408)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    KratosUnittest.main()
| 45.119469
| 160
| 0.586937
| 826
| 10,197
| 6.976998
| 0.179177
| 0.056221
| 0.014055
| 0.021864
| 0.862051
| 0.829256
| 0.829256
| 0.826479
| 0.826479
| 0.826479
| 0
| 0.027273
| 0.331176
| 10,197
| 226
| 161
| 45.119469
| 0.817595
| 0.226439
| 0
| 0.690647
| 0
| 0
| 0.142201
| 0.074711
| 0
| 0
| 0
| 0
| 0.043165
| 0
| null | null | 0.007194
| 0.043165
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5b11172471718634955551ef49254b6fdc5f1a5d
| 174
|
py
|
Python
|
reid/__init__.py
|
eddielyc/Augmented-Geometric-Distillation
|
029973b7ce3c08fa1f0fa4dab27981d2148986a3
|
[
"Apache-2.0"
] | 3
|
2022-03-10T05:56:04.000Z
|
2022-03-12T07:32:59.000Z
|
reid/__init__.py
|
eddielyc/Augmented-Geometric-Distillation
|
029973b7ce3c08fa1f0fa4dab27981d2148986a3
|
[
"Apache-2.0"
] | 1
|
2022-03-10T06:00:19.000Z
|
2022-03-24T06:52:23.000Z
|
reid/__init__.py
|
eddielyc/Augmented-Geometric-Distillation
|
029973b7ce3c08fa1f0fa4dab27981d2148986a3
|
[
"Apache-2.0"
] | null | null | null |
from reid.evaluation import *
from reid.loss import *
from reid.models import *
from reid.utils import *
from reid.evaluation.evaluators import *
from reid.trainers import *
| 24.857143
| 40
| 0.787356
| 25
| 174
| 5.48
| 0.36
| 0.350365
| 0.510949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 174
| 6
| 41
| 29
| 0.913333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5b24645bd65c130f415bd530fac9d9aec3b72806
| 3,830
|
py
|
Python
|
lib_pypy/_pypy_winbase_cffi.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 381
|
2018-08-18T03:37:22.000Z
|
2022-02-06T23:57:36.000Z
|
lib_pypy/_pypy_winbase_cffi.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 16
|
2018-09-22T18:12:47.000Z
|
2022-02-22T20:03:59.000Z
|
lib_pypy/_pypy_winbase_cffi.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 55
|
2015-08-16T02:41:30.000Z
|
2022-03-20T20:33:35.000Z
|
# auto-generated file
import _cffi_backend
# CFFI out-of-line descriptor for the '_pypy_winbase_cffi' module (Windows
# console/process helpers: CreateProcess, pipes, _getch family, etc.).
# The byte-string blobs below are machine-emitted encodings of the C
# declarations -- never edit them by hand; regenerate the file instead.
ffi = _cffi_backend.FFI('_pypy_winbase_cffi',
# Internal CFFI format version this blob was generated for.
_version = 0x2601,
# Packed encoding of every C type referenced by the entries below.
_types = b'\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x09\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x19\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x64\x03\x00\x00\x13\x11\x00\x00\x67\x03\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x13\x11\x00\x00\x13\x11\x00\x00\x63\x03\x00\x00\x62\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x03\x00\x00\x1F\x11\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x08\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x18\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x1F\x11\x00\x00\x0A\x01\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x5B\x03\x00\x00\x39\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x39\x11\x00\x00\x39\x11\x00\x00\x1B\x11\x00\x00\x1C\x11\x00\x00\x02\x0F\x00\x00\x0D\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x29\x0D\x00\x00\x08\x01\x00\x00\x02\x0F\x00\x00\x18\x0D\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x18\x0D\x00\x00\x15\x11\x00\x00\x39\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x18\x0D\x00\x00\x02\x0F\x00\x00\x56\x0D\x00\x00\x06\x01\x00\x00\x00\x0F\x00\x00\x56\x0D\x00\x00\x00\x0F\x00\x00\x56\x0D\x00\x00\x10\x01\x00\x00\x00\x0F\x00\x00\x15\x0D\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x15\x0D\x00\x00\x02\x0F\x00\x00\x00\x09\x00\x00\x01\x09\x00\x00\x02\x01\x00\x00\x66\x03\x00\x00\x04\x01\x00\x00\x00\x01',
# Exported Win32/CRT functions (type-index prefix + name) and SEM_* constants.
_globals = (b'\x00\x00\x24\x23CloseHandle',0,b'\x00\x00\x1E\x23CreatePipe',0,b'\x00\x00\x12\x23CreateProcessA',0,b'\x00\x00\x38\x23CreateProcessW',0,b'\x00\x00\x2F\x23DuplicateHandle',0,b'\x00\x00\x60\x23GetCurrentProcess',0,b'\x00\x00\x2B\x23GetExitCodeProcess',0,b'\x00\x00\x4E\x23GetModuleFileNameW',0,b'\x00\x00\x5D\x23GetStdHandle',0,b'\x00\x00\x53\x23GetVersion',0,b'\xFF\xFF\xFF\x1FSEM_FAILCRITICALERRORS',1,b'\xFF\xFF\xFF\x1FSEM_NOALIGNMENTFAULTEXCEPT',4,b'\xFF\xFF\xFF\x1FSEM_NOGPFAULTERRORBOX',2,b'\xFF\xFF\xFF\x1FSEM_NOOPENFILEERRORBOX',32768,b'\x00\x00\x47\x23SetErrorMode',0,b'\x00\x00\x27\x23TerminateProcess',0,b'\x00\x00\x4A\x23WaitForSingleObject',0,b'\x00\x00\x44\x23_get_osfhandle',0,b'\x00\x00\x10\x23_getch',0,b'\x00\x00\x10\x23_getche',0,b'\x00\x00\x58\x23_getwch',0,b'\x00\x00\x58\x23_getwche',0,b'\x00\x00\x10\x23_kbhit',0,b'\x00\x00\x07\x23_locking',0,b'\x00\x00\x0C\x23_open_osfhandle',0,b'\x00\x00\x00\x23_putch',0,b'\x00\x00\x5A\x23_putwch',0,b'\x00\x00\x03\x23_setmode',0,b'\x00\x00\x00\x23_ungetch',0,b'\x00\x00\x55\x23_ungetwch',0),
# Field layouts for the PROCESS_INFORMATION and STARTUPINFO structs.
_struct_unions = ((b'\x00\x00\x00\x62\x00\x00\x00\x02$PROCESS_INFORMATION',b'\x00\x00\x15\x11hProcess',b'\x00\x00\x15\x11hThread',b'\x00\x00\x18\x11dwProcessId',b'\x00\x00\x18\x11dwThreadId'),(b'\x00\x00\x00\x63\x00\x00\x00\x02$STARTUPINFO',b'\x00\x00\x18\x11cb',b'\x00\x00\x13\x11lpReserved',b'\x00\x00\x13\x11lpDesktop',b'\x00\x00\x13\x11lpTitle',b'\x00\x00\x18\x11dwX',b'\x00\x00\x18\x11dwY',b'\x00\x00\x18\x11dwXSize',b'\x00\x00\x18\x11dwYSize',b'\x00\x00\x18\x11dwXCountChars',b'\x00\x00\x18\x11dwYCountChars',b'\x00\x00\x18\x11dwFillAttribute',b'\x00\x00\x18\x11dwFlags',b'\x00\x00\x56\x11wShowWindow',b'\x00\x00\x56\x11cbReserved2',b'\x00\x00\x65\x11lpReserved2',b'\x00\x00\x15\x11hStdInput',b'\x00\x00\x15\x11hStdOutput',b'\x00\x00\x15\x11hStdError')),
# Typedef name table (typedef name -> encoded type index).
_typenames = (b'\x00\x00\x00\x1CLPPROCESS_INFORMATION',b'\x00\x00\x00\x1BLPSTARTUPINFO',b'\x00\x00\x00\x62PROCESS_INFORMATION',b'\x00\x00\x00\x63STARTUPINFO',b'\x00\x00\x00\x56wint_t'),
)
| 348.181818
| 1,682
| 0.751436
| 795
| 3,830
| 3.574843
| 0.171069
| 0.386348
| 0.137931
| 0.067558
| 0.467628
| 0.423997
| 0.366291
| 0.316678
| 0.316678
| 0.316678
| 0
| 0.335099
| 0.01436
| 3,830
| 10
| 1,683
| 383
| 0.417748
| 0.004961
| 0
| 0
| 1
| 0.125
| 0.878916
| 0.859437
| 0
| 1
| 0.001579
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
d2a0621dfc4b5a82a4bd263afbfd7a31e9c00b58
| 47
|
py
|
Python
|
jaseci_kit/jaseci_kit/use_enc.py
|
Gorgeous-Patrick/jaseci
|
b423165fefbbc9574cd4467ee05728add7f47e5a
|
[
"MIT"
] | 6
|
2021-10-30T03:35:36.000Z
|
2022-02-10T02:06:18.000Z
|
jaseci_kit/jaseci_kit/use_enc.py
|
Gorgeous-Patrick/jaseci
|
b423165fefbbc9574cd4467ee05728add7f47e5a
|
[
"MIT"
] | 85
|
2021-10-29T22:47:39.000Z
|
2022-03-31T06:11:52.000Z
|
jaseci_kit/jaseci_kit/use_enc.py
|
Gorgeous-Patrick/jaseci
|
b423165fefbbc9574cd4467ee05728add7f47e5a
|
[
"MIT"
] | 12
|
2021-11-03T17:29:22.000Z
|
2022-03-30T16:01:53.000Z
|
from .modules.use_enc.use_enc import * # noqa
| 23.5
| 46
| 0.744681
| 8
| 47
| 4.125
| 0.75
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 47
| 1
| 47
| 47
| 0.825
| 0.085106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d2acd4361f92fe286ab13688a9174ce7c5465755
| 140
|
py
|
Python
|
html/webappapis/dynamic-markup-insertion/opening-the-input-stream/resources/http-refresh.py
|
Ms2ger/web-platform-tests
|
645c0e8a5c028a613e7ad1732834100dbe946fc7
|
[
"BSD-3-Clause"
] | 1
|
2019-04-14T20:17:04.000Z
|
2019-04-14T20:17:04.000Z
|
html/webappapis/dynamic-markup-insertion/opening-the-input-stream/resources/http-refresh.py
|
Ms2ger/web-platform-tests
|
645c0e8a5c028a613e7ad1732834100dbe946fc7
|
[
"BSD-3-Clause"
] | 14
|
2019-03-18T20:11:48.000Z
|
2019-04-23T22:41:46.000Z
|
html/webappapis/dynamic-markup-insertion/opening-the-input-stream/resources/http-refresh.py
|
Ms2ger/web-platform-tests
|
645c0e8a5c028a613e7ad1732834100dbe946fc7
|
[
"BSD-3-Clause"
] | 1
|
2021-01-04T15:55:59.000Z
|
2021-01-04T15:55:59.000Z
|
def main(request, response):
    """wptserve handler: answer 200 with a Refresh header and an empty body.

    The Refresh delay is taken verbatim from the request's query string,
    defaulting to '0' when the query is empty.
    """
    delay = request.url_parts.query or '0'
    return 200, [['Refresh', delay]], ''
| 35
| 70
| 0.678571
| 20
| 140
| 4.65
| 0.7
| 0.215054
| 0.322581
| 0.430108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034188
| 0.164286
| 140
| 3
| 71
| 46.666667
| 0.760684
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
d2b11769f31c3b95c69948004bba0728f69e494d
| 19,498
|
py
|
Python
|
sdk/python/pulumi_aws/ec2/vpc_dhcp_options.py
|
jen20/pulumi-aws
|
172e00c642adc03238f89cc9c5a16b914a77c2b1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ec2/vpc_dhcp_options.py
|
jen20/pulumi-aws
|
172e00c642adc03238f89cc9c5a16b914a77c2b1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ec2/vpc_dhcp_options.py
|
jen20/pulumi-aws
|
172e00c642adc03238f89cc9c5a16b914a77c2b1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
__all__ = ['VpcDhcpOptionsArgs', 'VpcDhcpOptions']
# NOTE: auto-generated by the Pulumi Terraform Bridge (tfgen); keep edits in
# sync with the generator or they will be lost on regeneration.
@pulumi.input_type
class VpcDhcpOptionsArgs:
    def __init__(__self__, *,
                 domain_name: Optional[pulumi.Input[str]] = None,
                 domain_name_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 netbios_name_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 netbios_node_type: Optional[pulumi.Input[str]] = None,
                 ntp_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a VpcDhcpOptions resource.
        :param pulumi.Input[str] domain_name: the suffix domain name to use by default when resolving non Fully Qualified Domain Names. In other words, this is what ends up being the `search` value in the `/etc/resolv.conf` file.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] domain_name_servers: List of name servers to configure in `/etc/resolv.conf`. If you want to use the default AWS nameservers you should set this to `AmazonProvidedDNS`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] netbios_name_servers: List of NETBIOS name servers.
        :param pulumi.Input[str] netbios_node_type: The NetBIOS node type (1, 2, 4, or 8). AWS recommends to specify 2 since broadcast and multicast are not supported in their network. For more information about these node types, see [RFC 2132](http://www.ietf.org/rfc/rfc2132.txt).
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ntp_servers: List of NTP servers to configure.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
        """
        # Only explicitly-provided arguments are stored on the input type.
        if domain_name is not None:
            pulumi.set(__self__, "domain_name", domain_name)
        if domain_name_servers is not None:
            pulumi.set(__self__, "domain_name_servers", domain_name_servers)
        if netbios_name_servers is not None:
            pulumi.set(__self__, "netbios_name_servers", netbios_name_servers)
        if netbios_node_type is not None:
            pulumi.set(__self__, "netbios_node_type", netbios_node_type)
        if ntp_servers is not None:
            pulumi.set(__self__, "ntp_servers", ntp_servers)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter(name="domainName")
    def domain_name(self) -> Optional[pulumi.Input[str]]:
        """
        the suffix domain name to use by default when resolving non Fully Qualified Domain Names. In other words, this is what ends up being the `search` value in the `/etc/resolv.conf` file.
        """
        return pulumi.get(self, "domain_name")

    @domain_name.setter
    def domain_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "domain_name", value)

    @property
    @pulumi.getter(name="domainNameServers")
    def domain_name_servers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of name servers to configure in `/etc/resolv.conf`. If you want to use the default AWS nameservers you should set this to `AmazonProvidedDNS`.
        """
        return pulumi.get(self, "domain_name_servers")

    @domain_name_servers.setter
    def domain_name_servers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "domain_name_servers", value)

    @property
    @pulumi.getter(name="netbiosNameServers")
    def netbios_name_servers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of NETBIOS name servers.
        """
        return pulumi.get(self, "netbios_name_servers")

    @netbios_name_servers.setter
    def netbios_name_servers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "netbios_name_servers", value)

    @property
    @pulumi.getter(name="netbiosNodeType")
    def netbios_node_type(self) -> Optional[pulumi.Input[str]]:
        """
        The NetBIOS node type (1, 2, 4, or 8). AWS recommends to specify 2 since broadcast and multicast are not supported in their network. For more information about these node types, see [RFC 2132](http://www.ietf.org/rfc/rfc2132.txt).
        """
        return pulumi.get(self, "netbios_node_type")

    @netbios_node_type.setter
    def netbios_node_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "netbios_node_type", value)

    @property
    @pulumi.getter(name="ntpServers")
    def ntp_servers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of NTP servers to configure.
        """
        return pulumi.get(self, "ntp_servers")

    @ntp_servers.setter
    def ntp_servers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ntp_servers", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class VpcDhcpOptions(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
domain_name: Optional[pulumi.Input[str]] = None,
domain_name_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
netbios_name_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
netbios_node_type: Optional[pulumi.Input[str]] = None,
ntp_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides a VPC DHCP Options resource.
## Example Usage
Basic usage:
```python
import pulumi
import pulumi_aws as aws
dns_resolver = aws.ec2.VpcDhcpOptions("dnsResolver", domain_name_servers=[
"8.8.8.8",
"8.8.4.4",
])
```
Full usage:
```python
import pulumi
import pulumi_aws as aws
foo = aws.ec2.VpcDhcpOptions("foo",
domain_name="service.consul",
domain_name_servers=[
"127.0.0.1",
"10.0.0.2",
],
netbios_name_servers=["127.0.0.1"],
netbios_node_type="2",
ntp_servers=["127.0.0.1"],
tags={
"Name": "foo-name",
})
```
## Remarks
* Notice that all arguments are optional but you have to specify at least one argument.
* `domain_name_servers`, `netbios_name_servers`, `ntp_servers` are limited by AWS to maximum four servers only.
* To actually use the DHCP Options Set you need to associate it to a VPC using `ec2.VpcDhcpOptionsAssociation`.
* If you delete a DHCP Options Set, all VPCs using it will be associated to AWS's `default` DHCP Option Set.
* In most cases unless you're configuring your own DNS you'll want to set `domain_name_servers` to `AmazonProvidedDNS`.
## Import
VPC DHCP Options can be imported using the `dhcp options id`, e.g.
```sh
$ pulumi import aws:ec2/vpcDhcpOptions:VpcDhcpOptions my_options dopt-d9070ebb
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] domain_name: the suffix domain name to use by default when resolving non Fully Qualified Domain Names. In other words, this is what ends up being the `search` value in the `/etc/resolv.conf` file.
:param pulumi.Input[Sequence[pulumi.Input[str]]] domain_name_servers: List of name servers to configure in `/etc/resolv.conf`. If you want to use the default AWS nameservers you should set this to `AmazonProvidedDNS`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] netbios_name_servers: List of NETBIOS name servers.
:param pulumi.Input[str] netbios_node_type: The NetBIOS node type (1, 2, 4, or 8). AWS recommends to specify 2 since broadcast and multicast are not supported in their network. For more information about these node types, see [RFC 2132](http://www.ietf.org/rfc/rfc2132.txt).
:param pulumi.Input[Sequence[pulumi.Input[str]]] ntp_servers: List of NTP servers to configure.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[VpcDhcpOptionsArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a VPC DHCP Options resource.
## Example Usage
Basic usage:
```python
import pulumi
import pulumi_aws as aws
dns_resolver = aws.ec2.VpcDhcpOptions("dnsResolver", domain_name_servers=[
"8.8.8.8",
"8.8.4.4",
])
```
Full usage:
```python
import pulumi
import pulumi_aws as aws
foo = aws.ec2.VpcDhcpOptions("foo",
domain_name="service.consul",
domain_name_servers=[
"127.0.0.1",
"10.0.0.2",
],
netbios_name_servers=["127.0.0.1"],
netbios_node_type="2",
ntp_servers=["127.0.0.1"],
tags={
"Name": "foo-name",
})
```
## Remarks
* Notice that all arguments are optional but you have to specify at least one argument.
* `domain_name_servers`, `netbios_name_servers`, `ntp_servers` are limited by AWS to maximum four servers only.
* To actually use the DHCP Options Set you need to associate it to a VPC using `ec2.VpcDhcpOptionsAssociation`.
* If you delete a DHCP Options Set, all VPCs using it will be associated to AWS's `default` DHCP Option Set.
* In most cases unless you're configuring your own DNS you'll want to set `domain_name_servers` to `AmazonProvidedDNS`.
## Import
VPC DHCP Options can be imported using the `dhcp options id`, e.g.
```sh
$ pulumi import aws:ec2/vpcDhcpOptions:VpcDhcpOptions my_options dopt-d9070ebb
```
:param str resource_name: The name of the resource.
:param VpcDhcpOptionsArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """Dispatch construction to ``_internal_init``.

    Accepts either a typed ``VpcDhcpOptionsArgs`` object or loose keyword
    arguments, normalizing both forms through the SDK helper.
    """
    resource_args, opts = _utilities.get_resource_args_opts(VpcDhcpOptionsArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is None:
        # Loose-kwargs form: forward everything untouched.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # Typed-args form: explode the args object into keyword arguments.
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   domain_name: Optional[pulumi.Input[str]] = None,
                   domain_name_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   netbios_name_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   netbios_node_type: Optional[pulumi.Input[str]] = None,
                   ntp_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                   __props__=None,
                   __name__=None,
                   __opts__=None):
    """Shared implementation behind both ``__init__`` overloads.

    Normalizes resource options, builds the property map, and registers
    the resource with the Pulumi engine via the base-class constructor.
    """
    # Legacy keyword shims: very old SDK callers passed name/options as
    # __name__/__opts__; honor them but emit deprecation warnings.
    if __name__ is not None:
        warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
        resource_name = __name__
    if __opts__ is not None:
        warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
        opts = __opts__
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    # opts.id set means "adopt an existing resource" (the get() path), in
    # which case __props__ is supplied internally; otherwise build the
    # input property map from the constructor arguments here.
    if opts.id is None:
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = dict()
        __props__['domain_name'] = domain_name
        __props__['domain_name_servers'] = domain_name_servers
        __props__['netbios_name_servers'] = netbios_name_servers
        __props__['netbios_node_type'] = netbios_node_type
        __props__['ntp_servers'] = ntp_servers
        __props__['tags'] = tags
        # Output-only properties start as None; the engine fills them in.
        __props__['arn'] = None
        __props__['owner_id'] = None
    super(VpcDhcpOptions, __self__).__init__(
        'aws:ec2/vpcDhcpOptions:VpcDhcpOptions',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        arn: Optional[pulumi.Input[str]] = None,
        domain_name: Optional[pulumi.Input[str]] = None,
        domain_name_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        netbios_name_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        netbios_node_type: Optional[pulumi.Input[str]] = None,
        ntp_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        owner_id: Optional[pulumi.Input[str]] = None,
        tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'VpcDhcpOptions':
    """
    Look up an existing VpcDhcpOptions resource by name and provider id,
    optionally seeding known state values.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] arn: The ARN of the DHCP Options Set.
    :param pulumi.Input[str] domain_name: the suffix domain name to use by default when resolving non Fully Qualified Domain Names. In other words, this is what ends up being the `search` value in the `/etc/resolv.conf` file.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] domain_name_servers: List of name servers to configure in `/etc/resolv.conf`. If you want to use the default AWS nameservers you should set this to `AmazonProvidedDNS`.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] netbios_name_servers: List of NETBIOS name servers.
    :param pulumi.Input[str] netbios_node_type: The NetBIOS node type (1, 2, 4, or 8). AWS recommends to specify 2 since broadcast and multicast are not supported in their network. For more information about these node types, see [RFC 2132](http://www.ietf.org/rfc/rfc2132.txt).
    :param pulumi.Input[Sequence[pulumi.Input[str]]] ntp_servers: List of NTP servers to configure.
    :param pulumi.Input[str] owner_id: The ID of the AWS account that owns the DHCP options set.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
    """
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    # Seed the state map with whatever the caller already knows.
    __props__ = {
        "arn": arn,
        "domain_name": domain_name,
        "domain_name_servers": domain_name_servers,
        "netbios_name_servers": netbios_name_servers,
        "netbios_node_type": netbios_node_type,
        "ntp_servers": ntp_servers,
        "owner_id": owner_id,
        "tags": tags,
    }
    return VpcDhcpOptions(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
    """
    The ARN of the DHCP Options Set.
    """
    value = pulumi.get(self, "arn")
    return value
@property
@pulumi.getter(name="domainName")
def domain_name(self) -> pulumi.Output[Optional[str]]:
    """
    Suffix domain name used by default when resolving non fully qualified
    domain names; this becomes the `search` value in `/etc/resolv.conf`.
    """
    value = pulumi.get(self, "domain_name")
    return value
@property
@pulumi.getter(name="domainNameServers")
def domain_name_servers(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """
    Name servers configured in `/etc/resolv.conf`; `AmazonProvidedDNS`
    selects the default AWS nameservers.
    """
    value = pulumi.get(self, "domain_name_servers")
    return value
@property
@pulumi.getter(name="netbiosNameServers")
def netbios_name_servers(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """
    The configured NETBIOS name servers.
    """
    value = pulumi.get(self, "netbios_name_servers")
    return value
@property
@pulumi.getter(name="netbiosNodeType")
def netbios_node_type(self) -> pulumi.Output[Optional[str]]:
    """
    The NetBIOS node type (1, 2, 4, or 8). AWS recommends 2 because
    broadcast and multicast are unsupported in their network; see
    [RFC 2132](http://www.ietf.org/rfc/rfc2132.txt) for node-type details.
    """
    value = pulumi.get(self, "netbios_node_type")
    return value
@property
@pulumi.getter(name="ntpServers")
def ntp_servers(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """
    The configured NTP servers.
    """
    value = pulumi.get(self, "ntp_servers")
    return value
@property
@pulumi.getter(name="ownerId")
def owner_id(self) -> pulumi.Output[str]:
    """
    The ID of the AWS account that owns the DHCP options set.
    """
    value = pulumi.get(self, "owner_id")
    return value
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """
    The map of tags assigned to the resource.
    """
    value = pulumi.get(self, "tags")
    return value
def translate_output_property(self, prop):
    """Translate a camelCase provider property name to its snake_case form,
    falling back to the name unchanged when no mapping exists."""
    snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
    return snake if snake else prop
def translate_input_property(self, prop):
    """Translate a snake_case property name to the provider's camelCase form,
    falling back to the name unchanged when no mapping exists."""
    camel = _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop)
    return camel if camel else prop
| 46.757794
| 282
| 0.647092
| 2,473
| 19,498
| 4.905378
| 0.100283
| 0.087956
| 0.069244
| 0.055643
| 0.839832
| 0.808013
| 0.786003
| 0.75575
| 0.724013
| 0.698541
| 0
| 0.010055
| 0.250231
| 19,498
| 416
| 283
| 46.870192
| 0.819755
| 0.418043
| 0
| 0.413265
| 1
| 0
| 0.099911
| 0.003667
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0.005102
| 0.02551
| 0.010204
| 0.265306
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d2d3b8fee50998ead9f60f8a4c8bc63b93ddca73
| 183
|
py
|
Python
|
deepweights/__init__.py
|
astromer-science/python-library
|
554b95129b801d7b21f53eb201db1e7cd0e1ef21
|
[
"MIT"
] | null | null | null |
deepweights/__init__.py
|
astromer-science/python-library
|
554b95129b801d7b21f53eb201db1e7cd0e1ef21
|
[
"MIT"
] | null | null | null |
deepweights/__init__.py
|
astromer-science/python-library
|
554b95129b801d7b21f53eb201db1e7cd0e1ef21
|
[
"MIT"
] | null | null | null |
from .core.astromer import *
from .core.data import *
from .core.utils import *
from .core.training.callbacks import get_callbacks
from .core.training.scheduler import CustomSchedule
| 30.5
| 51
| 0.808743
| 25
| 183
| 5.88
| 0.44
| 0.272109
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10929
| 183
| 5
| 52
| 36.6
| 0.90184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9609f5c9893f15b43fc260c7ee4047d42d486de9
| 1,341
|
py
|
Python
|
Apps/Shop/models.py
|
Martin-Antonio/Store
|
d926acccb99f25f19fd3ec699f5b8933cf9d7ac7
|
[
"Apache-2.0"
] | null | null | null |
Apps/Shop/models.py
|
Martin-Antonio/Store
|
d926acccb99f25f19fd3ec699f5b8933cf9d7ac7
|
[
"Apache-2.0"
] | null | null | null |
Apps/Shop/models.py
|
Martin-Antonio/Store
|
d926acccb99f25f19fd3ec699f5b8933cf9d7ac7
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
# Create your models here.
class Shop_Men(models.Model):
    """Product listing for the men's section of the shop."""

    # Basic product data.
    Nombre = models.CharField(max_length=100)
    cantidad = models.IntegerField()
    Precio = models.IntegerField()
    # Up to four product photos, all stored under media/Image.
    imagen1 = models.ImageField(upload_to="media/Image")
    imagen2 = models.ImageField(upload_to="media/Image")
    imagen3 = models.ImageField(upload_to="media/Image")
    imagen4 = models.ImageField(upload_to="media/Image")
    descripcion = models.CharField(max_length=50)
    # Contact phone numbers associated with this product.
    Telefono = models.ManyToManyField('Contacto')

    def __str__(self):
        """Human-readable label: product name plus stock count."""
        cantidad_texto = str(self.cantidad)
        return self.Nombre + ": " + 'Cantidad en existencia' + ' :' + cantidad_texto
class Shop_body(models.Model):
    """Product listing for the body-care section of the shop."""

    # Basic product data.
    Nombre = models.CharField(max_length=100)
    cantidad = models.IntegerField()
    Precio = models.IntegerField()
    # Up to four product photos, all stored under media/Image.
    imagen1 = models.ImageField(upload_to="media/Image")
    imagen2 = models.ImageField(upload_to="media/Image")
    imagen3 = models.ImageField(upload_to="media/Image")
    imagen4 = models.ImageField(upload_to="media/Image")
    descripcion = models.CharField(max_length=50)
    # Contact phone numbers associated with this product.
    Telefono = models.ManyToManyField('Contacto')

    def __str__(self):
        """Human-readable label: product name plus stock count."""
        cantidad_texto = str(self.cantidad)
        return self.Nombre + ": " + 'Cantidad en existencia' + ' :' + cantidad_texto
class Contacto(models.Model):
    """Contact phone number; the number itself is the primary key."""

    Telefono = models.CharField(max_length=11, primary_key=True)

    def __str__(self):
        """Display the phone number."""
        numero = self.Telefono
        return numero
| 27.9375
| 62
| 0.756898
| 170
| 1,341
| 5.805882
| 0.276471
| 0.129686
| 0.178318
| 0.194529
| 0.838906
| 0.838906
| 0.838906
| 0.838906
| 0.838906
| 0.838906
| 0
| 0.016736
| 0.108874
| 1,341
| 47
| 63
| 28.531915
| 0.809205
| 0.017897
| 0
| 0.818182
| 0
| 0
| 0.120152
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.030303
| 0.030303
| 0.878788
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
8259d0ae93d9548931787a07b8e27035a6410e39
| 9,979
|
py
|
Python
|
data_preprocess_with_negatives.py
|
gavruskin/microinteractions
|
bafc755cbed50837984fca2bb78111592985d6d6
|
[
"MIT"
] | 1
|
2018-09-07T02:39:49.000Z
|
2018-09-07T02:39:49.000Z
|
data_preprocess_with_negatives.py
|
gavruskin/microinteractions
|
bafc755cbed50837984fca2bb78111592985d6d6
|
[
"MIT"
] | null | null | null |
data_preprocess_with_negatives.py
|
gavruskin/microinteractions
|
bafc755cbed50837984fca2bb78111592985d6d6
|
[
"MIT"
] | null | null | null |
import pandas as pd
data = pd.read_csv("fitness_summary_all_replicates.csv")
# Add all parameters (Taylor coefficients) as 0 in rows following the data:
for i in range(data.shape[0]):
for j in range(16, 48):
data.set_value(i, j, 0)
data.rename(columns={16: "a", 17: "a1", 18: "a2", 19: "a3", 20: "a4", 21: "a5",
22: "b12", 23: "b13", 24: "b14", 25: "b15", 26: "b23", 27: "b24",
28: "b25", 29: "b34", 30: "b35", 31: "b45", 32: "c123", 33: "c124",
34: "c125", 35: "c134", 36: "c135", 37: "c145", 38: "c234", 39: "c235",
40: "c245", 41: "c345", 42: "d1234", 43: "d1235", 44: "d1245",
45: "d1345", 46: "d2345", 47: "e12345"}, inplace=True)
# Change coefficients corresponding to present effects to 1:
for index, row in data.iterrows():
species = row["LP"] + row["LB"] + row["AP"] + row["AT"] + row["AO"]
if species == "YNNNN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
if species == "NYNNN":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
if species == "NNYNN":
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
if species == "NNNYN":
data.set_value(index, "a", 1)
data.set_value(index, "a4", 1)
if species == "NNNNY":
data.set_value(index, "a", 1)
data.set_value(index, "a5", 1)
if species == "YYNNN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "b12", -1)
if species == "YNYNN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "b13", -1)
if species == "YNNYN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b14", -1)
if species == "YNNNY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b15", -1)
if species == "NYYNN":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "b23", -1)
if species == "NYNYN":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b24", -1)
if species == "NYNNY":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b25", -1)
if species == "NNYYN":
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b34", -1)
if species == "NNYNY":
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b35", -1)
if species == "NNNYY":
data.set_value(index, "a", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b45", -1)
if species == "YYYNN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b13", -1)
data.set_value(index, "b23", -1)
data.set_value(index, "c123", 1)
if species == "YYNYN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b14", -1)
data.set_value(index, "b24", -1)
data.set_value(index, "c124", 1)
if species == "YYNNY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "c125", 1)
if species == "NYYYN":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b23", -1)
data.set_value(index, "b24", -1)
data.set_value(index, "b34", -1)
data.set_value(index, "c234", 1)
if species == "NNYYY":
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b34", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c345", 1)
if species == "YNYYN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b13", -1)
data.set_value(index, "b14", -1)
data.set_value(index, "b34", -1)
data.set_value(index, "c134", 1)
if species == "YNYNY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b13", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "c135", 1)
if species == "YNNYY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b14", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c145", 1)
if species == "NYNYY":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b24", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c245", 1)
if species == "NYYNY":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b23", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "c235", 1)
if species == "YYYYN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b13", -1)
data.set_value(index, "b14", -1)
data.set_value(index, "b23", -1)
data.set_value(index, "b24", -1)
data.set_value(index, "b34", -1)
data.set_value(index, "c123", 1)
data.set_value(index, "c124", 1)
data.set_value(index, "c134", 1)
data.set_value(index, "c234", 1)
data.set_value(index, "d1234", -1)
if species == "YYYNY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b13", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b23", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "c123", 1)
data.set_value(index, "c125", 1)
data.set_value(index, "c135", 1)
data.set_value(index, "c235", 1)
data.set_value(index, "d1235", -1)
if species == "YYNYY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b14", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b24", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c124", 1)
data.set_value(index, "c125", 1)
data.set_value(index, "c145", 1)
data.set_value(index, "c245", 1)
data.set_value(index, "d1245", -1)
if species == "YNYYY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b13", -1)
data.set_value(index, "b14", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b34", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c134", 1)
data.set_value(index, "c135", 1)
data.set_value(index, "c145", 1)
data.set_value(index, "c345", 1)
data.set_value(index, "d1345", -1)
if species == "NYYYY":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b23", -1)
data.set_value(index, "b24", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "b34", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c234", 1)
data.set_value(index, "c235", 1)
data.set_value(index, "c245", 1)
data.set_value(index, "c345", 1)
data.set_value(index, "d2345", -1)
if species == "YYYYY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b13", -1)
data.set_value(index, "b14", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b23", -1)
data.set_value(index, "b24", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "b34", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c123", 1)
data.set_value(index, "c124", 1)
data.set_value(index, "c125", 1)
data.set_value(index, "c134", 1)
data.set_value(index, "c135", 1)
data.set_value(index, "c145", 1)
data.set_value(index, "c234", 1)
data.set_value(index, "c235", 1)
data.set_value(index, "c245", 1)
data.set_value(index, "c345", 1)
data.set_value(index, "d1234", -1)
data.set_value(index, "d1235", -1)
data.set_value(index, "d1245", -1)
data.set_value(index, "d1345", -1)
data.set_value(index, "d2345", -1)
data.set_value(index, "e12345", 1)
if species == "NNNNN":
data.set_value(index, "a", 1)
data.to_csv("fitness_summary_all_replicates_parameters.csv", sep="\t")
| 33.712838
| 79
| 0.626917
| 1,703
| 9,979
| 3.524956
| 0.085731
| 0.284524
| 0.487756
| 0.688156
| 0.846077
| 0.83325
| 0.83325
| 0.792604
| 0.792604
| 0.792604
| 0
| 0.095114
| 0.15713
| 9,979
| 295
| 80
| 33.827119
| 0.618595
| 0.013228
| 0
| 0.83737
| 0
| 0
| 0.102713
| 0.008026
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.00346
| 0
| 0.00346
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
826277cca31b1278c1763a1ffc675066636acba9
| 305
|
py
|
Python
|
riptide/tests/integration/__init__.py
|
theCapypara/riptide-lib
|
560106d4196cdc5a5b84235f32ac44c80bc3994e
|
[
"MIT"
] | 4
|
2019-04-23T17:14:00.000Z
|
2019-12-22T11:55:31.000Z
|
riptide/tests/integration/__init__.py
|
theCapypara/riptide-lib
|
560106d4196cdc5a5b84235f32ac44c80bc3994e
|
[
"MIT"
] | 15
|
2021-09-22T09:40:42.000Z
|
2022-03-07T05:01:07.000Z
|
riptide/tests/integration/__init__.py
|
theCapypara/riptide-lib
|
560106d4196cdc5a5b84235f32ac44c80bc3994e
|
[
"MIT"
] | 1
|
2019-11-24T18:08:14.000Z
|
2019-11-24T18:08:14.000Z
|
from .config_test import *
from .engine_cmd_test import *
from .engine_exec_test import *
from .engine_service_test import *
from .engine_start_stop_test import *
from .engine_util_test import *
from .perf_dont_sync_named_volumes_with_host_test import *
from .perf_dont_sync_unimportant_src_test import *
| 33.888889
| 58
| 0.842623
| 48
| 305
| 4.854167
| 0.416667
| 0.343348
| 0.420601
| 0.429185
| 0.223176
| 0.223176
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104918
| 305
| 8
| 59
| 38.125
| 0.85348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
829b893138127c1ba13d3d19dea8b574dfc013ac
| 4,264
|
py
|
Python
|
parser/team19/BDTytus/TypeCheck/Atributo.py
|
18SebastianVC/tytus
|
2b22f4339356b6cf46e3235a5219f68e5ba5573b
|
[
"MIT"
] | null | null | null |
parser/team19/BDTytus/TypeCheck/Atributo.py
|
18SebastianVC/tytus
|
2b22f4339356b6cf46e3235a5219f68e5ba5573b
|
[
"MIT"
] | null | null | null |
parser/team19/BDTytus/TypeCheck/Atributo.py
|
18SebastianVC/tytus
|
2b22f4339356b6cf46e3235a5219f68e5ba5573b
|
[
"MIT"
] | null | null | null |
class Atributo:
    """A table-column (attribute) description for the TypeCheck layer.

    Holds the column's name and type plus constraint flags (primary key,
    foreign table, default value, nullability, uniqueness) and the
    doubly-linked-list pointers ``siguiente``/``anterior`` used by the
    owning table.

    Fixes over the original:
    * ``iniciar_Default`` wrote ``foreignTable`` (lowercase f), leaving the
      real ``ForeignTable`` attribute unset — reading it raised
      AttributeError.
    * Most factory variants never set ``columnNumber``; it is now
      initialized on every instance.
    * The nine hand-expanded factories now share one private ``_crear``
      helper instead of duplicating the field assignments.
    """

    def __init__(self, nombre, tipo):
        """Create a plain column: nullable, not a key, no default."""
        self.columnNumber = None
        self.nombre = nombre
        self.tipo = tipo
        self.isPrimary = False
        self.ForeignTable = None
        self.default = None
        self.isNull = True
        self.isUnique = False
        # Punteros: linked-list pointers managed by the owning table.
        self.siguiente = None
        self.anterior = None

    @classmethod
    def _crear(cls, nombre, tipo, is_primary=False, foreign_table=None,
               default=None, is_null=True, is_unique=False):
        """Shared factory behind the ``iniciar_*`` constructors."""
        nuevo = cls.__new__(cls)
        nuevo.columnNumber = None
        nuevo.nombre = nombre
        nuevo.tipo = tipo
        nuevo.isPrimary = is_primary
        nuevo.ForeignTable = foreign_table
        nuevo.default = default
        nuevo.isNull = is_null
        nuevo.isUnique = is_unique
        # Punteros: linked-list pointers managed by the owning table.
        nuevo.siguiente = None
        nuevo.anterior = None
        return nuevo

    @classmethod
    def iniciar_esPrimary(cls, nombre, tipo):
        """Primary-key column: unique and not nullable."""
        return cls._crear(nombre, tipo, is_primary=True, is_null=False,
                          is_unique=True)

    @classmethod
    def iniciar_esForeign(cls, nombre, tipo, tabla):
        """Foreign-key column referencing ``tabla``; not nullable."""
        return cls._crear(nombre, tipo, foreign_table=tabla, is_null=False)

    @classmethod
    def iniciar_Default(cls, nombre, tipo, default):
        """Column with a DEFAULT value; not nullable."""
        return cls._crear(nombre, tipo, default=default, is_null=False)

    @classmethod
    def iniciar_NotNull(cls, nombre, tipo):
        """NOT NULL column.

        NOTE(review): the original sets ``isNull = True`` here despite the
        name; that value is preserved — confirm the intended semantics.
        """
        return cls._crear(nombre, tipo, is_null=True)

    @classmethod
    def iniciar_esUnique(cls, nombre, tipo):
        """UNIQUE column; still nullable."""
        return cls._crear(nombre, tipo, is_unique=True)

    @classmethod
    def iniciar_Primary_Default(cls, nombre, tipo, default):
        """Primary-key column that also carries a DEFAULT value."""
        return cls._crear(nombre, tipo, is_primary=True, default=default,
                          is_null=False, is_unique=True)

    @classmethod
    def iniciar_Default_NotNull_Unique(cls, nombre, tipo, default):
        """Column with DEFAULT, NOT NULL and UNIQUE constraints."""
        return cls._crear(nombre, tipo, default=default, is_null=False,
                          is_unique=True)

    @classmethod
    def iniciar_Default_Null(cls, nombre, tipo, default):
        """Nullable column with a DEFAULT value."""
        return cls._crear(nombre, tipo, default=default, is_null=True)

    @classmethod
    def iniciar_Solo_Default(cls, default):
        """Placeholder column carrying only a DEFAULT value (no name/type)."""
        return cls._crear(None, None, default=default)
| 28.61745
| 65
| 0.565901
| 420
| 4,264
| 5.616667
| 0.083333
| 0.091564
| 0.080119
| 0.053412
| 0.84061
| 0.832556
| 0.832556
| 0.832556
| 0.824078
| 0.824078
| 0
| 0
| 0.364916
| 4,264
| 149
| 66
| 28.61745
| 0.871123
| 0.018996
| 0
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7dc969534b7023960301ad310a9b9ad32f93f669
| 192
|
py
|
Python
|
aplpy/tests/setup_package.py
|
GiantMolecularCloud/aplpy
|
352fdd7fc776ebcb9058451e0b3aced777083257
|
[
"MIT"
] | null | null | null |
aplpy/tests/setup_package.py
|
GiantMolecularCloud/aplpy
|
352fdd7fc776ebcb9058451e0b3aced777083257
|
[
"MIT"
] | null | null | null |
aplpy/tests/setup_package.py
|
GiantMolecularCloud/aplpy
|
352fdd7fc776ebcb9058451e0b3aced777083257
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, print_function, division
def get_package_data():
    """Return the package-data globs for the astropy test sub-package."""
    # _ASTROPY_PACKAGE_NAME_ is substituted by the astropy build machinery
    # at install time, hence the noqa on the otherwise-undefined name.
    test_data = ['coveragerc', 'data/*.reg', 'data/*/*.hdr']
    return {_ASTROPY_PACKAGE_NAME_ + '.tests': test_data}  # noqa
| 32
| 100
| 0.713542
| 23
| 192
| 5.434783
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 192
| 5
| 101
| 38.4
| 0.744048
| 0.020833
| 0
| 0
| 0
| 0
| 0.204301
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
7df1fa0a56bbccaf3bbe6146a571c01b2168fd89
| 183
|
py
|
Python
|
tests/project/app/views.py
|
j4mie/django-kronos
|
71d90a67eb73e9c28666e77611466062ff3e3dda
|
[
"MIT"
] | 1
|
2015-11-05T11:45:52.000Z
|
2015-11-05T11:45:52.000Z
|
tests/project/app/views.py
|
j4mie/django-kronos
|
71d90a67eb73e9c28666e77611466062ff3e3dda
|
[
"MIT"
] | null | null | null |
tests/project/app/views.py
|
j4mie/django-kronos
|
71d90a67eb73e9c28666e77611466062ff3e3dda
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponse
from fandjango.decorators import facebook_authorization_required
@facebook_authorization_required()
def home(request):
    """Minimal view for the kronos test project; gated by Facebook auth."""
    response = HttpResponse()
    return response
| 26.142857
| 64
| 0.846995
| 20
| 183
| 7.55
| 0.7
| 0.278146
| 0.384106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098361
| 183
| 7
| 65
| 26.142857
| 0.915152
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
8158dc55cb159717eecbb154245a7f37058d73c8
| 17,242
|
py
|
Python
|
mrt_worker/mrt_worker/policy/actor_critic.py
|
zmk5/multi_robot_trainer
|
b85f668c1302040717d0129f092558279bec5237
|
[
"MIT"
] | 20
|
2020-11-10T02:53:42.000Z
|
2022-02-16T09:23:57.000Z
|
mrt_worker/mrt_worker/policy/actor_critic.py
|
zmk5/multi_robot_trainer
|
b85f668c1302040717d0129f092558279bec5237
|
[
"MIT"
] | null | null | null |
mrt_worker/mrt_worker/policy/actor_critic.py
|
zmk5/multi_robot_trainer
|
b85f668c1302040717d0129f092558279bec5237
|
[
"MIT"
] | 5
|
2020-11-10T02:02:58.000Z
|
2021-12-11T03:51:36.000Z
|
"""Actor-Critic policy class for RL experiments with neural net function approx.
This network uses a shared network architecture, i.e. a singular network that
has two ouputs: one for the actor and one for the critic.
Written by: Zahi Kakish (zmk5)
"""
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import tensorflow as tf
from tensorflow import keras
from mean_field_msgs.srv import Gradients
from mean_field_msgs.srv import Weights
from mrt_worker.policy.models import ActorCriticModel
from mrt_worker.policy.models import ActorModel
from mrt_worker.policy.models import CriticModel
from mrt_worker.policy.reinforce import WorkerPolicyREINFORCE
# Positional indices into a stored transition batch, as used by the methods
# below (e.g. batch[REWARD], batch[DONE]):
# (state, action, reward, next_state, next_action, done).
STATE = 0
ACTION = 1
REWARD = 2
NEXT_STATE = 3
NEXT_ACTION = 4
DONE = 5
class WorkerPolicyActorCriticShared(WorkerPolicyREINFORCE):
"""Actor-Critic Shared Network Class containing relatvent RL information."""
def __init__(
        self,
        n_states: int,
        n_actions: int,
        alpha: float,
        gamma: float,
        hidden_layer_sizes: List[int],
        use_gpu: bool = False) -> None:
    """Initialize the ModelActorCritic class.

    :param n_states: Size of the state space (network input width).
    :param n_actions: Number of discrete actions (actor output width).
    :param alpha: Step-size hyperparameter forwarded to the base policy
        (presumably the learning rate — confirm in WorkerPolicyREINFORCE).
    :param gamma: Discount factor; stored by the base class and read as
        ``self._gamma`` in the return calculations below.
    :param hidden_layer_sizes: Widths of the shared hidden layers.
    :param use_gpu: Device flag forwarded to the base policy.
    """
    super().__init__(
        n_states, n_actions, alpha, gamma, hidden_layer_sizes, use_gpu)
    # Use Actor-Critic model instead of that within WorkerPolicyREINFORCE.
    self._neural_net = ActorCriticModel(
        n_states, n_actions, hidden_layer_sizes)
    # Huber loss, summed over the batch (no mean reduction).
    self._loss_function = keras.losses.Huber(
        reduction=keras.losses.Reduction.SUM)
    # Build the Actor-Critic model for single-row input of width n_states.
    self._neural_net.build((1, n_states))
@property
def atype(self):
    """Short string identifier of the RL algorithm this policy implements."""
    return 'A2C'
def train(
        self,
        batch: Tuple[np.ndarray],
        batch_size: int = 16) -> None:
    """Train the policy based on a sample batch.

    Computes advantage returns outside the gradient tape, records the
    forward pass and loss inside the tape, then stores the resulting
    gradients in ``self._gradients``.  Gradients are only computed here,
    not applied — presumably the worker ships them elsewhere; confirm
    against the worker's update loop.
    """
    # Bootstrap values for the successor states.  The current-state values
    # are only needed by the commented-out GAE variant below.
    # _, values_pred = self._neural_net(batch[STATE])
    _, next_values_pred = self._neural_net(batch[NEXT_STATE])
    returns = self.calculate_nstep_returns(
        batch, batch_size, next_values_pred)
    # Alternative advantage estimate (GAE), kept for experimentation:
    # returns = self.calculate_gae_returns(
    #     batch, batch_size, values_pred, next_values_pred)
    with tf.GradientTape() as tape:
        # Compute the action probs and value for current and next state.
        action_logits, values = self._neural_net(batch[STATE])
        action_probs = tf.nn.softmax(action_logits)
        # print(f'Grads: {[var.name for var in tape.watched_variables()]}')
        # Compute the returns and loss.
        loss = self.calculate_actor_critic_loss(
            batch[ACTION], action_probs, returns, values, batch_size)
    # Calculate gradients w.r.t. the shared network's trainable variables.
    self._gradients = tape.gradient(
        loss, self._neural_net.trainable_variables)
def calculate_nstep_returns(
self,
batch: Tuple[np.ndarray],
batch_size: int,
next_v_pred: tf.Tensor) -> np.ndarray:
"""Calculate n-step advantage returns."""
ret_value = np.zeros_like(batch[REWARD])
# future_ret = next_v_pred.numpy()[-1]
# print(f'Future Return: {future_ret}')
future_ret = 0.0
for t in reversed(range(batch_size + 1)):
ret_value[t] = future_ret = batch[REWARD][t] + \
self._gamma * future_ret * (1 - batch[DONE][t])
return ret_value
def calculate_gae_returns(
self,
batch: Tuple[np.ndarray],
batch_size: int,
v_preds: tf.Tensor,
next_v_pred: tf.Tensor) -> np.ndarray:
"""Calculate Generalaized Advantage Estimation (GAE) returns."""
gaes = np.zeros_like(batch[REWARD])
future_gae = 0.0
for t in reversed(range(batch_size + 1)):
delta = batch[REWARD][t] + self._gamma * next_v_pred[t] * (1 - batch[DONE][t]) - v_preds[t]
gaes[t] = future_gae = delta + self._gamma * 0.95 * (1 - batch[DONE][t]) * future_gae # lambda = 0.95
return gaes
def calculate_actor_critic_loss(
self,
action_batch: np.ndarray,
action_probs: Union[np.ndarray, tf.Tensor],
returns: Union[np.ndarray, tf.Tensor],
values: Union[np.ndarray, tf.Tensor],
batch_size: int) -> tf.Tensor:
"""Calculate the Actor-Critic network loss."""
advantage = returns - values
action_log_probs = tf.math.log(action_probs)
idx = tf.Variable(
np.append(np.arange(batch_size + 1).reshape(batch_size + 1, 1),
action_batch, axis=1),
dtype=tf.int32
)
act_log_probs = tf.reshape(
tf.gather_nd(action_log_probs, idx), (batch_size + 1, 1))
actor_loss = -1 * tf.math.reduce_sum(act_log_probs * advantage)
print(f'Actor Loss: {actor_loss}')
critic_loss = self._loss_function(values, returns)
print(f'Critic Loss: {critic_loss}')
print(f'Loss: {actor_loss + critic_loss}')
return actor_loss + critic_loss
def act(
self,
state: np.ndarray,
epsilon: Optional[float] = None) -> Union[int, np.integer]:
"""Apply the policy for a ROS inference service request."""
_ = epsilon # Unused by REINFORCE
dist_parameters, _ = self._neural_net(state)
return tf.random.categorical(dist_parameters, 1)[0, 0].numpy()
def transfer_gradients(
self,
request: Gradients.Request) -> Gradients.Request:
"""Transfer calculated gradients to Gradients srv file."""
request.layer.input_layer = (self._gradients[0].numpy()).flatten().tolist()
request.layer.hidden_0 = (self._gradients[1].numpy()).flatten().tolist()
request.layer.middle_0 = (self._gradients[2].numpy()).flatten().tolist()
request.layer.hidden_1 = (self._gradients[3].numpy()).flatten().tolist()
request.layer.output_layer = (self._gradients[4].numpy()).flatten().tolist()
request.layer.output = (self._gradients[5].numpy()).flatten().tolist()
request.layer.critic_output_layer = (self._gradients[6].numpy()).flatten().tolist()
request.layer.critic_output = (self._gradients[7].numpy()).flatten().tolist()
return request
def parse_and_set_policy_weights(
self,
response: Weights.Response()) -> None:
"""Parse and set neural network weights from srv response."""
weights = []
weights.append(
np.array(response.layer.input_layer).reshape(
self._n_states,
self._hidden_layer_sizes[0]))
weights.append(np.array(response.layer.hidden_0))
weights.append(np.array(response.layer.middle_0).reshape(
self._hidden_layer_sizes[0],
self._hidden_layer_sizes[1]))
weights.append(np.array(response.layer.hidden_1))
weights.append(np.array(response.layer.output_layer).reshape(
self._hidden_layer_sizes[1],
self._n_actions))
weights.append(np.array(response.layer.output))
weights.append(np.array(response.layer.critic_output_layer).reshape(
self._hidden_layer_sizes[1],
1))
weights.append(np.array(response.layer.critic_output))
self.set_policy_weights(weights)
class WorkerPolicyActorCriticDual(WorkerPolicyREINFORCE):
    """Actor-Critic policy with separate actor and critic networks."""

    def __init__(
            self,
            n_states: int,
            n_actions: int,
            alpha: float,
            gamma: float,
            hidden_layer_sizes: List[int],
            use_gpu: bool = False) -> None:
        """Initialize the dual-network Actor-Critic policy.

        :param n_states: Size of the (flattened) state vector.
        :param n_actions: Number of discrete actions.
        :param alpha: Learning rate.
        :param gamma: Reward discount factor.
        :param hidden_layer_sizes: Units per hidden layer.
        :param use_gpu: Whether to run the networks on a GPU.
        """
        super().__init__(
            n_states, n_actions, alpha, gamma, hidden_layer_sizes, use_gpu)
        # Separate actor and critic networks replace the REINFORCE model.
        self._neural_net = ActorModel(n_states, n_actions, hidden_layer_sizes)
        self._critic_net = CriticModel(n_states, hidden_layer_sizes)
        # Huber loss (sum reduction) for the critic value-regression term.
        self._loss_function = keras.losses.Huber(
            reduction=keras.losses.Reduction.SUM)
        # Build both models so weights exist before transfer or inference.
        self._neural_net.build((1, n_states))
        self._critic_net.build((1, n_states))
        # Gradients of the critic network, populated by train_critic().
        self._critic_gradients: List[np.ndarray] = []

    @property
    def atype(self) -> str:
        """Return type of RL algorithm as string."""
        return 'A2C'

    def train(
            self,
            batch: Tuple[np.ndarray],
            batch_size: int = 16) -> None:
        """Train the actor-critic policy based on a sample batch."""
        # Bootstrap value estimates for the n-step return targets.
        next_values_pred = self._critic_net(batch[NEXT_STATE])
        returns = self.calculate_nstep_returns(
            batch, batch_size, next_values_pred)
        # Alternative targets: self.calculate_gae_returns(...) (GAE).
        self.train_actor(returns, batch, batch_size)
        self.train_critic(returns, batch, batch_size)

    def train_actor(
            self,
            returns: np.ndarray,
            batch: Tuple[np.ndarray],
            batch_size: int = 16) -> None:
        """Train the actor network based on a sample batch.

        Stores gradients in ``self._gradients``; does not apply them.
        """
        # Critic values are treated as a baseline: evaluated outside the
        # tape so no gradient flows into the critic here.
        values = self._critic_net(batch[STATE])
        with tf.GradientTape() as tape:
            action_logits = self._neural_net(batch[STATE])
            action_probs = tf.nn.softmax(action_logits)
            loss = self.calculate_actor_loss(
                batch[ACTION], action_probs, returns, values, batch_size)
        # Calculate gradients of the actor loss w.r.t. the actor network.
        self._gradients = tape.gradient(
            loss, self._neural_net.trainable_variables)

    def train_critic(
            self,
            returns: np.ndarray,
            batch: Tuple[np.ndarray],
            batch_size: int = 16) -> None:
        """Train the critic network based on a sample batch.

        Stores gradients in ``self._critic_gradients``; does not apply them.
        """
        _ = batch_size  # TODO: Batch size is not used.
        with tf.GradientTape() as tape:
            # Value estimates for the current states.
            values = self._critic_net(batch[STATE])
            loss = self.calculate_critic_loss(returns, values)
        # Calculate gradients of the critic loss w.r.t. the critic network.
        self._critic_gradients = tape.gradient(
            loss, self._critic_net.trainable_variables)

    def calculate_nstep_returns(
            self,
            batch: Tuple[np.ndarray],
            batch_size: int,
            next_v_pred: tf.Tensor) -> np.ndarray:
        """Calculate n-step advantage returns.

        The batch holds ``batch_size + 1`` transitions; returns are
        accumulated backwards through time, with the bootstrap zeroed at
        episode boundaries (``DONE`` flag).

        :param next_v_pred: Critic estimates for the next states; unused
            while the terminal bootstrap is fixed at zero.
        """
        _ = next_v_pred  # Bootstrap currently fixed at 0.0.
        ret_value = np.zeros_like(batch[REWARD])
        future_ret = 0.0
        for t in reversed(range(batch_size + 1)):
            ret_value[t] = future_ret = batch[REWARD][t] + \
                self._gamma * future_ret * (1 - batch[DONE][t])
        return ret_value

    def calculate_gae_returns(
            self,
            batch: Tuple[np.ndarray],
            batch_size: int,
            v_preds: tf.Tensor,
            next_v_pred: tf.Tensor,
            lam: float = 0.95) -> np.ndarray:
        """Calculate Generalized Advantage Estimation (GAE) returns.

        :param lam: GAE smoothing factor (lambda). Default 0.95 preserves
            the previously hard-coded behavior.
        """
        gaes = np.zeros_like(batch[REWARD])
        future_gae = 0.0
        for t in reversed(range(batch_size + 1)):
            # One-step TD error for transition t.
            delta = batch[REWARD][t] + self._gamma * next_v_pred[t] \
                * (1 - batch[DONE][t]) - v_preds[t]
            # Discounted lambda-weighted accumulation; reset at episode end.
            gaes[t] = future_gae = delta + \
                self._gamma * lam * (1 - batch[DONE][t]) * future_gae
        return gaes

    def calculate_actor_loss(
            self,
            action_batch: np.ndarray,
            action_probs: Union[np.ndarray, tf.Tensor],
            returns: Union[np.ndarray, tf.Tensor],
            values: Union[np.ndarray, tf.Tensor],
            batch_size: int) -> tf.Tensor:
        """Calculate the Actor network loss."""
        advantage = returns - values
        action_log_probs = tf.math.log(action_probs)
        # Pair each row index with its taken action so gather_nd selects
        # the log-probability of the action actually executed.
        # A constant tensor suffices here; no trainable variable is needed.
        idx = tf.constant(
            np.append(np.arange(batch_size + 1).reshape(batch_size + 1, 1),
                      action_batch, axis=1),
            dtype=tf.int32)
        act_log_probs = tf.reshape(
            tf.gather_nd(action_log_probs, idx), (batch_size + 1, 1))
        # NOTE: an entropy-bonus variant was trialled here previously;
        # re-introduce a `- beta * entropy` term if exploration stalls.
        actor_loss = -1 * tf.math.reduce_sum(act_log_probs * advantage)
        return actor_loss

    def calculate_critic_loss(
            self,
            returns: Union[np.ndarray, tf.Tensor],
            values: Union[np.ndarray, tf.Tensor]) -> tf.Tensor:
        """Calculate the Critic network loss (Huber between values and returns)."""
        critic_loss = self._loss_function(values, returns)
        return critic_loss

    def transfer_gradients(
            self,
            request: Gradients.Request,
            gradient_type: str = 'actor') -> Gradients.Request:
        """Transfer calculated gradients to Gradients srv file.

        :param gradient_type: 'actor' copies the actor gradients; any
            other value copies the critic gradients.
        """
        grads = self._gradients if gradient_type == 'actor' \
            else self._critic_gradients
        request.layer.input_layer = (grads[0].numpy()).flatten().tolist()
        request.layer.hidden_0 = (grads[1].numpy()).flatten().tolist()
        request.layer.middle_0 = (grads[2].numpy()).flatten().tolist()
        request.layer.hidden_1 = (grads[3].numpy()).flatten().tolist()
        request.layer.output_layer = (grads[4].numpy()).flatten().tolist()
        request.layer.output = (grads[5].numpy()).flatten().tolist()
        return request

    def parse_and_set_policy_weights(
            self,
            network_type: str,
            response: Weights.Response) -> None:
        """Parse and set neural network weights from srv response.

        :param network_type: 'actor' targets the actor network; any other
            value targets the critic network.
        """
        # Output head width differs: n_actions for the actor, 1 for critic.
        out_width = self._n_actions if network_type == 'actor' else 1
        weights = [
            np.array(response.layer.input_layer).reshape(
                self._n_states,
                self._hidden_layer_sizes[0]),
            np.array(response.layer.hidden_0),
            np.array(response.layer.middle_0).reshape(
                self._hidden_layer_sizes[0],
                self._hidden_layer_sizes[1]),
            np.array(response.layer.hidden_1),
            np.array(response.layer.output_layer).reshape(
                self._hidden_layer_sizes[1],
                out_width),
            np.array(response.layer.output),
        ]
        self.set_policy_weights(network_type, weights)

    def set_policy_weights(
            self,
            network_type: str,
            network_weights: List[np.ndarray]) -> None:
        """Set neural network weights for the chosen network from a list."""
        if network_type == 'actor':
            self._neural_net.set_weights(network_weights)
        else:
            self._critic_net.set_weights(network_weights)

    def load_model(self, path_to_model: str) -> None:
        """Load both saved models for inference or training use.

        Expects '<path>_actor' and '<path>_critic' saved-model artifacts.
        """
        self._neural_net = keras.models.load_model(path_to_model + '_actor')
        self._critic_net = keras.models.load_model(path_to_model + '_critic')
| 39.00905
| 114
| 0.611936
| 2,087
| 17,242
| 4.832774
| 0.109248
| 0.027662
| 0.035693
| 0.042138
| 0.811223
| 0.786238
| 0.753817
| 0.718521
| 0.706127
| 0.68134
| 0
| 0.010637
| 0.274794
| 17,242
| 441
| 115
| 39.097506
| 0.795985
| 0.191335
| 0
| 0.703072
| 0
| 0
| 0.008788
| 0
| 0
| 0
| 0
| 0.002268
| 0
| 1
| 0.075085
| false
| 0
| 0.044369
| 0
| 0.167235
| 0.010239
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
81adc7fb70ff10b7b04bec86bb465053f62e248b
| 29,109
|
py
|
Python
|
sdk/python/pulumi_alicloud/ram/account_password_policy.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 42
|
2019-03-18T06:34:37.000Z
|
2022-03-24T07:08:57.000Z
|
sdk/python/pulumi_alicloud/ram/account_password_policy.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 152
|
2019-04-15T21:03:44.000Z
|
2022-03-29T18:00:57.000Z
|
sdk/python/pulumi_alicloud/ram/account_password_policy.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2020-08-26T17:30:07.000Z
|
2021-07-05T01:37:45.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['AccountPasswordPolicyArgs', 'AccountPasswordPolicy']
@pulumi.input_type
class AccountPasswordPolicyArgs:
    def __init__(__self__, *,
                 hard_expiry: Optional[pulumi.Input[bool]] = None,
                 max_login_attempts: Optional[pulumi.Input[int]] = None,
                 max_password_age: Optional[pulumi.Input[int]] = None,
                 minimum_password_length: Optional[pulumi.Input[int]] = None,
                 password_reuse_prevention: Optional[pulumi.Input[int]] = None,
                 require_lowercase_characters: Optional[pulumi.Input[bool]] = None,
                 require_numbers: Optional[pulumi.Input[bool]] = None,
                 require_symbols: Optional[pulumi.Input[bool]] = None,
                 require_uppercase_characters: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing a AccountPasswordPolicy resource.
        :param pulumi.Input[bool] hard_expiry: Specifies if a password can expire in a hard way. Default to false.
        :param pulumi.Input[int] max_login_attempts: Maximum logon attempts with an incorrect password within an hour. Valid value range: [0-32]. Default to 5.
        :param pulumi.Input[int] max_password_age: The number of days after which password expires. A value of 0 indicates that the password never expires. Valid value range: [0-1095]. Default to 0.
        :param pulumi.Input[int] minimum_password_length: Minimal required length of password for a user. Valid value range: [8-32]. Default to 12.
        :param pulumi.Input[int] password_reuse_prevention: User is not allowed to use the latest number of passwords specified in this parameter. A value of 0 indicates the password history check policy is disabled. Valid value range: [0-24]. Default to 0.
        :param pulumi.Input[bool] require_lowercase_characters: Specifies if the occurrence of a lowercase character in the password is mandatory. Default to true.
        :param pulumi.Input[bool] require_numbers: Specifies if the occurrence of a number in the password is mandatory. Default to true.
        :param pulumi.Input[bool] require_symbols: Specifies if the occurrence of a special character in the password is mandatory. Default to true.
        :param pulumi.Input[bool] require_uppercase_characters: Specifies if the occurrence of an uppercase character in the password is mandatory. Default to true.
        """
        if hard_expiry is not None:
            pulumi.set(__self__, "hard_expiry", hard_expiry)
        if max_login_attempts is not None:
            pulumi.set(__self__, "max_login_attempts", max_login_attempts)
        if max_password_age is not None:
            pulumi.set(__self__, "max_password_age", max_password_age)
        if minimum_password_length is not None:
            pulumi.set(__self__, "minimum_password_length", minimum_password_length)
        if password_reuse_prevention is not None:
            pulumi.set(__self__, "password_reuse_prevention", password_reuse_prevention)
        if require_lowercase_characters is not None:
            pulumi.set(__self__, "require_lowercase_characters", require_lowercase_characters)
        if require_numbers is not None:
            pulumi.set(__self__, "require_numbers", require_numbers)
        if require_symbols is not None:
            pulumi.set(__self__, "require_symbols", require_symbols)
        if require_uppercase_characters is not None:
            pulumi.set(__self__, "require_uppercase_characters", require_uppercase_characters)

    @property
    @pulumi.getter(name="hardExpiry")
    def hard_expiry(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies if a password can expire in a hard way. Default to false.
        """
        return pulumi.get(self, "hard_expiry")

    @hard_expiry.setter
    def hard_expiry(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "hard_expiry", value)

    @property
    @pulumi.getter(name="maxLoginAttempts")
    def max_login_attempts(self) -> Optional[pulumi.Input[int]]:
        """
        Maximum logon attempts with an incorrect password within an hour. Valid value range: [0-32]. Default to 5.
        """
        return pulumi.get(self, "max_login_attempts")

    @max_login_attempts.setter
    def max_login_attempts(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_login_attempts", value)

    @property
    @pulumi.getter(name="maxPasswordAge")
    def max_password_age(self) -> Optional[pulumi.Input[int]]:
        """
        The number of days after which password expires. A value of 0 indicates that the password never expires. Valid value range: [0-1095]. Default to 0.
        """
        return pulumi.get(self, "max_password_age")

    @max_password_age.setter
    def max_password_age(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_password_age", value)

    @property
    @pulumi.getter(name="minimumPasswordLength")
    def minimum_password_length(self) -> Optional[pulumi.Input[int]]:
        """
        Minimal required length of password for a user. Valid value range: [8-32]. Default to 12.
        """
        return pulumi.get(self, "minimum_password_length")

    @minimum_password_length.setter
    def minimum_password_length(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "minimum_password_length", value)

    @property
    @pulumi.getter(name="passwordReusePrevention")
    def password_reuse_prevention(self) -> Optional[pulumi.Input[int]]:
        """
        User is not allowed to use the latest number of passwords specified in this parameter. A value of 0 indicates the password history check policy is disabled. Valid value range: [0-24]. Default to 0.
        """
        return pulumi.get(self, "password_reuse_prevention")

    @password_reuse_prevention.setter
    def password_reuse_prevention(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "password_reuse_prevention", value)

    @property
    @pulumi.getter(name="requireLowercaseCharacters")
    def require_lowercase_characters(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies if the occurrence of a lowercase character in the password is mandatory. Default to true.
        """
        return pulumi.get(self, "require_lowercase_characters")

    @require_lowercase_characters.setter
    def require_lowercase_characters(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "require_lowercase_characters", value)

    @property
    @pulumi.getter(name="requireNumbers")
    def require_numbers(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies if the occurrence of a number in the password is mandatory. Default to true.
        """
        return pulumi.get(self, "require_numbers")

    @require_numbers.setter
    def require_numbers(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "require_numbers", value)

    @property
    @pulumi.getter(name="requireSymbols")
    def require_symbols(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies if the occurrence of a special character in the password is mandatory. Default to true.
        """
        return pulumi.get(self, "require_symbols")

    @require_symbols.setter
    def require_symbols(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "require_symbols", value)

    @property
    @pulumi.getter(name="requireUppercaseCharacters")
    def require_uppercase_characters(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies if the occurrence of an uppercase character in the password is mandatory. Default to true.
        """
        return pulumi.get(self, "require_uppercase_characters")

    @require_uppercase_characters.setter
    def require_uppercase_characters(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "require_uppercase_characters", value)
@pulumi.input_type
class _AccountPasswordPolicyState:
    def __init__(__self__, *,
                 hard_expiry: Optional[pulumi.Input[bool]] = None,
                 max_login_attempts: Optional[pulumi.Input[int]] = None,
                 max_password_age: Optional[pulumi.Input[int]] = None,
                 minimum_password_length: Optional[pulumi.Input[int]] = None,
                 password_reuse_prevention: Optional[pulumi.Input[int]] = None,
                 require_lowercase_characters: Optional[pulumi.Input[bool]] = None,
                 require_numbers: Optional[pulumi.Input[bool]] = None,
                 require_symbols: Optional[pulumi.Input[bool]] = None,
                 require_uppercase_characters: Optional[pulumi.Input[bool]] = None):
        """
        Input properties used for looking up and filtering AccountPasswordPolicy resources.
        :param pulumi.Input[bool] hard_expiry: Specifies if a password can expire in a hard way. Default to false.
        :param pulumi.Input[int] max_login_attempts: Maximum logon attempts with an incorrect password within an hour. Valid value range: [0-32]. Default to 5.
        :param pulumi.Input[int] max_password_age: The number of days after which password expires. A value of 0 indicates that the password never expires. Valid value range: [0-1095]. Default to 0.
        :param pulumi.Input[int] minimum_password_length: Minimal required length of password for a user. Valid value range: [8-32]. Default to 12.
        :param pulumi.Input[int] password_reuse_prevention: User is not allowed to use the latest number of passwords specified in this parameter. A value of 0 indicates the password history check policy is disabled. Valid value range: [0-24]. Default to 0.
        :param pulumi.Input[bool] require_lowercase_characters: Specifies if the occurrence of a lowercase character in the password is mandatory. Default to true.
        :param pulumi.Input[bool] require_numbers: Specifies if the occurrence of a number in the password is mandatory. Default to true.
        :param pulumi.Input[bool] require_symbols: Specifies if the occurrence of a special character in the password is mandatory. Default to true.
        :param pulumi.Input[bool] require_uppercase_characters: Specifies if the occurrence of an uppercase character in the password is mandatory. Default to true.
        """
        if hard_expiry is not None:
            pulumi.set(__self__, "hard_expiry", hard_expiry)
        if max_login_attempts is not None:
            pulumi.set(__self__, "max_login_attempts", max_login_attempts)
        if max_password_age is not None:
            pulumi.set(__self__, "max_password_age", max_password_age)
        if minimum_password_length is not None:
            pulumi.set(__self__, "minimum_password_length", minimum_password_length)
        if password_reuse_prevention is not None:
            pulumi.set(__self__, "password_reuse_prevention", password_reuse_prevention)
        if require_lowercase_characters is not None:
            pulumi.set(__self__, "require_lowercase_characters", require_lowercase_characters)
        if require_numbers is not None:
            pulumi.set(__self__, "require_numbers", require_numbers)
        if require_symbols is not None:
            pulumi.set(__self__, "require_symbols", require_symbols)
        if require_uppercase_characters is not None:
            pulumi.set(__self__, "require_uppercase_characters", require_uppercase_characters)

    @property
    @pulumi.getter(name="hardExpiry")
    def hard_expiry(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies if a password can expire in a hard way. Default to false.
        """
        return pulumi.get(self, "hard_expiry")

    @hard_expiry.setter
    def hard_expiry(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "hard_expiry", value)

    @property
    @pulumi.getter(name="maxLoginAttempts")
    def max_login_attempts(self) -> Optional[pulumi.Input[int]]:
        """
        Maximum logon attempts with an incorrect password within an hour. Valid value range: [0-32]. Default to 5.
        """
        return pulumi.get(self, "max_login_attempts")

    @max_login_attempts.setter
    def max_login_attempts(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_login_attempts", value)

    @property
    @pulumi.getter(name="maxPasswordAge")
    def max_password_age(self) -> Optional[pulumi.Input[int]]:
        """
        The number of days after which password expires. A value of 0 indicates that the password never expires. Valid value range: [0-1095]. Default to 0.
        """
        return pulumi.get(self, "max_password_age")

    @max_password_age.setter
    def max_password_age(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_password_age", value)

    @property
    @pulumi.getter(name="minimumPasswordLength")
    def minimum_password_length(self) -> Optional[pulumi.Input[int]]:
        """
        Minimal required length of password for a user. Valid value range: [8-32]. Default to 12.
        """
        return pulumi.get(self, "minimum_password_length")

    @minimum_password_length.setter
    def minimum_password_length(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "minimum_password_length", value)

    @property
    @pulumi.getter(name="passwordReusePrevention")
    def password_reuse_prevention(self) -> Optional[pulumi.Input[int]]:
        """
        User is not allowed to use the latest number of passwords specified in this parameter. A value of 0 indicates the password history check policy is disabled. Valid value range: [0-24]. Default to 0.
        """
        return pulumi.get(self, "password_reuse_prevention")

    @password_reuse_prevention.setter
    def password_reuse_prevention(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "password_reuse_prevention", value)

    @property
    @pulumi.getter(name="requireLowercaseCharacters")
    def require_lowercase_characters(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies if the occurrence of a lowercase character in the password is mandatory. Default to true.
        """
        return pulumi.get(self, "require_lowercase_characters")

    @require_lowercase_characters.setter
    def require_lowercase_characters(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "require_lowercase_characters", value)

    @property
    @pulumi.getter(name="requireNumbers")
    def require_numbers(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies if the occurrence of a number in the password is mandatory. Default to true.
        """
        return pulumi.get(self, "require_numbers")

    @require_numbers.setter
    def require_numbers(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "require_numbers", value)

    @property
    @pulumi.getter(name="requireSymbols")
    def require_symbols(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies if the occurrence of a special character in the password is mandatory. Default to true.
        """
        return pulumi.get(self, "require_symbols")

    @require_symbols.setter
    def require_symbols(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "require_symbols", value)

    @property
    @pulumi.getter(name="requireUppercaseCharacters")
    def require_uppercase_characters(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies if the occurrence of an uppercase character in the password is mandatory. Default to true.
        """
        return pulumi.get(self, "require_uppercase_characters")

    @require_uppercase_characters.setter
    def require_uppercase_characters(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "require_uppercase_characters", value)
class AccountPasswordPolicy(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 hard_expiry: Optional[pulumi.Input[bool]] = None,
                 max_login_attempts: Optional[pulumi.Input[int]] = None,
                 max_password_age: Optional[pulumi.Input[int]] = None,
                 minimum_password_length: Optional[pulumi.Input[int]] = None,
                 password_reuse_prevention: Optional[pulumi.Input[int]] = None,
                 require_lowercase_characters: Optional[pulumi.Input[bool]] = None,
                 require_numbers: Optional[pulumi.Input[bool]] = None,
                 require_symbols: Optional[pulumi.Input[bool]] = None,
                 require_uppercase_characters: Optional[pulumi.Input[bool]] = None,
                 __props__=None):
        """
        ## Import
        RAM account password policy can be imported using the `id`, e.g. bash
        ```sh
         $ pulumi import alicloud:ram/accountPasswordPolicy:AccountPasswordPolicy example ram-account-password-policy
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] hard_expiry: Specifies if a password can expire in a hard way. Default to false.
        :param pulumi.Input[int] max_login_attempts: Maximum logon attempts with an incorrect password within an hour. Valid value range: [0-32]. Default to 5.
        :param pulumi.Input[int] max_password_age: The number of days after which password expires. A value of 0 indicates that the password never expires. Valid value range: [0-1095]. Default to 0.
        :param pulumi.Input[int] minimum_password_length: Minimal required length of password for a user. Valid value range: [8-32]. Default to 12.
        :param pulumi.Input[int] password_reuse_prevention: User is not allowed to use the latest number of passwords specified in this parameter. A value of 0 indicates the password history check policy is disabled. Valid value range: [0-24]. Default to 0.
        :param pulumi.Input[bool] require_lowercase_characters: Specifies if the occurrence of a lowercase character in the password is mandatory. Default to true.
        :param pulumi.Input[bool] require_numbers: Specifies if the occurrence of a number in the password is mandatory. Default to true.
        :param pulumi.Input[bool] require_symbols: Specifies if the occurrence of a special character in the password is mandatory. Default to true.
        :param pulumi.Input[bool] require_uppercase_characters: Specifies if the occurrence of an uppercase character in the password is mandatory. Default to true.
        """
        # Typing-only overload: the runtime body lives in _internal_init.
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[AccountPasswordPolicyArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        ## Import
        RAM account password policy can be imported using the `id`, e.g. bash
        ```sh
         $ pulumi import alicloud:ram/accountPasswordPolicy:AccountPasswordPolicy example ram-account-password-policy
        ```
        :param str resource_name: The name of the resource.
        :param AccountPasswordPolicyArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        # Typing-only overload: the runtime body lives in _internal_init.
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime constructor: decide which overload shape the caller used
        # (args object vs. keyword arguments) and forward to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(AccountPasswordPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 hard_expiry: Optional[pulumi.Input[bool]] = None,
                 max_login_attempts: Optional[pulumi.Input[int]] = None,
                 max_password_age: Optional[pulumi.Input[int]] = None,
                 minimum_password_length: Optional[pulumi.Input[int]] = None,
                 password_reuse_prevention: Optional[pulumi.Input[int]] = None,
                 require_lowercase_characters: Optional[pulumi.Input[bool]] = None,
                 require_numbers: Optional[pulumi.Input[bool]] = None,
                 require_symbols: Optional[pulumi.Input[bool]] = None,
                 require_uppercase_characters: Optional[pulumi.Input[bool]] = None,
                 __props__=None):
        """Register the resource with the Pulumi engine (shared body of both __init__ overloads)."""
        # Normalize and validate the resource options.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means "adopt an existing resource": __props__ is then
        # supplied by get(); when creating, build the props from arguments.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = AccountPasswordPolicyArgs.__new__(AccountPasswordPolicyArgs)
            __props__.__dict__["hard_expiry"] = hard_expiry
            __props__.__dict__["max_login_attempts"] = max_login_attempts
            __props__.__dict__["max_password_age"] = max_password_age
            __props__.__dict__["minimum_password_length"] = minimum_password_length
            __props__.__dict__["password_reuse_prevention"] = password_reuse_prevention
            __props__.__dict__["require_lowercase_characters"] = require_lowercase_characters
            __props__.__dict__["require_numbers"] = require_numbers
            __props__.__dict__["require_symbols"] = require_symbols
            __props__.__dict__["require_uppercase_characters"] = require_uppercase_characters
        super(AccountPasswordPolicy, __self__).__init__(
            'alicloud:ram/accountPasswordPolicy:AccountPasswordPolicy',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
hard_expiry: Optional[pulumi.Input[bool]] = None,
max_login_attempts: Optional[pulumi.Input[int]] = None,
max_password_age: Optional[pulumi.Input[int]] = None,
minimum_password_length: Optional[pulumi.Input[int]] = None,
password_reuse_prevention: Optional[pulumi.Input[int]] = None,
require_lowercase_characters: Optional[pulumi.Input[bool]] = None,
require_numbers: Optional[pulumi.Input[bool]] = None,
require_symbols: Optional[pulumi.Input[bool]] = None,
require_uppercase_characters: Optional[pulumi.Input[bool]] = None) -> 'AccountPasswordPolicy':
"""
Get an existing AccountPasswordPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] hard_expiry: Specifies if a password can expire in a hard way. Default to false.
:param pulumi.Input[int] max_login_attempts: Maximum logon attempts with an incorrect password within an hour. Valid value range: [0-32]. Default to 5.
:param pulumi.Input[int] max_password_age: The number of days after which password expires. A value of 0 indicates that the password never expires. Valid value range: [0-1095]. Default to 0.
:param pulumi.Input[int] minimum_password_length: Minimal required length of password for a user. Valid value range: [8-32]. Default to 12.
:param pulumi.Input[int] password_reuse_prevention: User is not allowed to use the latest number of passwords specified in this parameter. A value of 0 indicates the password history check policy is disabled. Valid value range: [0-24]. Default to 0.
:param pulumi.Input[bool] require_lowercase_characters: Specifies if the occurrence of a lowercase character in the password is mandatory. Default to true.
:param pulumi.Input[bool] require_numbers: Specifies if the occurrence of a number in the password is mandatory. Default to true.
:param pulumi.Input[bool] require_symbols: (Optional Specifies if the occurrence of a special character in the password is mandatory. Default to true.
:param pulumi.Input[bool] require_uppercase_characters: Specifies if the occurrence of an uppercase character in the password is mandatory. Default to true.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _AccountPasswordPolicyState.__new__(_AccountPasswordPolicyState)
__props__.__dict__["hard_expiry"] = hard_expiry
__props__.__dict__["max_login_attempts"] = max_login_attempts
__props__.__dict__["max_password_age"] = max_password_age
__props__.__dict__["minimum_password_length"] = minimum_password_length
__props__.__dict__["password_reuse_prevention"] = password_reuse_prevention
__props__.__dict__["require_lowercase_characters"] = require_lowercase_characters
__props__.__dict__["require_numbers"] = require_numbers
__props__.__dict__["require_symbols"] = require_symbols
__props__.__dict__["require_uppercase_characters"] = require_uppercase_characters
return AccountPasswordPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="hardExpiry")
def hard_expiry(self) -> pulumi.Output[Optional[bool]]:
"""
Specifies if a password can expire in a hard way. Default to false.
"""
return pulumi.get(self, "hard_expiry")
@property
@pulumi.getter(name="maxLoginAttempts")
def max_login_attempts(self) -> pulumi.Output[Optional[int]]:
"""
Maximum logon attempts with an incorrect password within an hour. Valid value range: [0-32]. Default to 5.
"""
return pulumi.get(self, "max_login_attempts")
@property
@pulumi.getter(name="maxPasswordAge")
def max_password_age(self) -> pulumi.Output[Optional[int]]:
"""
The number of days after which password expires. A value of 0 indicates that the password never expires. Valid value range: [0-1095]. Default to 0.
"""
return pulumi.get(self, "max_password_age")
@property
@pulumi.getter(name="minimumPasswordLength")
def minimum_password_length(self) -> pulumi.Output[Optional[int]]:
"""
Minimal required length of password for a user. Valid value range: [8-32]. Default to 12.
"""
return pulumi.get(self, "minimum_password_length")
@property
@pulumi.getter(name="passwordReusePrevention")
def password_reuse_prevention(self) -> pulumi.Output[Optional[int]]:
"""
User is not allowed to use the latest number of passwords specified in this parameter. A value of 0 indicates the password history check policy is disabled. Valid value range: [0-24]. Default to 0.
"""
return pulumi.get(self, "password_reuse_prevention")
@property
@pulumi.getter(name="requireLowercaseCharacters")
def require_lowercase_characters(self) -> pulumi.Output[Optional[bool]]:
"""
Specifies if the occurrence of a lowercase character in the password is mandatory. Default to true.
"""
return pulumi.get(self, "require_lowercase_characters")
@property
@pulumi.getter(name="requireNumbers")
def require_numbers(self) -> pulumi.Output[Optional[bool]]:
"""
Specifies if the occurrence of a number in the password is mandatory. Default to true.
"""
return pulumi.get(self, "require_numbers")
@property
@pulumi.getter(name="requireSymbols")
def require_symbols(self) -> pulumi.Output[Optional[bool]]:
"""
(Optional Specifies if the occurrence of a special character in the password is mandatory. Default to true.
"""
return pulumi.get(self, "require_symbols")
@property
@pulumi.getter(name="requireUppercaseCharacters")
def require_uppercase_characters(self) -> pulumi.Output[Optional[bool]]:
"""
Specifies if the occurrence of an uppercase character in the password is mandatory. Default to true.
"""
return pulumi.get(self, "require_uppercase_characters")
| 54.307836
| 257
| 0.69518
| 3,548
| 29,109
| 5.470688
| 0.05637
| 0.068573
| 0.079289
| 0.053323
| 0.908398
| 0.897476
| 0.897476
| 0.892375
| 0.889748
| 0.881247
| 0
| 0.006488
| 0.216394
| 29,109
| 535
| 258
| 54.409346
| 0.844454
| 0.349514
| 0
| 0.845912
| 1
| 0
| 0.134095
| 0.075798
| 0
| 0
| 0
| 0
| 0
| 1
| 0.163522
| false
| 0.286164
| 0.015723
| 0
| 0.27673
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
81c5f5684cf9b3da0fcab6b865a28e4a394e93ab
| 86,043
|
py
|
Python
|
tests/functional/transactions/test_read_consist_sttm_restart_max_limit.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
tests/functional/transactions/test_read_consist_sttm_restart_max_limit.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
tests/functional/transactions/test_read_consist_sttm_restart_max_limit.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
#coding:utf-8
#
# id: functional.transactions.read_consist_sttm_restart_max_limit
# title: READ CONSISTENCY. Maximal number of statement-level restarts must be 10.
# decription:
# Initial article for reading:
# https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852
# Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here
# to: LOCKER-1, WORKER and LOCKER-2 respectively.
#
# See also: doc\\README.read_consistency.md
# Letter from Vlad: 15.09.2020 20:04 // subj "read consistency // addi test(s)"
#
# ::: NB :::
# This test uses script %FBT_REPO%
# iles
# ead-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests.
# Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual
# results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any
# outcome of test.
#
# Detailed description can be found in "read-consist-sttm-restart-on-update-04.fbt", this test is based on the same ideas:
# * initial script add records with ID = 1...12 and does commit;
#                 * start locker-1 which catches record with ID = 1 that is to be involved further in the cursor of worker;
# * start worker DML which must change records in descending order of ID, starting with ID=2; worker must write ID = ID * 100 for each row;
# * start locker-2 which changes record with ID=12 by assigning this ID to -12, makes COMMIT and locks this record again (makes UPDATE w/o commit);
# * locker-1 releases record with ID=1, then changes record with ID=11 by assigning this ID to -11, makes COMMIT and locks this record again;
# * locker-2 releases record with ID=-12, then changes record with ID=10 by assigning this ID to -10, makes COMMIT and locks this record again;
# * ... and so on, until number of such actions iterations less 10 or 11 (see below) ...
#
# Each UPDATE that is performed by lockers (starting from ID=11) produces new ID (-11, -10, -9, ...) that was not present in the scope which worker
# could see before this action. This forces worker to make statement-level restart.
#
# When number of such new IDs is less than 10 then worker must finish its job successfully.
#                 But if this number is 11 then worker must raise exception (SQLSTATE = 40001 / deadlock / update conflicts) and rollback all changes.
#
# Test verifies both cases, using loop with TWO iterations (see 'main_iter' below): first for 10 and second to 11 records that are to be updated.
# After each iteration we do queries to the table TEST and to the view V_WORKER_LOG which contains data generated by trigger TLOG_DONE for logging.
#
# Test verifies restart number for three modes of WORKER job: UPDATE, MERGE, DELETE and SELECT WITH LOCK (see loop for checked_DML: 'upd', 'mer', 'del', 'lok').
# NOTE-1.
# For 'SELECT WITH LOCK' we must provide that no rows will be returned to client while worker is waiting for records.
# EXECUTE BLOCK with for-select which does nothing is used for this.
#
# NOTE-2.
# SELECT WITH LOCK does not allow to use VIEW as subject of query (raises "-WITH LOCK can be used only with a single physical table").
# This error is expected in current FB versions and its text presents in expected_std* section.
#
# Checked on 4.0.0.2195 SS/CS.
# 29.09.2020: added for-loop in order to check different target objects: TABLE ('test') and VIEW ('v_test'), see 'target_object_type'.
#
#
# tracker_id:
# min_versions: ['4.0']
# versions: 4.0
# qmid:
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 4.0
# resources: None

# Regex substitutions applied to the test output before comparison with the
# expected text: collapse '=' separators and runs of whitespace, and strip
# line/column positions and transaction numbers, which vary between runs.
substitutions_1 = [('=', ''), ('[ \t]+', ' '), ('.*After line \\d+.*', ''), ('.*[\\-]?concurrent transaction number is \\d+', 'concurrent transaction number is'), ('.*At\\s+block\\s+line(:)?\\s+\\d+(,)?\\s+col(:)?\\s+\\d+', ''), ('.After\\s+line\\s+\\d+\\s+.*', '')]

# No per-test init DDL here; the comments above note that common DDL comes
# from an external read-consist-sttm-restart-DDL.sql script.
init_script_1 = """"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
# import os
# import sys
# import subprocess
# from subprocess import Popen
# import shutil
# from fdb import services
# import time
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
# # How long LOCKER must wait before raise update-conflict error
#  # (useful for debug in case of some error in this test algorithm):
# LOCKER_LOCK_TIMEOUT = 5
#
# ##############################
#  # Temporarily, for debug only:
# this_fdb=db_conn.database_name
# this_dbg=os.path.splitext(this_fdb)[0] + '.4debug.fdb'
# ##############################
#
# db_conn.close()
# fb_home = services.connect(host='localhost').get_home_directory()
#
# #--------------------------------------------
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
# global os
# for f in f_names_list:
# if type(f) == file:
# del_name = f.name
# elif type(f) == str:
# del_name = f
# else:
# print('Unrecognized type of element:', f, ' - can not be treated as file.')
# del_name = None
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
# #--------------------------------------------
#
# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql')
#
# for target_object_type in('table', 'view'):
#
# target_obj = 'test' if target_object_type == 'table' else 'v_test'
#
# for checked_DML in('upd', 'mer', 'del', 'lok'):
# #for checked_DML in('lok',):
# worker_dml = "select 'UNKNOWN MODE' as msg from rdb$database"
# if checked_DML == 'upd':
# worker_dml = 'update %(target_obj)s set id = id * 100 where id <= 2 order by id DESC;' % locals()
# elif checked_DML == 'mer':
# worker_dml = 'merge into %(target_obj)s t using (select x.id from %(target_obj)s x where x.id <= 2 order by id DESC) s on t.id = s.id when matched then update set t.id = s.id * 100;' % locals()
# elif checked_DML == 'del':
# worker_dml = 'delete from %(target_obj)s where id <= 2 order by id DESC;' % locals()
# elif checked_DML == 'lok':
# # ::: NB :::
#          # We must SUPPRESS sending record to client for SELECT WITH LOCK, otherwise error
#          # deadlock/update conflict will raise immediately! Because of this, we enclose
# # such select into execute block which returns nothing:
# worker_dml = 'set term ^; execute block as declare c int; begin for select id from %(target_obj)s where id<=2 order by id desc with lock into c do begin end end^ set term ;^' % locals()
#
# for main_iter in (0,1):
# #for main_iter in (1,):
#
# ###################################################################################
# ### H O W M A N Y R E S T A R T S W E W A N T T O C H E C K ###
# ###################################################################################
# ROWS_TO_ADD = 10 + 2 * main_iter
#
#
# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w')
# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w')
#
# # RECREATION OF ALL DB OBJECTS:
# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err )
#
# flush_and_close(f_init_log)
# flush_and_close(f_init_err)
#
# sql_addi='''
# set term ^;
# execute block as
# begin
# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA');
# end
# ^
# set term ;^
# insert into %(target_obj)s(id, x) select row_number()over(),row_number()over() from rdb$types rows (2 + %(ROWS_TO_ADD)s); -- <<< INITIAL DATA
# commit;
# ''' % locals()
#
# runProgram('isql', [ dsn, '-q' ], sql_addi)
#
# locker_tpb = fdb.TPB()
# locker_tpb.lock_timeout = LOCKER_LOCK_TIMEOUT
# locker_tpb.lock_resolution = fdb.isc_tpb_wait
#
# con_lock_1 = fdb.connect( dsn = dsn, isolation_level=locker_tpb )
# con_lock_2 = fdb.connect( dsn = dsn, isolation_level=locker_tpb )
#
# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" )
# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" )
#
# #########################
# ### L O C K E R - 1 ###
# #########################
#
# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 1' % locals() )
#
# sql_text='''
# connect '%(dsn)s';
# set list on;
# set autoddl off;
# set term ^;
# execute block as
# begin
# rdb$set_context('USER_SESSION','WHO', 'WORKER');
# end
# ^
# set term ;^
# commit;
# SET KEEP_TRAN_PARAMS ON;
# set transaction read committed read consistency;
# set list off;
# set wng off;
#
# set count on;
# %(worker_dml)s -- UPDATE or DELETE or SELECT WITH LOCK; all ORDER BY ID DESC; MUST HANG BECAUSE OF LOCKERs
#
# -- check results:
# -- ###############
#
# select id from %(target_obj)s order by id;
#
# select v.old_id, v.op, v.snap_no_rank
# from v_worker_log v
# where v.op = iif( '%(checked_DML)s' = 'mer', 'upd', '%(checked_DML)s'); -- 'UPD' or 'DEL'; for 'SELECT WITH LOCK' no records will be in v_worker_log.
#
#
# --set width who 10;
# -- DO NOT check this! Values can differ here from one run to another!
# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id;
# rollback;
#
# ''' % dict(globals(), **locals())
#
# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_max_limit.sql'), 'w')
# f_worker_sql.write(sql_text)
# flush_and_close(f_worker_sql)
#
#
# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w')
# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w')
#
# ############################################################################
# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ###
# ############################################################################
#
# p_worker = Popen( [ context['isql_path'], '-pag', '9999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err)
# time.sleep(1)
#
# cur_lock_1 = con_lock_1.cursor()
# cur_lock_2 = con_lock_2.cursor()
# sttm = 'update %(target_obj)s set id = ? where abs( id ) = ?' % locals()
#
#
# for i in range(0,ROWS_TO_ADD):
# v_id = 2 + ROWS_TO_ADD-i
# if i % 2 == 0:
# cur_lock_2.execute( sttm, ( -abs( v_id ), v_id, ) )
# con_lock_2.commit()
# cur_lock_2.execute( sttm, ( -abs( v_id ), v_id, ) )
# con_lock_1.commit()
# else:
# cur_lock_1.execute( sttm, ( -abs( v_id ), v_id, ) )
# con_lock_1.commit()
# cur_lock_1.execute( sttm, ( -abs( v_id ), v_id, ) )
# con_lock_2.commit()
#
# cur_lock_1.close()
# cur_lock_2.close()
#
# if ROWS_TO_ADD % 2 == 0:
# con_lock_2.commit()
# con_lock_1.commit()
# else:
# con_lock_1.commit()
# con_lock_2.commit()
#
# # Close lockers:
# ################
# for c in (con_lock_1, con_lock_2):
# c.close()
#
# # Here we wait for ISQL complete its mission:
# p_worker.wait()
#
# flush_and_close(f_worker_log)
# flush_and_close(f_worker_err)
#
# # CHECK RESULTS
# ###############
#
# print( 'target_object_type: %(target_object_type)s, checked_DML = %(checked_DML)s, iter = %(main_iter)s, restarts number to be tested: %(ROWS_TO_ADD)s' % locals() )
#
# with open(f_init_err.name,'r') as f:
# for line in f:
# if line:
# print( 'target_object_type: %(target_object_type)s, checked_DML = %(checked_DML)s, iter = %(main_iter)s, UNEXPECTED STDERR for initial SQL: %(line)s' % locals() )
#
# for f in (f_worker_log, f_worker_err):
# with open(f.name,'r') as g:
# for line in g:
# if line:
# logname = 'STDLOG' if f.name == f_worker_log.name else 'STDERR'
# print( 'target_object_type: %(target_object_type)s, checked_DML = %(checked_DML)s, iter = %(main_iter)s, worker %(logname)s: %(line)s' % locals() )
#
#
# #< for main_iter in (0,1)
# # < for checked_DML in ('upd', 'mer', 'del', 'lok')
# # < for target_object_type in ('table', 'view')
# # Cleanup.
# ##########
# time.sleep(1)
# cleanup( (f_init_log, f_init_err, f_worker_sql, f_worker_log, f_worker_err) )
#
# '''
# 'substitutions':[
# ('=','')
# ,('[ ]+',' ')
# ,('.*After line \\d+.*', '')
# ,('.*[\\-]?concurrent transaction number is \\d+', 'concurrent transaction number is')
# ,('.*At\\s+block\\s+line(:)?\\s+\\d+(,)?\\s+col(:)?\\s+\\d+', '')
# ]
#
# '''
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
target_object_type: table, checked_DML = upd, iter = 0, restarts number to be tested: 10
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: Records affected: 12
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG:
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: ID
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: =======
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -1200
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -1100
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -1000
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -900
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -800
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -700
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -600
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -500
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -400
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -300
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 100
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 200
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG:
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: Records affected: 12
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG:
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: OLD_ID OP SNAP_NO_RANK
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: ======= ====== =====================
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 1
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 2
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 2
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 3
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 3
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 4
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 4
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 5
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 5
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 6
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 6
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 7
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 7
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 8
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 8
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 9
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 9
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 10
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 10
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 11
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 11
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -3 UPD 11
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -4 UPD 11
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -5 UPD 11
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -6 UPD 11
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -7 UPD 11
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -8 UPD 11
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -9 UPD 11
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -10 UPD 11
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -11 UPD 11
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: -12 UPD 11
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG:
target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: Records affected: 31
target_object_type: table, checked_DML = upd, iter = 1, restarts number to be tested: 12
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: Records affected: 2
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG:
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: ID
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: =======
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: -14
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: -13
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: -12
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: -11
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: -10
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: -9
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: -8
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: -7
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: -6
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: -5
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: -4
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: -3
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 1
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 2
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG:
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: Records affected: 14
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG:
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: OLD_ID OP SNAP_NO_RANK
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: ======= ====== =====================
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 1
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 2
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 2
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 3
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 3
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 4
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 4
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 5
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 5
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 6
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 6
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 7
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 7
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 8
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 8
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 9
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 9
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 10
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 10
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 11
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 11
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG:
target_object_type: table, checked_DML = upd, iter = 1, worker STDLOG: Records affected: 21
target_object_type: table, checked_DML = upd, iter = 1, worker STDERR: Statement failed, SQLSTATE = 40001
target_object_type: table, checked_DML = upd, iter = 1, worker STDERR: deadlock
target_object_type: table, checked_DML = upd, iter = 1, worker STDERR: -update conflicts with concurrent update
target_object_type: table, checked_DML = upd, iter = 1, worker STDERR: -concurrent transaction number is 343
target_object_type: table, checked_DML = upd, iter = 1, worker STDERR: After line 18 in file C:\\FBTESTING\\qabt-repo mp mp_sttm_restart_max_limit.sql
target_object_type: table, checked_DML = mer, iter = 0, restarts number to be tested: 10
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: Records affected: 12
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG:
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: ID
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: =======
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -1200
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -1100
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -1000
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -900
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -800
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -700
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -600
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -500
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -400
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -300
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 100
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 200
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG:
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: Records affected: 12
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG:
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: OLD_ID OP SNAP_NO_RANK
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: ======= ====== =====================
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 1
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 2
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 2
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 3
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 3
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 4
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 4
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 5
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 5
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 6
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 6
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 7
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 7
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 8
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 8
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 9
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 9
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 10
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 10
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 11
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 11
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -3 UPD 11
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -4 UPD 11
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -5 UPD 11
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -6 UPD 11
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -7 UPD 11
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -8 UPD 11
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -9 UPD 11
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -10 UPD 11
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -11 UPD 11
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: -12 UPD 11
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG:
target_object_type: table, checked_DML = mer, iter = 0, worker STDLOG: Records affected: 31
target_object_type: table, checked_DML = mer, iter = 1, restarts number to be tested: 12
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: Records affected: 2
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG:
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: ID
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: =======
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: -14
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: -13
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: -12
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: -11
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: -10
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: -9
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: -8
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: -7
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: -6
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: -5
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: -4
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: -3
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 1
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 2
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG:
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: Records affected: 14
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG:
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: OLD_ID OP SNAP_NO_RANK
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: ======= ====== =====================
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 1
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 2
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 2
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 3
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 3
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 4
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 4
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 5
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 5
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 6
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 6
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 7
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 7
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 8
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 8
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 9
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 9
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 10
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 10
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 11
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 11
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG:
target_object_type: table, checked_DML = mer, iter = 1, worker STDLOG: Records affected: 21
target_object_type: table, checked_DML = mer, iter = 1, worker STDERR: Statement failed, SQLSTATE = 40001
target_object_type: table, checked_DML = mer, iter = 1, worker STDERR: deadlock
target_object_type: table, checked_DML = mer, iter = 1, worker STDERR: -update conflicts with concurrent update
target_object_type: table, checked_DML = mer, iter = 1, worker STDERR: -concurrent transaction number is 696
target_object_type: table, checked_DML = mer, iter = 1, worker STDERR: After line 18 in file C:\\FBTESTING\\qabt-repo mp mp_sttm_restart_max_limit.sql
target_object_type: table, checked_DML = del, iter = 0, restarts number to be tested: 10
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: Records affected: 12
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: Records affected: 0
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG:
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: OLD_ID OP SNAP_NO_RANK
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: ======= ====== =====================
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 1
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 2
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 2
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 3
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 3
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 4
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 4
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 5
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 5
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 6
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 6
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 7
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 7
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 8
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 8
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 9
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 9
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 10
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 10
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 11
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 11
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: -3 DEL 11
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: -4 DEL 11
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: -5 DEL 11
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: -6 DEL 11
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: -7 DEL 11
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: -8 DEL 11
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: -9 DEL 11
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: -10 DEL 11
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: -11 DEL 11
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: -12 DEL 11
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG:
target_object_type: table, checked_DML = del, iter = 0, worker STDLOG: Records affected: 31
target_object_type: table, checked_DML = del, iter = 1, restarts number to be tested: 12
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: Records affected: 2
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG:
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: ID
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: =======
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: -14
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: -13
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: -12
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: -11
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: -10
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: -9
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: -8
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: -7
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: -6
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: -5
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: -4
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: -3
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 1
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 2
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG:
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: Records affected: 14
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG:
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: OLD_ID OP SNAP_NO_RANK
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: ======= ====== =====================
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 1
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 2
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 2
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 3
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 3
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 4
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 4
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 5
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 5
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 6
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 6
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 7
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 7
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 8
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 8
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 9
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 9
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 10
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 10
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 11
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 11
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG:
target_object_type: table, checked_DML = del, iter = 1, worker STDLOG: Records affected: 21
target_object_type: table, checked_DML = del, iter = 1, worker STDERR: Statement failed, SQLSTATE = 40001
target_object_type: table, checked_DML = del, iter = 1, worker STDERR: deadlock
target_object_type: table, checked_DML = del, iter = 1, worker STDERR: -update conflicts with concurrent update
target_object_type: table, checked_DML = del, iter = 1, worker STDERR: -concurrent transaction number is 1049
target_object_type: table, checked_DML = del, iter = 1, worker STDERR: After line 18 in file C:\\FBTESTING\\qabt-repo mp mp_sttm_restart_max_limit.sql
target_object_type: table, checked_DML = lok, iter = 0, restarts number to be tested: 10
target_object_type: table, checked_DML = lok, iter = 0, worker STDLOG:
target_object_type: table, checked_DML = lok, iter = 0, worker STDLOG: ID
target_object_type: table, checked_DML = lok, iter = 0, worker STDLOG: =======
target_object_type: table, checked_DML = lok, iter = 0, worker STDLOG: -12
target_object_type: table, checked_DML = lok, iter = 0, worker STDLOG: -11
target_object_type: table, checked_DML = lok, iter = 0, worker STDLOG: -10
target_object_type: table, checked_DML = lok, iter = 0, worker STDLOG: -9
target_object_type: table, checked_DML = lok, iter = 0, worker STDLOG: -8
target_object_type: table, checked_DML = lok, iter = 0, worker STDLOG: -7
target_object_type: table, checked_DML = lok, iter = 0, worker STDLOG: -6
target_object_type: table, checked_DML = lok, iter = 0, worker STDLOG: -5
target_object_type: table, checked_DML = lok, iter = 0, worker STDLOG: -4
target_object_type: table, checked_DML = lok, iter = 0, worker STDLOG: -3
target_object_type: table, checked_DML = lok, iter = 0, worker STDLOG: 1
target_object_type: table, checked_DML = lok, iter = 0, worker STDLOG: 2
target_object_type: table, checked_DML = lok, iter = 0, worker STDLOG:
target_object_type: table, checked_DML = lok, iter = 0, worker STDLOG: Records affected: 12
target_object_type: table, checked_DML = lok, iter = 0, worker STDLOG: Records affected: 0
target_object_type: table, checked_DML = lok, iter = 1, restarts number to be tested: 12
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG:
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG: ID
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG: =======
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG: -14
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG: -13
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG: -12
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG: -11
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG: -10
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG: -9
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG: -8
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG: -7
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG: -6
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG: -5
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG: -4
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG: -3
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG: 1
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG: 2
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG:
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG: Records affected: 14
target_object_type: table, checked_DML = lok, iter = 1, worker STDLOG: Records affected: 0
target_object_type: table, checked_DML = lok, iter = 1, worker STDERR: Statement failed, SQLSTATE = 40001
target_object_type: table, checked_DML = lok, iter = 1, worker STDERR: deadlock
target_object_type: table, checked_DML = lok, iter = 1, worker STDERR: -update conflicts with concurrent update
target_object_type: table, checked_DML = lok, iter = 1, worker STDERR: -concurrent transaction number is 1282
target_object_type: table, checked_DML = lok, iter = 1, worker STDERR: -At block line: 1, col: 39
target_object_type: table, checked_DML = lok, iter = 1, worker STDERR: After line 19 in file C:\\FBTESTING\\qabt-repo mp mp_sttm_restart_max_limit.sql
target_object_type: view, checked_DML = upd, iter = 0, restarts number to be tested: 10
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: Records affected: 12
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG:
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: ID
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: =======
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -1200
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -1100
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -1000
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -900
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -800
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -700
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -600
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -500
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -400
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -300
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 100
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 200
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG:
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: Records affected: 12
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG:
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: OLD_ID OP SNAP_NO_RANK
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: ======= ====== =====================
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 1
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 2
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 2
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 3
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 3
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 4
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 4
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 5
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 5
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 6
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 6
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 7
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 7
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 8
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 8
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 9
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 9
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 10
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 10
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 2 UPD 11
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: 1 UPD 11
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -3 UPD 11
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -4 UPD 11
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -5 UPD 11
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -6 UPD 11
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -7 UPD 11
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -8 UPD 11
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -9 UPD 11
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -10 UPD 11
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -11 UPD 11
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: -12 UPD 11
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG:
target_object_type: view, checked_DML = upd, iter = 0, worker STDLOG: Records affected: 31
target_object_type: view, checked_DML = upd, iter = 1, restarts number to be tested: 12
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: Records affected: 2
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG:
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: ID
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: =======
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: -14
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: -13
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: -12
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: -11
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: -10
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: -9
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: -8
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: -7
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: -6
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: -5
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: -4
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: -3
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 1
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 2
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG:
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: Records affected: 14
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG:
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: OLD_ID OP SNAP_NO_RANK
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: ======= ====== =====================
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 1
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 2
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 2
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 3
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 3
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 4
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 4
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 5
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 5
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 6
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 6
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 7
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 7
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 8
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 8
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 9
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 9
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 10
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 10
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 2 UPD 11
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: 1 UPD 11
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG:
target_object_type: view, checked_DML = upd, iter = 1, worker STDLOG: Records affected: 21
target_object_type: view, checked_DML = upd, iter = 1, worker STDERR: Statement failed, SQLSTATE = 40001
target_object_type: view, checked_DML = upd, iter = 1, worker STDERR: deadlock
target_object_type: view, checked_DML = upd, iter = 1, worker STDERR: -update conflicts with concurrent update
target_object_type: view, checked_DML = upd, iter = 1, worker STDERR: -concurrent transaction number is 1630
target_object_type: view, checked_DML = upd, iter = 1, worker STDERR: After line 18 in file C:\\FBTESTING\\qabt-repo mp mp_sttm_restart_max_limit.sql
target_object_type: view, checked_DML = mer, iter = 0, restarts number to be tested: 10
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: Records affected: 12
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG:
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: ID
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: =======
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -1200
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -1100
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -1000
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -900
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -800
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -700
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -600
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -500
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -400
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -300
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 100
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 200
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG:
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: Records affected: 12
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG:
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: OLD_ID OP SNAP_NO_RANK
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: ======= ====== =====================
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 1
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 2
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 2
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 3
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 3
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 4
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 4
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 5
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 5
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 6
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 6
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 7
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 7
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 8
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 8
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 9
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 9
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 10
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 10
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 2 UPD 11
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: 1 UPD 11
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -3 UPD 11
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -4 UPD 11
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -5 UPD 11
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -6 UPD 11
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -7 UPD 11
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -8 UPD 11
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -9 UPD 11
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -10 UPD 11
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -11 UPD 11
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: -12 UPD 11
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG:
target_object_type: view, checked_DML = mer, iter = 0, worker STDLOG: Records affected: 31
target_object_type: view, checked_DML = mer, iter = 1, restarts number to be tested: 12
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: Records affected: 2
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG:
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: ID
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: =======
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: -14
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: -13
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: -12
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: -11
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: -10
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: -9
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: -8
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: -7
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: -6
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: -5
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: -4
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: -3
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 1
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 2
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG:
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: Records affected: 14
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG:
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: OLD_ID OP SNAP_NO_RANK
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: ======= ====== =====================
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 1
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 2
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 2
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 3
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 3
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 4
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 4
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 5
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 5
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 6
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 6
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 7
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 7
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 8
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 8
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 9
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 9
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 10
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 10
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 2 UPD 11
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: 1 UPD 11
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG:
target_object_type: view, checked_DML = mer, iter = 1, worker STDLOG: Records affected: 21
target_object_type: view, checked_DML = mer, iter = 1, worker STDERR: Statement failed, SQLSTATE = 40001
target_object_type: view, checked_DML = mer, iter = 1, worker STDERR: deadlock
target_object_type: view, checked_DML = mer, iter = 1, worker STDERR: -update conflicts with concurrent update
target_object_type: view, checked_DML = mer, iter = 1, worker STDERR: -concurrent transaction number is 1983
target_object_type: view, checked_DML = mer, iter = 1, worker STDERR: After line 18 in file C:\\FBTESTING\\qabt-repo mp mp_sttm_restart_max_limit.sql
target_object_type: view, checked_DML = del, iter = 0, restarts number to be tested: 10
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: Records affected: 12
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: Records affected: 0
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG:
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: OLD_ID OP SNAP_NO_RANK
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: ======= ====== =====================
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 1
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 2
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 2
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 3
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 3
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 4
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 4
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 5
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 5
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 6
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 6
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 7
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 7
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 8
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 8
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 9
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 9
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 10
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 10
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 2 DEL 11
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: 1 DEL 11
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: -3 DEL 11
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: -4 DEL 11
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: -5 DEL 11
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: -6 DEL 11
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: -7 DEL 11
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: -8 DEL 11
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: -9 DEL 11
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: -10 DEL 11
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: -11 DEL 11
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: -12 DEL 11
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG:
target_object_type: view, checked_DML = del, iter = 0, worker STDLOG: Records affected: 31
target_object_type: view, checked_DML = del, iter = 1, restarts number to be tested: 12
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: Records affected: 2
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG:
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: ID
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: =======
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: -14
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: -13
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: -12
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: -11
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: -10
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: -9
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: -8
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: -7
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: -6
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: -5
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: -4
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: -3
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 1
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 2
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG:
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: Records affected: 14
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG:
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: OLD_ID OP SNAP_NO_RANK
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: ======= ====== =====================
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 1
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 2
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 2
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 3
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 3
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 4
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 4
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 5
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 5
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 6
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 6
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 7
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 7
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 8
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 8
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 9
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 9
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 10
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 10
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 2 DEL 11
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: 1 DEL 11
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG:
target_object_type: view, checked_DML = del, iter = 1, worker STDLOG: Records affected: 21
target_object_type: view, checked_DML = del, iter = 1, worker STDERR: Statement failed, SQLSTATE = 40001
target_object_type: view, checked_DML = del, iter = 1, worker STDERR: deadlock
target_object_type: view, checked_DML = del, iter = 1, worker STDERR: -update conflicts with concurrent update
target_object_type: view, checked_DML = del, iter = 1, worker STDERR: -concurrent transaction number is 2336
target_object_type: view, checked_DML = del, iter = 1, worker STDERR: After line 18 in file C:\\FBTESTING\\qabt-repo mp mp_sttm_restart_max_limit.sql
target_object_type: view, checked_DML = lok, iter = 0, restarts number to be tested: 10
target_object_type: view, checked_DML = lok, iter = 0, worker STDLOG:
target_object_type: view, checked_DML = lok, iter = 0, worker STDLOG: ID
target_object_type: view, checked_DML = lok, iter = 0, worker STDLOG: =======
target_object_type: view, checked_DML = lok, iter = 0, worker STDLOG: 1
target_object_type: view, checked_DML = lok, iter = 0, worker STDLOG: 2
target_object_type: view, checked_DML = lok, iter = 0, worker STDLOG: 3
target_object_type: view, checked_DML = lok, iter = 0, worker STDLOG: 4
target_object_type: view, checked_DML = lok, iter = 0, worker STDLOG: 5
target_object_type: view, checked_DML = lok, iter = 0, worker STDLOG: 6
target_object_type: view, checked_DML = lok, iter = 0, worker STDLOG: 7
target_object_type: view, checked_DML = lok, iter = 0, worker STDLOG: 8
target_object_type: view, checked_DML = lok, iter = 0, worker STDLOG: 9
target_object_type: view, checked_DML = lok, iter = 0, worker STDLOG: 10
target_object_type: view, checked_DML = lok, iter = 0, worker STDLOG: 11
target_object_type: view, checked_DML = lok, iter = 0, worker STDLOG: 12
target_object_type: view, checked_DML = lok, iter = 0, worker STDLOG:
target_object_type: view, checked_DML = lok, iter = 0, worker STDLOG: Records affected: 12
target_object_type: view, checked_DML = lok, iter = 0, worker STDLOG: Records affected: 0
target_object_type: view, checked_DML = lok, iter = 0, worker STDERR: Statement failed, SQLSTATE = 42000
target_object_type: view, checked_DML = lok, iter = 0, worker STDERR: Dynamic SQL Error
target_object_type: view, checked_DML = lok, iter = 0, worker STDERR: -SQL error code = -104
target_object_type: view, checked_DML = lok, iter = 0, worker STDERR: -WITH LOCK can be used only with a single physical table
target_object_type: view, checked_DML = lok, iter = 0, worker STDERR: After line 19 in file C:\\FBTESTING\\qabt-repo mp mp_sttm_restart_max_limit.sql
target_object_type: view, checked_DML = lok, iter = 1, restarts number to be tested: 12
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG:
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG: ID
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG: =======
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG: 1
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG: 2
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG: 3
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG: 4
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG: 5
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG: 6
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG: 7
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG: 8
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG: 9
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG: 10
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG: 11
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG: 12
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG: 13
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG: 14
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG:
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG: Records affected: 14
target_object_type: view, checked_DML = lok, iter = 1, worker STDLOG: Records affected: 0
target_object_type: view, checked_DML = lok, iter = 1, worker STDERR: Statement failed, SQLSTATE = 42000
target_object_type: view, checked_DML = lok, iter = 1, worker STDERR: Dynamic SQL Error
target_object_type: view, checked_DML = lok, iter = 1, worker STDERR: -SQL error code = -104
target_object_type: view, checked_DML = lok, iter = 1, worker STDERR: -WITH LOCK can be used only with a single physical table
target_object_type: view, checked_DML = lok, iter = 1, worker STDERR: After line 19 in file C:\\FBTESTING\\qabt-repo mp mp_sttm_restart_max_limit.sql
"""
# Placeholder for a firebird-qa test that has not been ported yet.
# Marked xfail so the suite records it as an expected failure rather
# than an error; the version marker restricts it to Firebird 4.0+.
@pytest.mark.version('>=4.0')
@pytest.mark.xfail
def test_1(db_1):
    """Not implemented: fails unconditionally until the test is ported."""
    pytest.fail("Test not IMPLEMENTED")
| 80.943556
| 266
| 0.591425
| 11,515
| 86,043
| 4.200261
| 0.044116
| 0.149278
| 0.236861
| 0.146797
| 0.877538
| 0.868503
| 0.86567
| 0.858227
| 0.856697
| 0.855456
| 0
| 0.03716
| 0.311298
| 86,043
| 1,062
| 267
| 81.019774
| 0.779034
| 0.168393
| 0
| 0.083682
| 0
| 0.052999
| 0.990679
| 0.008444
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001395
| false
| 0
| 0.002789
| 0
| 0.004184
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c4a14e36f339606092582d4f4cdcb234299c08ab
| 1,022
|
py
|
Python
|
tests/unit/scalar/test_boolean.py
|
alexchamberlain/tartiflette
|
6904b0f47770c348553e907be5f5bdb0929fe149
|
[
"MIT"
] | null | null | null |
tests/unit/scalar/test_boolean.py
|
alexchamberlain/tartiflette
|
6904b0f47770c348553e907be5f5bdb0929fe149
|
[
"MIT"
] | null | null | null |
tests/unit/scalar/test_boolean.py
|
alexchamberlain/tartiflette
|
6904b0f47770c348553e907be5f5bdb0929fe149
|
[
"MIT"
] | null | null | null |
import pytest
@pytest.mark.parametrize(
    "val,expected",
    [
        # Non-empty strings are truthy regardless of their textual meaning,
        # so "false" and "0" both coerce to True.
        ("true", True),
        ("false", True),
        ("1", True),
        (1, True),
        (0, False),
        ("0", True),
        (3.6, True),
        (0.0, False),
        ("a", True),
        (True, True),
        (None, False),
        (False, False),
    ],
)
def test_scalar_boolean_coerce_output(val, expected):
    """Output coercion of the Boolean scalar follows Python truthiness."""
    from tartiflette.scalar.builtins.boolean import ScalarBoolean

    scalar = ScalarBoolean()
    result = scalar.coerce_output(val)
    assert result == expected
@pytest.mark.parametrize(
    "val,expected",
    [
        ("true", True),
        ("false", True),  # non-empty string => truthy, even "false"
        ("1", True),
        (1, True),
        (0, False),
        ("0", True),      # non-empty string => truthy, even "0"
        (3.6, True),
        (0.0, False),
        ("a", True),
        (True, True),
        (None, False),
        (False, False),
    ],
)
def test_scalar_boolean_coerce_input(val, expected):
    """Input coercion of the Boolean scalar follows Python truthiness."""
    from tartiflette.scalar.builtins.boolean import ScalarBoolean

    coerced = ScalarBoolean().coerce_input(val)
    assert coerced == expected
| 21.291667
| 65
| 0.518591
| 106
| 1,022
| 4.90566
| 0.245283
| 0.126923
| 0.069231
| 0.092308
| 0.892308
| 0.892308
| 0.892308
| 0.892308
| 0.892308
| 0.892308
| 0
| 0.022792
| 0.313112
| 1,022
| 47
| 66
| 21.744681
| 0.717949
| 0
| 0
| 0.780488
| 0
| 0
| 0.046967
| 0
| 0
| 0
| 0
| 0
| 0.04878
| 1
| 0.04878
| false
| 0
| 0.073171
| 0
| 0.121951
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f200b0a330d754aa38688024338c5f49b27c7b77
| 862
|
py
|
Python
|
parser/team20/console.py
|
Ocsa/tytus
|
3ccb7c7616c26264a827bca1e9084b58e11ddd0f
|
[
"MIT"
] | null | null | null |
parser/team20/console.py
|
Ocsa/tytus
|
3ccb7c7616c26264a827bca1e9084b58e11ddd0f
|
[
"MIT"
] | null | null | null |
parser/team20/console.py
|
Ocsa/tytus
|
3ccb7c7616c26264a827bca1e9084b58e11ddd0f
|
[
"MIT"
] | null | null | null |
# Optional GUI console backend. When the Tytus GUI module is unavailable
# (e.g. running headless or from the test suite), every print_* helper
# below degrades to a silent no-op, so the import failure is deliberately
# swallowed here instead of aborting the whole parser.
try:
    import Tytus_GUI_console
except Exception:
    pass
def print_error(data_type: str, print_: str) -> None:
    """Forward an error message to the Tytus GUI console.

    Best-effort: silently does nothing when the GUI backend was not
    imported (any exception, including NameError, is swallowed).
    """
    try:
        Tytus_GUI_console.print_error(data_type, print_)
    except Exception:
        pass
def print_warning(data_type: str, print_: str) -> None:
    """Forward a warning message to the Tytus GUI console.

    Best-effort: silently does nothing when the GUI backend was not
    imported (any exception, including NameError, is swallowed).
    """
    try:
        Tytus_GUI_console.print_warning(data_type, print_)
    except Exception:
        pass
def print_success(data_type: str, print_: str) -> None:
    """Forward a success message to the Tytus GUI console.

    Best-effort: silently does nothing when the GUI backend was not
    imported (any exception, including NameError, is swallowed).
    """
    try:
        Tytus_GUI_console.print_success(data_type, print_)
    except Exception:
        pass
def print_text(data_type: str, print_: str) -> None:
    """Forward a plain text message to the Tytus GUI console.

    Best-effort: silently does nothing when the GUI backend was not
    imported (any exception, including NameError, is swallowed).
    """
    try:
        Tytus_GUI_console.print_text(data_type, print_)
    except Exception:
        pass
def print_table(print_: str) -> None:
    """Forward a rendered table to the Tytus GUI console.

    Best-effort: silently does nothing when the GUI backend was not
    imported (any exception, including NameError, is swallowed).
    """
    try:
        Tytus_GUI_console.print_table(print_)
    except Exception:
        pass
| 25.352941
| 58
| 0.663573
| 133
| 862
| 4
| 0.157895
| 0.120301
| 0.169173
| 0.203008
| 0.849624
| 0.849624
| 0.849624
| 0.791353
| 0.791353
| 0.733083
| 0
| 0.00916
| 0.240139
| 862
| 34
| 59
| 25.352941
| 0.803053
| 0.055684
| 0
| 0.62069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.172414
| false
| 0
| 0.034483
| 0
| 0.206897
| 0.344828
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
485cd7240260c7ac91db2a8d2897e9c98c889fab
| 5,622
|
py
|
Python
|
QuadProg/max_kappa.py
|
ranr01/Rubin2017Balanced
|
e3725170abd3c16309b189ebdf4a48bdd1835e0f
|
[
"Unlicense"
] | 3
|
2020-02-04T18:38:56.000Z
|
2021-01-26T04:34:34.000Z
|
QuadProg/max_kappa.py
|
ranr01/Rubin2017Balanced
|
e3725170abd3c16309b189ebdf4a48bdd1835e0f
|
[
"Unlicense"
] | null | null | null |
QuadProg/max_kappa.py
|
ranr01/Rubin2017Balanced
|
e3725170abd3c16309b189ebdf4a48bdd1835e0f
|
[
"Unlicense"
] | null | null | null |
import cvxopt
import numpy as np
def sign_constrained_perceptron_max_kappa_out(X,y,g,Gamma=1.,Lambda=1e5,\
                                              external_input=None):
    r'''Find the maximal $\kappa_\mathrm{out}$ solution for a sign-constrained
    perceptron with $|w| \le \Gamma$, by solving a cone linear program.

    ## Parameters:
    X - Input patterns (N x P)
    y - Patterns' labels (P x 1)
    g - Sign of weights (N x 1, of +1 for excitatory and -1 for inhibitory)
    Gamma - maximal norm of the solution's weight vector
    Lambda - Regularization parameter for the feasibility variable
        (should be >> 1 so that infeasibility is heavily penalized)
    external_input - optional per-pattern external input (P values); when
        given, the effective threshold becomes 1 - external_input per pattern

    ## Returns
    w - Perceptron weights
    theta - Perceptron threshold
    tau - Feasibility slack variable (tau = 0 if a solution was found,
        tau > 0 if no exact solution exists)
    sol - Full output dictionary from the CVXOPT solver
    converged_to_solution - True if a solution was found (checked against
        the actual classification margins, not just solver status)

    NOTE: w and theta are the so-called canonical weights. The weights in
    units of threshold are given by threshold * w / theta.
    In terms of w and theta, $\kappa_\mathrm{out}$ is given by 1/theta and
    $\kappa_\mathrm{in}$ is given by 1/|w|.
    '''
    N,P =X.shape
    # Normalize labels/signs to column vectors so broadcasting below is explicit.
    y = np.array(y).reshape((P,1))
    g = np.array(g).reshape((N,1))
    if external_input is None:
        Theta_mu = np.ones((P,1))
    else:
        # Per-pattern effective threshold: theta_mu = 1 - external_input.
        Theta_mu = -np.array(external_input).reshape((P,1))+1.
    # Margin constraint matrix over the extended variable x = [|w|_i ; theta ; tau]:
    # each row mu encodes y_mu * (sum_i g_i x_i X_i_mu - theta * Theta_mu) + tau >= 1.
    # (The zero-index tricks y[:,np.zeros(N,int)] / g[:,np.zeros(P,int)].T tile the
    # column vectors to (P x N) / (N x P) without an explicit broadcast.)
    A = np.hstack([X.T*y[:,np.zeros(N,int)]*g[:,np.zeros(P,int)].T, \
                   -y*Theta_mu, np.ones((P,1))])
    beta = np.ones((P,1))
    # Objective a^T x: minimize theta (maximizes kappa_out = 1/theta)
    # plus Lambda * tau to drive the feasibility slack to zero.
    a = np.zeros((N+2,1))
    a[N] = 1.
    a[N+1] = Lambda
    #We need to solve min(a^Tx) subject to:
    # Ax>=beta
    # x>=0
    # |x|<Gamma*theta
    #cvxopt solves:
    # min(c^Tx) subjet to
    # Gx+s=h
    # s>=0
    #second order cone:
    # s0=Gamma*x[N]
    # i=1...N si=x[i-1]
    # s0>=||s||
    # So matrix is N+1XN+2
    G_0 = np.zeros((N+1,N+2))
    G_0[0,-2] = -Gamma
    for i in range(1,N+1):
        G_0[i,i-1] = -1
    c = cvxopt.matrix(a)
    # Stack the three constraint groups: nonnegativity of x (first N entries
    # plus theta and tau), the margin constraints -Ax <= -beta, and the
    # second-order cone rows G_0.
    G = cvxopt.matrix(np.vstack(\
        [np.diag(np.vstack([-np.ones((N,1)),[[-1.],[-1]]]).flatten()),\
         -A,\
         G_0])\
        )
    h = cvxopt.matrix(np.vstack([np.zeros((N+2,1)),-beta,np.zeros((N+1,1))]))
    # dims: N+2+P scalar inequalities, one second-order cone of size N+1.
    dims = {'l':N+2+P, 'q': [N+1], 's':[]}
    # Solving the linear program
    sol = cvxopt.solvers.conelp(c,G,h,dims)
    # extracting solution (re-apply the sign constraints g to recover w)
    w = g*np.array(sol['x'][:N])
    theta = sol['x'][N]
    tau = sol['x'][N+1]
    # testing the solution: all patterns must be classified with margin >= 1.
    converged_to_solution = (y.T*(np.dot(w.T,X)-theta*Theta_mu.T)>=1.).all()
    if not converged_to_solution:
        print("Did not find solution. tau={}".format(tau))
    return w,theta,tau,sol,converged_to_solution
def sign_constrained_perceptron_max_kappa_in(X,y,g,Gamma=1.0,Lambda=1e5,\
        external_input=None):
    '''Finds the maximal $\kappa_\mathrm{in}$ solution for a sign constrained
    Perceptron, with |w|<=Gamma.
    ## Parameters:
    X - Input patterns (N x P)
    y - Patterns' labels (P x 1)
    g - Sign of weights (N x 1, of +1 for excitatory and -1 for inhibitory)
    Gamma - maximal norm of solution's weight vector
    Lambda - Regularization parameter for feasibility variable (Should be >>1)
    external_input - optional per-pattern external input (P x 1); when given,
        the effective per-pattern threshold becomes 1 - external_input
    ## Returns
    w - Perceptron weights
    theta - Perceptron threshold
    tau - Regularization variable (tau=0 if solution found and tau>0 if no
    solution exists)
    sol - Full output dictionary from the CVXOPT solver
    converged_to_solution - True if solution found (based on actual classification)
    NOTE: w and theta are the so called canonical weights. The weights in units
    of threshold are given by threshold * w / theta.
    In terms of w and theta $\kappa_\mathrm{out}$ is given by 1/theta and
    $\kappa_\mathrm{in}$ is given by 1/|w|.
    '''
    N,P = X.shape
    # Force column-vector shapes so the broadcasting below is well defined.
    y = np.array(y).reshape((P,1))
    g = np.array(g).reshape((N,1))
    if external_input is None:
        Theta_mu = np.ones((P,1))
    else:
        # Effective threshold per pattern: 1 - external input.
        Theta_mu = -np.array(external_input).reshape((P,1))+1.
    # Margin-constraint matrix over the variable vector x = [|w_1..N|, theta, tau]:
    # one row per pattern. y and g are tiled via zero-index fancy indexing to
    # match the (P x N) layout of X.T.
    A = np.hstack([X.T*y[:,np.zeros(N,int)]*g[:,np.zeros(P,int)].T, \
        -y*Theta_mu, np.ones((P,1))])
    beta = np.ones((P,1))
    # Linear objective term: only the feasibility slack tau is penalized.
    a = np.zeros((N+2,1))
    a[N+1] = Lambda
    # Quadratic objective term: minimize |w|^2; theta (index N) and tau
    # (index N+1) carry no quadratic cost.
    Q = np.eye(N+2)
    Q[N,N] = 0.0
    Q[N+1,N+1] = 0.0
    # We need to solve min(1/2x^TQx+a^Tx) subject to:
    # Ax>=beta
    # x>=0
    # |x|<Gamma*theta
    #second order cone:
    # s0=Gamma*x[N]
    # i=1...N si=x[i-1]
    # s0>=||s||
    # So matrix is N+1XN+2
    G_0 = np.zeros((N+1,N+2))
    G_0[0,-2] = -Gamma
    for i in range(1,N+1):
        G_0[i,i-1] = -1
    #cvxopt qp solves:
    # min(1/2x^TPx+q^Tx) subject to
    # Gx+s=h
    # s>=0
    q = cvxopt.matrix(a)
    P_opt = cvxopt.matrix(Q)
    # Stack: nonnegativity rows (-x <= 0), margin rows (-A x <= -beta),
    # then the second-order cone rows G_0.
    G = cvxopt.matrix(np.vstack(\
        [np.diag(-np.ones(N+2)),\
        -A,\
        G_0]))
    h = cvxopt.matrix(np.vstack([np.zeros((N+2,1)),-beta,np.zeros((N+1,1))]))
    # First N+2+P rows are componentwise ('l') inequalities; the final N+1
    # rows form a single second-order cone of dimension N+1.
    dims = {'l':N+2+P, 'q': [N+1], 's':[]}
    #print(dims)
    # solving the quadratic program
    sol = cvxopt.solvers.coneqp(P_opt,q,G,h,dims)
    # extracting solution
    w = g*np.array(sol['x'][:N])
    theta = sol['x'][N]
    tau = sol['x'][N+1]
    # testing the solution: every pattern must clear the unit margin.
    converged_to_solution = (y.T*(np.dot(w.T,X)-theta*Theta_mu.T)>=1.).all()
    if not converged_to_solution:
        print("Did not find solution. tau={}".format(tau))
    return w,theta,tau,sol,converged_to_solution
| 31.58427
| 87
| 0.559054
| 908
| 5,622
| 3.39978
| 0.162996
| 0.011662
| 0.025915
| 0.015549
| 0.907353
| 0.886297
| 0.862974
| 0.862974
| 0.833819
| 0.833819
| 0
| 0.028975
| 0.28175
| 5,622
| 177
| 88
| 31.762712
| 0.735513
| 0.416756
| 0
| 0.756757
| 0
| 0
| 0.023826
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.027027
| 0
| 0.081081
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
487395afcbbf5e5cf3641e9bc2c281ecd3b260a4
| 14,501
|
py
|
Python
|
Siamese Models/VGG16 Backbone/RTApp.py
|
123prashanth123/Fault-Detection-System
|
fa59ca81ce4627a42648e654b55cdc505cde2103
|
[
"MIT"
] | 1
|
2021-07-08T19:30:52.000Z
|
2021-07-08T19:30:52.000Z
|
Siamese Models/VGG16 Backbone/RTApp.py
|
123prashanth123/Fault-Detection-System
|
fa59ca81ce4627a42648e654b55cdc505cde2103
|
[
"MIT"
] | 1
|
2021-07-09T11:27:54.000Z
|
2021-07-09T11:27:54.000Z
|
Siamese Models/VGG16 Backbone/RTApp.py
|
123prashanth123/Fault-Detection-System
|
fa59ca81ce4627a42648e654b55cdc505cde2103
|
[
"MIT"
] | 1
|
2021-07-26T08:58:43.000Z
|
2021-07-26T08:58:43.000Z
|
"""
Realtime Inference
"""
import os
import platform
import cv2
import torch
import numpy as np
import utils as u
import Models
# ******************************************************************************************************************** #
# Inference Helper
def __help__(frame=None, anchor=None, model=None, show_prob=True, pt1=None, pt2=None, fea_extractor=None, roi_extractor=None):
    """Run one frame of Siamese-network inference and draw the verdict.

    frame         : Current frame being processed
    anchor        : Anchor Image (alpha-blended onto the display frame if given)
    model         : Siamese Network Model
    show_prob     : Flag to control whether to display the similarity score
    pt1           : Start Point of the Reference Bounding Box (currently unused)
    pt2           : End Point of the Reference Bounding Box (currently unused)
    fea_extractor : Feature Extraction Model
    roi_extractor : ROI Extraction Model
                    NOTE(review): the ROI box is taken from Models.roi_extractor,
                    not from this parameter — confirm which one is intended.

    Returns the annotated display frame.
    """
    disp_frame = frame.copy()
    # Alpha Blend Anchor Image if it is passed
    if anchor is not None:
        disp_frame = u.alpha_blend(anchor, disp_frame, 0.15)
    # Resize + Center Crop (256x256 ---> 224x224)
    frame = u.preprocess(frame, change_color_space=False)
    # Dynamic bounding box during inference
    x1, y1, x2, y2 = u.get_box_coordinates(Models.roi_extractor, u.ROI_TRANSFORM, disp_frame)
    # Perform Inference on current frame
    with torch.no_grad():
        features = u.normalize(fea_extractor(u.FEA_TRANSFORM(frame).to(u.DEVICE).unsqueeze(dim=0)))
        y_pred = torch.sigmoid(model(features))[0][0].item()
    # Classify once and reuse label/color for both drawing calls.
    #   Prediction >  Upper Bound                 -----> Match
    #   Lower Bound <= Prediction <= Upper Bound  -----> Possible Match
    #   Prediction <  Lower Bound                 -----> Defective
    # BUGFIX: the original show_prob=False path tested y_pred against the
    # lower bound first (making the "Possible Match" branch unreachable) and
    # labeled every branch "Match".
    if y_pred >= u.upper_bound_confidence:
        label, color = "Match", u.CLI_GREEN
    elif y_pred >= u.lower_bound_confidence:
        label, color = "Possible Match", u.GUI_ORANGE
    else:
        label, color = "Defective", u.CLI_RED
    # Append the similarity score to the label only when requested.
    text = "{}, {:.5f}".format(label, y_pred) if show_prob else label
    cv2.putText(img=disp_frame, text=text, org=(25, 75),
                fontScale=1, fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                color=color, thickness=2)
    cv2.rectangle(img=disp_frame, pt1=(x1, y1), pt2=(x2, y2), color=color, thickness=2)
    return disp_frame
# ******************************************************************************************************************** #
# Realtime Inference
def realtime(device_id=None, part_name=None, model=None, save=False, show_prob=False):
    """Run realtime inference from a camera and display/record the results.

    device_id : Device ID of the capture object
    part_name : Name of the part under inference
    model     : Siamese Network Model
    save      : Flag to control whether to save inference to a video file
    show_prob : Flag to control whether to display the similarity score

    Interactive keys: 'p' saves the frame to Positive/, 'n' to Negative/,
    'q' quits.
    """
    base_path = os.path.join(u.DATASET_PATH, part_name)
    # Read the anchor image
    disp_anchor_image = cv2.imread(os.path.join(base_path, "Positive", "Snapshot_1.png"), cv2.IMREAD_COLOR)
    # Load the model
    path = os.path.join(base_path, "Checkpoints", "State.pt")
    model.load_state_dict(torch.load(path, map_location=u.DEVICE)["model_state_dict"])
    model.eval()
    model.to(u.DEVICE)
    # Initialize the capture object (DirectShow backend on Windows)
    if platform.system() != "Windows":
        cap = cv2.VideoCapture(device_id)
    else:
        cap = cv2.VideoCapture(device_id, cv2.CAP_DSHOW)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, u.CAM_HEIGHT)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, u.CAM_WIDTH)
    cap.set(cv2.CAP_PROP_FPS, u.FPS)
    # Save a video file if flag is set
    out = None
    if save:
        video_path = os.path.join(base_path, "{}.mp4".format(part_name))
        codec = cv2.VideoWriter_fourcc(*"mp4v")
        # BUGFIX: use u.CAM_WIDTH/u.CAM_HEIGHT (the names used above), not
        # u.camWidth/u.camHeight. Width is doubled because the anchor image is
        # hstacked next to the frame below.
        out = cv2.VideoWriter(video_path, codec, 30.01, (2 * u.CAM_WIDTH, u.CAM_HEIGHT))
    # Read the reference box coordinates
    with open(os.path.join(base_path, "Box.txt"), "r") as file:
        data = file.read().split(",")
    # Next free snapshot indices for the Positive/Negative directories.
    # (The original also had an `if countn == 0` check, dead code since the
    # `+ 1` makes countn always >= 1.)
    countp = len(os.listdir(os.path.join(base_path, "Positive")))
    countn = len(os.listdir(os.path.join(base_path, "Negative"))) + 1
    # Read data from capture object
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # Apply CLAHE (2, 2) Preprocessing. May not be required once lighting issue is fixed
        frame = u.clahe_equ(frame)
        # Perform Inference
        disp_frame = __help__(frame=frame, model=model,
                              fea_extractor=Models.fea_extractor, roi_extractor=Models.roi_extractor,
                              show_prob=show_prob, pt1=(data[0], data[1]), pt2=(data[2], data[3]))
        # BUGFIX: poll the keyboard once per frame. The original called
        # cv2.waitKey(u.DELAY) three times per iteration, tripling the frame
        # delay and dropping key presses.
        key = cv2.waitKey(u.DELAY)
        if key == ord("p"):
            # 'p': the object detected is a False Negative
            print("")
            cv2.imwrite(os.path.join(base_path, "Positive", "Extra_{}.png".format(countp)), frame)
            print("Captured Snapshot - {} and save to Positive Directory".format(countp))
            countp += 1
        elif key == ord("n"):
            # 'n': the object detected is a False Positive
            print("")
            cv2.imwrite(os.path.join(base_path, "Negative", "Extra_{}.png".format(countn)), frame)
            print("Captured Snapshot - {} and save to Negative Directory".format(countn))
            countn += 1
        # Show the anchor image side by side with the annotated frame
        disp_frame = np.hstack((disp_anchor_image, disp_frame))
        if save:
            out.write(disp_frame)
        cv2.imshow("Feed", disp_frame)
        # Press 'q' to Quit
        if key == ord("q"):
            break
    # Release capture/writer objects and destroy all windows
    cap.release()
    if out is not None:
        out.release()
    cv2.destroyAllWindows()
# ******************************************************************************************************************** #
# Inference performed on video file
def video(filename=None, part_name=None, model=None, save=False, show_prob=True):
    """Run inference on a pre-recorded video file and display/record results.

    filename  : Name of the Video File (inside <base_path>/Video/)
    part_name : Name of the part under inference
    model     : Siamese Network Model
    save      : Flag to control whether to save inference to a video file
    show_prob : Flag to control whether to display the similarity score

    The video loops (rewinds to frame 0) when it reaches the end. Keys:
    'p' saves to Positive/, 'n' to Negative/, 'q' quits.
    """
    base_path = os.path.join(u.DATASET_PATH, part_name)
    # Read the anchor image
    disp_anchor_image = cv2.imread(os.path.join(base_path, "Positive", "Snapshot_1.png"), cv2.IMREAD_COLOR)
    # Load the model
    path = os.path.join(base_path, "Checkpoints", "State.pt")
    model.load_state_dict(torch.load(path, map_location=u.DEVICE)["model_state_dict"])
    model.eval()
    model.to(u.DEVICE)
    # BUGFIX: open the requested video. The original hardcoded
    # "FILENAME.mp4" and ignored the `filename` parameter entirely.
    cap = cv2.VideoCapture(os.path.join(base_path, "Video", filename))
    # Save a video file if flag is set
    out = None
    if save:
        # Use a separate name so the input `filename` is not clobbered.
        video_path = os.path.join(base_path, "{}.mp4".format(part_name))
        codec = cv2.VideoWriter_fourcc(*"mp4v")
        # BUGFIX: u.CAM_WIDTH/u.CAM_HEIGHT (names used elsewhere in this
        # module), not u.camWidth/u.camHeight. Width doubled for the hstack.
        out = cv2.VideoWriter(video_path, codec, 30.01, (2 * u.CAM_WIDTH, u.CAM_HEIGHT))
    # Read the reference box coordinates
    with open(os.path.join(base_path, "Box.txt"), "r") as file:
        data = file.read().split(",")
    # Next free snapshot indices (the `+ 1` keeps countn >= 1 always).
    countp = len(os.listdir(os.path.join(base_path, "Positive")))
    countn = len(os.listdir(os.path.join(base_path, "Negative"))) + 1
    # Read data from capture object
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            # Apply CLAHE (2, 2) Preprocessing. May not be required once lighting issue is fixed
            frame = u.clahe_equ(frame)
            # Perform Inference
            disp_frame = __help__(frame=frame, model=model, roi_extractor=Models.roi_extractor,
                                  fea_extractor=Models.fea_extractor, show_prob=show_prob,
                                  pt1=(data[0], data[1]), pt2=(data[2], data[3]))
            # BUGFIX: poll the keyboard once per frame instead of three times
            # (triple delay, dropped key presses).
            key = cv2.waitKey(u.DELAY)
            if key == ord("p"):
                # 'p': the object detected is a False Negative
                print("")
                cv2.imwrite(os.path.join(base_path, "Positive", "Extra_{}.png".format(countp)), frame)
                print("Captured Snapshot - {} and save to Positive Directory".format(countp))
                countp += 1
            elif key == ord("n"):
                # 'n': the object detected is a False Positive
                print("")
                cv2.imwrite(os.path.join(base_path, "Negative", "Extra_{}.png".format(countn)), frame)
                print("Captured Snapshot - {} and save to Negative Directory".format(countn))
                countn += 1
            # Show the anchor image side by side with the annotated frame
            disp_frame = np.hstack((disp_anchor_image, disp_frame))
            if save:
                out.write(disp_frame)
            cv2.imshow("Feed", disp_frame)
            # Press 'q' to Quit
            if key == ord("q"):
                break
        else:
            # End of video: rewind and keep playing.
            cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
    # Release capture/writer objects and destroy all windows
    cap.release()
    if out is not None:
        out.release()
    cv2.destroyAllWindows()
# ******************************************************************************************************************** #
| 47.858086
| 140
| 0.535825
| 1,788
| 14,501
| 4.223714
| 0.13255
| 0.042903
| 0.037076
| 0.031515
| 0.841764
| 0.812632
| 0.799126
| 0.798067
| 0.79224
| 0.791711
| 0
| 0.033542
| 0.282463
| 14,501
| 303
| 141
| 47.858086
| 0.692263
| 0.376802
| 0
| 0.704225
| 0
| 0
| 0.06699
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021127
| false
| 0
| 0.049296
| 0
| 0.077465
| 0.056338
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6f8d844d51ac2e415f686912aa6d29a681623859
| 400
|
py
|
Python
|
SBaaS_thermodynamics/stage03_quantification_tfba_io.py
|
dmccloskey/SBaaS_thermodynamics
|
0eeed0191f952ea0226ab8bbc234a30638fb2f9f
|
[
"MIT"
] | null | null | null |
SBaaS_thermodynamics/stage03_quantification_tfba_io.py
|
dmccloskey/SBaaS_thermodynamics
|
0eeed0191f952ea0226ab8bbc234a30638fb2f9f
|
[
"MIT"
] | null | null | null |
SBaaS_thermodynamics/stage03_quantification_tfba_io.py
|
dmccloskey/SBaaS_thermodynamics
|
0eeed0191f952ea0226ab8bbc234a30638fb2f9f
|
[
"MIT"
] | null | null | null |
# System
import json
# SBaaS
from .stage03_quantification_tfba_query import stage03_quantification_tfba_query
from SBaaS_base.sbaas_template_io import sbaas_template_io
# Resources
from io_utilities.base_importData import base_importData
from io_utilities.base_exportData import base_exportData
class stage03_quantification_tfba_io(stage03_quantification_tfba_query, sbaas_template_io):
    """I/O facade for stage-03 TFBA quantification.

    Combines the TFBA query mixin with the generic SBaaS template-I/O mixin;
    adds no members of its own.
    """
    # Removed the stray trailing semicolon after `pass`; the docstring alone
    # is a sufficient class body.
| 33.333333
| 90
| 0.8825
| 54
| 400
| 6.074074
| 0.333333
| 0.256098
| 0.304878
| 0.27439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021918
| 0.0875
| 400
| 12
| 91
| 33.333333
| 0.876712
| 0.055
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.142857
| 0.714286
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
6fbf25dd755040653ab8250f7710cce2ac8b9a51
| 18,087
|
py
|
Python
|
saleor/dashboard/reports/product_sales.py
|
glosoftgroup/KahawaHardware
|
893e94246583addf41c3bb0d58d2ce6bcd233c4f
|
[
"BSD-3-Clause"
] | 1
|
2020-01-22T04:35:31.000Z
|
2020-01-22T04:35:31.000Z
|
saleor/dashboard/reports/product_sales.py
|
glosoftgroup/KahawaHardware
|
893e94246583addf41c3bb0d58d2ce6bcd233c4f
|
[
"BSD-3-Clause"
] | 1
|
2022-02-10T07:42:22.000Z
|
2022-02-10T07:42:22.000Z
|
saleor/dashboard/reports/product_sales.py
|
glosoftgroup/KahawaHardware
|
893e94246583addf41c3bb0d58d2ce6bcd233c4f
|
[
"BSD-3-Clause"
] | null | null | null |
from django.core.exceptions import ObjectDoesNotExist
from django.template.response import TemplateResponse
from django.http import HttpResponse
from django.db.models import Count, Sum, Q
from django.core.paginator import Paginator, EmptyPage, InvalidPage, PageNotAnInteger
import datetime
from django.utils.dateformat import DateFormat
import logging
from operator import itemgetter
from ..views import staff_member_required
from ...sale.models import Sales, SoldItem
from ...product.models import ProductVariant
from ...decorators import permission_decorator, user_trail
from ...utils import render_to_pdf, default_logo
# Module-level loggers, one per severity channel. Handler/level configuration
# is expected to be done elsewhere (e.g. Django LOGGING settings).
debug_logger = logging.getLogger('debug_logger')
info_logger = logging.getLogger('info_logger')
error_logger = logging.getLogger('error_logger')
@staff_member_required
@permission_decorator('reports.view_sale_reports')
def sales_list(request):
    """Render the product-sales report for the most recent day with sales.

    Groups SoldItem rows for that day by product name/category, annotates
    count, total cost and quantity sums, orders by quantity sold (desc),
    and paginates 10 per page.
    """
    try:
        try:
            last_sale = Sales.objects.latest('id')
            last_date_of_sales = DateFormat(last_sale.created).format('Y-m-d')
        except ObjectDoesNotExist:
            # BUGFIX: was a bare `except:`; only "no sales yet" should fall
            # back to today's date.
            last_date_of_sales = DateFormat(datetime.datetime.today()).format('Y-m-d')
        total_sales = SoldItem.objects.filter(sales__created__contains=last_date_of_sales).values('product_name', 'product_category').annotate(
            c=Count('product_name', distinct=True)).annotate(Sum('total_cost')).annotate(Sum('quantity')).order_by('-quantity__sum')
        page = request.GET.get('page', 1)
        paginator = Paginator(total_sales, 10)
        try:
            total_sales = paginator.page(page)
        except PageNotAnInteger:
            total_sales = paginator.page(1)
        except EmptyPage:
            # BUGFIX: EmptyPage must be caught before InvalidPage — it is a
            # subclass, so the original ordering made this branch unreachable
            # and out-of-range pages fell back to page 1 instead of the last.
            total_sales = paginator.page(paginator.num_pages)
        except InvalidPage:
            total_sales = paginator.page(1)
        user_trail(request.user.name, 'accessed sales reports', 'view')
        info_logger.info('User: ' + str(request.user.name) + ' accessed the view product sales report page')
        return TemplateResponse(request, 'dashboard/reports/product_sales/product_sales.html',
                                {'pn': paginator.num_pages, 'sales': total_sales,
                                 'date': datetime.datetime.strptime(last_date_of_sales, '%Y-%m-%d').strftime('%b %d, %Y')})
    except ObjectDoesNotExist as e:
        # NOTE(review): this path returns None, which Django rejects as a view
        # response — confirm whether an error template should be rendered here.
        error_logger.error(e)
@staff_member_required
def sales_paginate(request):
    """AJAX pagination endpoint for the product-sales report.

    GET parameters:
        page        - page number to render (required, int)
        size        - page size for the p2.html listing
        psize       - page size for the paginate.html partial
        select_size - read but never used  # NOTE(review): dead parameter?
        gid         - report date (Y-m-d); falls back to the latest sale date
        order       - sort order: 'qlh' quantity low->high, 'mlh' margin
                      low->high, 'mhl' margin high->low, anything else
                      quantity high->low

    Renders one of the p2.html / paginate.html report partials.
    """
    page = int(request.GET.get('page'))
    list_sz = request.GET.get('size')
    p2_sz = request.GET.get('psize')
    select_sz = request.GET.get('select_size')
    date = request.GET.get('gid')
    order = request.GET.get('order')
    today_formart = DateFormat(datetime.date.today())
    today = today_formart.format('Y-m-d')
    # margin=True tells the templates that per-item unitMargin is available.
    margin = False
    if date:
        # Explicit report date supplied by the client.
        try:
            if order == 'qlh':
                # Quantity sold, ascending.
                sales = SoldItem.objects.filter(sales__created__contains=date). \
                    values('product_category', 'product_name').annotate(
                    c=Count('product_name', distinct=True)).annotate(Sum('total_cost')).annotate(
                    Sum('quantity')).order_by(
                    'quantity__sum')
            elif order == 'mlh':
                # Margin ascending: compute unitMargin = revenue - cost per item.
                items = SoldItem.objects.filter(sales__created__contains=date). \
                    values('sku', 'product_category', 'product_name').annotate(
                    c=Count('product_name', distinct=True)).annotate(Sum('total_cost')).annotate(
                    Sum('quantity'))
                total_items = []
                for t in items:
                    product = ProductVariant.objects.get(sku=t['sku'])
                    try:
                        # get_cost_price() may return a Money-like object (.gross)
                        # or a plain number; try the rich form first.
                        itemPrice = product.get_cost_price().gross * t['quantity__sum']
                    except ValueError as e:
                        itemPrice = product.get_cost_price() * t['quantity__sum']
                    except:
                        # NOTE(review): bare except silently zeroes the cost.
                        itemPrice = 0
                    totalSalesCost = t['total_cost__sum']
                    try:
                        unitMargin = totalSalesCost - (itemPrice)
                    except:
                        unitMargin = 0
                    t['unitMargin'] = unitMargin
                    total_items.append(t)
                sales = sorted(total_items, key=itemgetter('unitMargin'))
                margin = True
            elif order == 'mhl':
                # Margin descending: same computation, reversed sort.
                items = SoldItem.objects.filter(sales__created__contains=date). \
                    values('sku', 'product_category', 'product_name').annotate(
                    c=Count('product_name', distinct=True)).annotate(Sum('total_cost')).annotate(
                    Sum('quantity'))
                total_items = []
                for t in items:
                    product = ProductVariant.objects.get(sku=t['sku'])
                    try:
                        itemPrice = product.get_cost_price().gross * t['quantity__sum']
                    except ValueError as e:
                        itemPrice = product.get_cost_price() * t['quantity__sum']
                    except:
                        itemPrice = 0
                    totalSalesCost = t['total_cost__sum']
                    try:
                        unitMargin = totalSalesCost - (itemPrice)
                    except:
                        unitMargin = 0
                    t['unitMargin'] = unitMargin
                    total_items.append(t)
                sales = sorted(total_items, key=itemgetter('unitMargin'), reverse=True)
                margin = True
            else:
                # Default: quantity sold, descending.
                sales = SoldItem.objects.filter(sales__created__contains=date).\
                    values('product_category','product_name').annotate(
                    c=Count('product_name', distinct=True)).annotate(Sum('total_cost')).annotate(Sum('quantity')).order_by(
                    '-quantity__sum')
            if list_sz:
                paginator = Paginator(sales, int(list_sz))
                sales = paginator.page(page)
                return TemplateResponse(request, 'dashboard/reports/product_sales/p2.html',
                                        {'margin':margin, 'order':order, 'sales': sales, 'pn': paginator.num_pages, 'sz': list_sz, 'gid': date,
                                         'date': datetime.datetime.strptime(date, '%Y-%m-%d').strftime(
                                             '%b %d, %Y')
                                         })
            if p2_sz and date:
                paginator = Paginator(sales, int(p2_sz))
                sales = paginator.page(page)
                return TemplateResponse(request, 'dashboard/reports/product_sales/paginate.html',
                                        {'date': datetime.datetime.strptime(date, '%Y-%m-%d').strftime(
                                            '%b %d, %Y'),
                                         'margin':margin, 'order':order, 'sales': sales,'gid': date})
            # No explicit size: default to 10 per page.
            paginator = Paginator(sales, 10)
            sales = paginator.page(page)
            return TemplateResponse(request, 'dashboard/reports/product_sales/p2.html',
                                    {'margin':margin, 'order':order, 'sales': sales, 'pn': paginator.num_pages, 'sz': 10, 'gid': date,
                                     'date': datetime.datetime.strptime(date, '%Y-%m-%d').strftime(
                                         '%b %d, %Y'), 'today': today})
        except ObjectDoesNotExist as e:
            return TemplateResponse(request, 'dashboard/reports/product_sales/p2.html', {'date': date})
    else:
        # No date supplied: fall back to the date of the latest sale.
        try:
            last_sale = Sales.objects.latest('id')
            last_date_of_sales = DateFormat(last_sale.created).format('Y-m-d')
            if order == 'qlh':
                sales = SoldItem.objects.filter(sales__created__contains=last_date_of_sales). \
                    values('product_category', 'product_name').annotate(
                    c=Count('product_name', distinct=True)).annotate(Sum('total_cost')).annotate(
                    Sum('quantity')).order_by(
                    'quantity__sum')
            elif order == 'mlh':
                items = SoldItem.objects.filter(sales__created__contains=last_date_of_sales). \
                    values('sku', 'product_category', 'product_name').annotate(
                    c=Count('product_name', distinct=True)).annotate(Sum('total_cost')).annotate(
                    Sum('quantity'))
                total_items = []
                for t in items:
                    product = ProductVariant.objects.get(sku=t['sku'])
                    try:
                        itemPrice = product.get_cost_price().gross * t['quantity__sum']
                    except ValueError as e:
                        itemPrice = product.get_cost_price() * t['quantity__sum']
                    except:
                        itemPrice = 0
                    totalSalesCost = t['total_cost__sum']
                    try:
                        unitMargin = totalSalesCost - (itemPrice)
                    except:
                        unitMargin = 0
                    t['unitMargin'] = unitMargin
                    total_items.append(t)
                sales = sorted(total_items, key=itemgetter('unitMargin'))
                margin = True
            elif order == 'mhl':
                items = SoldItem.objects.filter(sales__created__contains=last_date_of_sales). \
                    values('sku', 'product_category', 'product_name').annotate(
                    c=Count('product_name', distinct=True)).annotate(Sum('total_cost')).annotate(
                    Sum('quantity'))
                total_items = []
                for t in items:
                    product = ProductVariant.objects.get(sku=t['sku'])
                    try:
                        itemPrice = product.get_cost_price().gross * t['quantity__sum']
                    except ValueError as e:
                        itemPrice = product.get_cost_price() * t['quantity__sum']
                    except:
                        itemPrice = 0
                    totalSalesCost = t['total_cost__sum']
                    try:
                        unitMargin = totalSalesCost - (itemPrice)
                    except:
                        unitMargin = 0
                    t['unitMargin'] = unitMargin
                    total_items.append(t)
                sales = sorted(total_items, key=itemgetter('unitMargin'), reverse=True)
                margin = True
            else:
                sales = SoldItem.objects.filter(sales__created__contains=last_date_of_sales). \
                    values('product_category','product_name').annotate(
                    c=Count('product_name', distinct=True)).annotate(Sum('total_cost')).annotate(Sum('quantity')).order_by(
                    '-quantity__sum')
            if list_sz:
                paginator = Paginator(sales, int(list_sz))
                sales = paginator.page(page)
                return TemplateResponse(request, 'dashboard/reports/product_sales/p2.html',
                                        {'margin':margin, 'order':order, 'sales': sales, 'pn': paginator.num_pages, 'sz': list_sz, 'gid': 0,
                                         'date': datetime.datetime.strptime(last_date_of_sales, '%Y-%m-%d').strftime(
                                             '%b %d, %Y')
                                         })
            else:
                paginator = Paginator(sales, 10)
            if p2_sz:
                paginator = Paginator(sales, int(p2_sz))
                sales = paginator.page(page)
                return TemplateResponse(request, 'dashboard/reports/product_sales/paginate.html',
                                        {'margin':margin, 'order':order, 'sales': sales,
                                         'date': datetime.datetime.strptime(last_date_of_sales,'%Y-%m-%d').strftime('%b %d, %Y')})
            try:
                sales = paginator.page(page)
            except PageNotAnInteger:
                sales = paginator.page(1)
            except InvalidPage:
                # NOTE(review): InvalidPage is the parent of EmptyPage, so the
                # EmptyPage clause below is unreachable — confirm intended order.
                sales = paginator.page(1)
            except EmptyPage:
                sales = paginator.page(1)
            return TemplateResponse(request, 'dashboard/reports/product_sales/paginate.html', {'margin':margin, 'order':order, 'sales': sales,
                                                                                              'date': datetime.datetime.strptime(
                                                                                                  last_date_of_sales,
                                                                                                  '%Y-%m-%d').strftime(
                                                                                                  '%b %d, %Y')})
        except ObjectDoesNotExist as e:
            # NOTE(review): if Sales.objects.latest raised, last_date_of_sales
            # is unbound here and this line itself raises — confirm.
            return TemplateResponse(request, 'dashboard/reports/product_sales/p2.html', {'date': datetime.datetime.strptime(last_date_of_sales, '%Y-%m-%d').strftime('%b %d, %Y')})
@staff_member_required
def sales_search(request):
    """AJAX search endpoint for the product-sales report.

    GET parameters:
        page  - page number (default 1)
        size  - page size for the search.html listing
        psize - page size for the paginate.html partial
        q     - search term matched against product name/category (icontains)
        gid   - report date (Y-m-d); falls back to the latest sale date
        order - 'qlh' / 'mlh' / 'mhl' sort orders as in sales_paginate

    NOTE(review): non-AJAX requests fall through and return None, and the
    q-is-None path below references `sales` before assignment — confirm
    callers always send AJAX requests with q set.
    """
    if request.is_ajax():
        page = int(request.GET.get('page', 1))
        list_sz = request.GET.get('size')
        p2_sz = request.GET.get('psize')
        q = request.GET.get('q')
        order = request.GET.get('order')
        margin = False
        # Effective page size echoed back to the template.
        if list_sz is None:
            sz = 10
        else:
            sz = list_sz
        if request.GET.get('gid'):
            date = request.GET.get('gid')
        else:
            # No date supplied: use the latest sale's date, or today.
            try:
                last_sale = Sales.objects.latest('id')
                date = DateFormat(last_sale.created).format('Y-m-d')
            except:
                date = DateFormat(datetime.datetime.today()).format('Y-m-d')
        if q is not None:
            # Restrict to items whose name or category matches the query.
            all_sales = SoldItem.objects.filter(
                Q(product_name__icontains=q) |
                Q(product_category__icontains=q))
            if order == 'qlh':
                # Quantity sold, ascending.
                sales = all_sales.filter(sales__created__contains=date). \
                    values('product_category','product_name'). \
                    annotate(c=Count('product_name', distinct=True)).annotate(Sum('total_cost')). \
                    annotate(Sum('quantity')).order_by('quantity__sum')
            elif order == 'mlh':
                # Margin ascending: unitMargin = revenue - cost per item.
                items = all_sales.filter(sales__created__contains=date). \
                    values('sku', 'product_category', 'product_name').annotate(
                    c=Count('product_name', distinct=True)).annotate(Sum('total_cost')).annotate(
                    Sum('quantity'))
                total_items = []
                for t in items:
                    product = ProductVariant.objects.get(sku=t['sku'])
                    try:
                        # get_cost_price() may return a Money-like object (.gross)
                        # or a plain number; try the rich form first.
                        itemPrice = product.get_cost_price().gross * t['quantity__sum']
                    except ValueError as e:
                        itemPrice = product.get_cost_price() * t['quantity__sum']
                    except:
                        itemPrice = 0
                    totalSalesCost = t['total_cost__sum']
                    try:
                        unitMargin = totalSalesCost - (itemPrice)
                    except:
                        unitMargin = 0
                    t['unitMargin'] = unitMargin
                    total_items.append(t)
                sales = sorted(total_items, key=itemgetter('unitMargin'))
                margin = True
            elif order == 'mhl':
                # Margin descending.
                items = all_sales.filter(sales__created__contains=date). \
                    values('sku', 'product_category', 'product_name').annotate(
                    c=Count('product_name', distinct=True)).annotate(Sum('total_cost')).annotate(
                    Sum('quantity'))
                total_items = []
                for t in items:
                    product = ProductVariant.objects.get(sku=t['sku'])
                    try:
                        itemPrice = product.get_cost_price().gross * t['quantity__sum']
                    except ValueError as e:
                        itemPrice = product.get_cost_price() * t['quantity__sum']
                    except:
                        itemPrice = 0
                    totalSalesCost = t['total_cost__sum']
                    try:
                        unitMargin = totalSalesCost - (itemPrice)
                    except:
                        unitMargin = 0
                    t['unitMargin'] = unitMargin
                    total_items.append(t)
                sales = sorted(total_items, key=itemgetter('unitMargin'), reverse=True)
                margin = True
            else:
                # Default: quantity sold, descending.
                sales = all_sales.filter(sales__created__contains=date). \
                    values('product_category', 'product_name'). \
                    annotate(c=Count('product_name', distinct=True)).annotate(Sum('total_cost')). \
                    annotate(Sum('quantity')).order_by('-quantity__sum')
            if p2_sz:
                paginator = Paginator(sales, int(p2_sz))
                sales = paginator.page(page)
                return TemplateResponse(request, 'dashboard/reports/product_sales/paginate.html',
                                        {'margin':margin, 'order':order, 'sales': sales, 'date': datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%b %d, %Y')})
            if list_sz:
                paginator = Paginator(sales, int(list_sz))
                sales = paginator.page(page)
                return TemplateResponse(request, 'dashboard/reports/product_sales/search.html',
                                        {'margin':margin, 'order':order, 'sales': sales, 'pn': paginator.num_pages, 'sz': list_sz,
                                         'gid': request.GET.get('gid'), 'q': q,
                                         'date': datetime.datetime.strptime(date, '%Y-%m-%d').strftime(
                                             '%b %d, %Y')})
            paginator = Paginator(sales, 10)
            sales = paginator.page(page)
            return TemplateResponse(request, 'dashboard/reports/product_sales/search.html',
                                    {'margin':margin, 'order':order, 'sales': sales, 'pn': paginator.num_pages, 'sz': sz,
                                     'gid': request.GET.get('gid'),
                                     'date': datetime.datetime.strptime(date, '%Y-%m-%d').strftime(
                                         '%b %d, %Y')})
        # NOTE(review): reached only when q is None — `sales` is unbound here,
        # so this likely raises NameError; verify against the template's AJAX calls.
        paginator = Paginator(sales, 10)
        try:
            sales = paginator.page(page)
        except PageNotAnInteger:
            sales = paginator.page(1)
        except InvalidPage:
            # NOTE(review): InvalidPage is the parent of EmptyPage, making the
            # EmptyPage clause below unreachable — confirm intended order.
            sales = paginator.page(1)
        except EmptyPage:
            sales = paginator.page(paginator.num_pages)
        if p2_sz:
            sales = paginator.page(page)
            return TemplateResponse(request, 'dashboard/reports/product_sales/paginate.html',
                                    {'margin':margin, 'order':order, 'sales': sales,'date': datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%b %d, %Y')})
        return TemplateResponse(request, 'dashboard/reports/product_sales/search.html',
                                {'margin':margin, 'order':order, 'sales': sales, 'pn': paginator.num_pages, 'sz': sz,
                                 'q': q,'date': datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%b %d, %Y')})
def _sales_with_margin(all_sales, date, reverse):
    """Aggregate sold items for `date` per SKU and sort them by unit margin.

    Returns a list of dicts (one per product) each carrying a computed
    'unitMargin' key; `reverse=True` puts the highest margin first.
    Shared by the 'mlh' and 'mhl' orderings, which previously duplicated
    this whole computation inline.
    """
    items = all_sales.filter(sales__created__contains=date). \
        values('sku', 'product_category', 'product_name').annotate(
        c=Count('product_name', distinct=True)).annotate(Sum('total_cost')).annotate(
        Sum('quantity'))
    total_items = []
    for t in items:
        product = ProductVariant.objects.get(sku=t['sku'])
        try:
            # Price objects expose .gross; fall back to the raw value, then 0.
            item_cost = product.get_cost_price().gross * t['quantity__sum']
        except ValueError:
            item_cost = product.get_cost_price() * t['quantity__sum']
        except Exception:
            item_cost = 0
        try:
            t['unitMargin'] = t['total_cost__sum'] - item_cost
        except Exception:
            # total_cost__sum may be None or incompatible; treat as zero margin.
            t['unitMargin'] = 0
        total_items.append(t)
    return sorted(total_items, key=itemgetter('unitMargin'), reverse=reverse)


@staff_member_required
def sales_list_pdf(request):
    """Render the one-day product-sales report as a PDF (AJAX only).

    GET params:
        q     -- optional search term matched against product name/category
        gid   -- report date as 'Y-m-d' (falls back to the latest sale's day,
                 then to today)
        order -- 'qlh' quantity low->high, 'mlh'/'mhl' margin low->high /
                 high->low, anything else: quantity high->low

    Returns an application/pdf HttpResponse; non-AJAX requests fall through
    and return None (behavior kept from the original).
    """
    if request.is_ajax():
        q = request.GET.get('q')
        gid = request.GET.get('gid')
        order = request.GET.get('order')
        margin = False
        if gid:
            # 'gid' carries the requested report date directly.
            date = gid
        else:
            gid = None
            try:
                last_sale = Sales.objects.latest('id')
                date = DateFormat(last_sale.created).format('Y-m-d')
            except Exception:
                # No sales recorded yet: default to today.
                date = DateFormat(datetime.datetime.today()).format('Y-m-d')
        if q is not None:
            all_sales = SoldItem.objects.filter(
                Q(product_name__icontains=q) |
                Q(product_category__icontains=q))
        else:
            all_sales = SoldItem.objects.all()
        if order == 'qlh':
            sales = all_sales.filter(sales__created__contains=date). \
                values('product_category', 'product_name'). \
                annotate(c=Count('product_name', distinct=True)).annotate(Sum('total_cost')). \
                annotate(Sum('quantity')).order_by('quantity__sum')
        elif order == 'mlh':
            sales = _sales_with_margin(all_sales, date, reverse=False)
            margin = True
        elif order == 'mhl':
            sales = _sales_with_margin(all_sales, date, reverse=True)
            margin = True
        else:
            sales = all_sales.filter(sales__created__contains=date). \
                values('product_category', 'product_name'). \
                annotate(c=Count('product_name', distinct=True)).annotate(Sum('total_cost')). \
                annotate(Sum('quantity')).order_by('-quantity__sum')
        img = default_logo()
        data = {
            'today': datetime.date.today(),
            'sales': sales,
            'puller': request.user,
            'image': img,
            'gid': gid,
            'date': datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%b %d, %Y'),
            'margin': margin,
        }
        pdf = render_to_pdf('dashboard/reports/product_sales/pdf/list.html', data)
        return HttpResponse(pdf, content_type='application/pdf')
| 38.319915
| 170
| 0.670979
| 2,248
| 18,087
| 5.202402
| 0.066726
| 0.033861
| 0.005643
| 0.037794
| 0.877469
| 0.864215
| 0.836682
| 0.836682
| 0.823172
| 0.81599
| 0
| 0.003631
| 0.177641
| 18,087
| 472
| 171
| 38.319915
| 0.782641
| 0
| 0
| 0.812785
| 0
| 0
| 0.174812
| 0.036986
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009132
| false
| 0
| 0.031963
| 0
| 0.075342
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b50ff57a89de092cc00ba0d554cfb73121373b24
| 155
|
py
|
Python
|
sapcc_swift_addons/__init__.py
|
sapcc/swift-addons
|
b3c53e7e4cee981ab386c130f23442f9ec43fc2d
|
[
"Apache-2.0"
] | null | null | null |
sapcc_swift_addons/__init__.py
|
sapcc/swift-addons
|
b3c53e7e4cee981ab386c130f23442f9ec43fc2d
|
[
"Apache-2.0"
] | null | null | null |
sapcc_swift_addons/__init__.py
|
sapcc/swift-addons
|
b3c53e7e4cee981ab386c130f23442f9ec43fc2d
|
[
"Apache-2.0"
] | null | null | null |
from sapcc_swift_addons.sysmeta_domain_override import DomainOverrideMiddleware
from sapcc_swift_addons.in_flight_counter import InFlightCounterMiddleware
| 51.666667
| 79
| 0.935484
| 18
| 155
| 7.611111
| 0.722222
| 0.131387
| 0.20438
| 0.291971
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051613
| 155
| 2
| 80
| 77.5
| 0.931973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
82f5c5f1e959e3b3fccacb5256a491ed8f9d8bdc
| 8,165
|
py
|
Python
|
codes/deeplearning/dnn/networks/feat_networks.py
|
sarvai/proposals
|
578c0094db52594cd85acb843df82fe3c19db46d
|
[
"Apache-2.0"
] | null | null | null |
codes/deeplearning/dnn/networks/feat_networks.py
|
sarvai/proposals
|
578c0094db52594cd85acb843df82fe3c19db46d
|
[
"Apache-2.0"
] | null | null | null |
codes/deeplearning/dnn/networks/feat_networks.py
|
sarvai/proposals
|
578c0094db52594cd85acb843df82fe3c19db46d
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
import tensorflow.contrib.slim as slim
from .network import network
class feat_net0( network ):
    """Small VGG-like backbone emitting feature maps at 1/8 and 1/16 resolution."""

    def apply( self, input ):
        """Build the feature tower on `input` and return a dict with
        'feat8' and 'feat16' endpoint descriptors (net / scale / base_size)."""
        # Per-channel mean subtraction (ImageNet RGB means).
        mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32,
                           shape=[1, 1, 1, 3], name='img_mean')
        x = input - mean
        endpoints = {}
        with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu,
                            weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                            weights_regularizer=slim.l2_regularizer(0.05)):
            x = slim.conv2d(x, 64, [3, 3], scope=self._scope_name('feat_conv1'))
            x = slim.conv2d(x, 64, [3, 3], scope=self._scope_name('feat_conv2'))
            x = slim.max_pool2d(x, [2, 2])
            x = slim.conv2d(x, 128, [3, 3], scope=self._scope_name('feat_conv3'))
            x = slim.conv2d(x, 128, [3, 3], scope=self._scope_name('feat_conv4'))
            x = slim.max_pool2d(x, [2, 2])
            x = slim.conv2d(x, 256, [3, 3], scope=self._scope_name('feat_conv5'))
            x = slim.max_pool2d(x, [2, 2])
            x = slim.conv2d(x, 256, [3, 3], scope=self._scope_name('feat_conv6'))
            # 1/8-resolution endpoint.
            endpoints['feat8'] = {'net': x, 'scale': 1.0 / 8.0, 'base_size': 8.0}
            x = slim.max_pool2d(x, [2, 2])
            x = slim.conv2d(x, 512, [3, 3], scope=self._scope_name('feat_conv7'))
            # 1/16-resolution endpoint.
            endpoints['feat16'] = {'net': x, 'scale': 1.0 / 16.0, 'base_size': 16.0}
        return endpoints
class vgg16( network ):
    """VGG-16 backbone emitting endpoints at 1/4, 1/8 and 1/16 resolution."""

    def apply( self, input ):
        """Return {'feat4', 'feat8', 'feat16'} endpoint descriptors for `input`."""
        # Per-channel mean subtraction (ImageNet RGB means).
        mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32,
                           shape=[1, 1, 1, 3], name='img_mean')
        x = input - mean
        endpoints = {}
        with slim.arg_scope([slim.conv2d],
                            activation_fn=tf.nn.relu,
                            weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                            weights_regularizer=slim.l2_regularizer(0.05)):
            x = slim.repeat(x, 2, slim.conv2d, 64, [3, 3], scope=self._scope_name('conv1'))
            x = slim.max_pool2d(x, [2, 2], scope=self._scope_name('pool1'))
            x = slim.repeat(x, 2, slim.conv2d, 128, [3, 3], scope=self._scope_name('conv2'))
            x = slim.max_pool2d(x, [2, 2], scope=self._scope_name('pool2'))
            x = slim.repeat(x, 3, slim.conv2d, 256, [3, 3], scope=self._scope_name('conv3'))
            # 1/4-resolution endpoint (after two pools).
            endpoints['feat4'] = {'net': x, 'scale': 1.0 / 4.0, 'base_size': 4.0}
            x = slim.max_pool2d(x, [2, 2], scope=self._scope_name('pool3'))
            x = slim.repeat(x, 3, slim.conv2d, 512, [3, 3], scope=self._scope_name('conv4'))
            # 1/8-resolution endpoint.
            endpoints['feat8'] = {'net': x, 'scale': 1.0 / 8.0, 'base_size': 8.0}
            x = slim.max_pool2d(x, [2, 2], scope=self._scope_name('pool4'))
            x = slim.repeat(x, 3, slim.conv2d, 512, [3, 3], scope=self._scope_name('conv5'))
            # 1/16-resolution endpoint.
            endpoints['feat16'] = {'net': x, 'scale': 1.0 / 16.0, 'base_size': 16.0}
        return endpoints
class vgg16_pose( network ):
    """VGG-16 trunk with all pooling removed: convolution stacks only, so the
    feature map keeps the input resolution."""

    def apply( self, input ):
        """Return a single full-resolution endpoint for `input`."""
        # Per-channel mean subtraction (ImageNet RGB means).
        mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32,
                           shape=[1, 1, 1, 3], name='img_mean')
        x = input - mean
        endpoints = {}
        with slim.arg_scope([slim.conv2d],
                            activation_fn=tf.nn.relu,
                            weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                            weights_regularizer=slim.l2_regularizer(0.05)):
            x = slim.repeat(x, 2, slim.conv2d, 64, [3, 3], scope=self._scope_name('conv1'))
            x = slim.repeat(x, 2, slim.conv2d, 128, [3, 3], scope=self._scope_name('conv2'))
            x = slim.repeat(x, 3, slim.conv2d, 256, [3, 3], scope=self._scope_name('conv3'))
            x = slim.repeat(x, 3, slim.conv2d, 512, [3, 3], scope=self._scope_name('conv4'))
            # NOTE(review): the full-resolution map (scale 1.0) is published under
            # the 'feat8' key — presumably to keep downstream consumers of the
            # pooled variants unchanged; confirm against callers.
            endpoints['feat8'] = {'net': x, 'scale': 1.0, 'base_size': 1.0}
        return endpoints
class vgg16_small( network ):
    """Reduced VGG backbone emitting endpoints at 1/4, 1/8 and 1/16 resolution."""

    def apply( self, input ):
        """Return {'feat4', 'feat8', 'feat16'} endpoint descriptors for `input`."""
        # Per-channel mean subtraction (ImageNet RGB means).
        mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32,
                           shape=[1, 1, 1, 3], name='img_mean')
        x = input - mean
        endpoints = {}
        with slim.arg_scope([slim.conv2d],
                            activation_fn=tf.nn.relu,
                            weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                            weights_regularizer=slim.l2_regularizer(0.05)):
            x = slim.conv2d(x, 64, [3, 3], scope=self._scope_name('feat_conv1'))
            x = slim.conv2d(x, 64, [3, 3], scope=self._scope_name('feat_conv2'))
            x = slim.max_pool2d(x, [2, 2])
            x = slim.conv2d(x, 128, [3, 3], scope=self._scope_name('feat_conv3'))
            x = slim.conv2d(x, 128, [3, 3], scope=self._scope_name('feat_conv4'))
            x = slim.max_pool2d(x, [2, 2])
            x = slim.conv2d(x, 256, [3, 3], scope=self._scope_name('feat_conv5'))
            # 1/4-resolution endpoint.
            endpoints['feat4'] = {'net': x, 'scale': 1.0 / 4.0, 'base_size': 4.0}
            x = slim.max_pool2d(x, [2, 2])
            x = slim.conv2d(x, 256, [3, 3], scope=self._scope_name('feat_conv6'))
            # 1/8-resolution endpoint.
            endpoints['feat8'] = {'net': x, 'scale': 1.0 / 8.0, 'base_size': 8.0}
            x = slim.max_pool2d(x, [2, 2])
            x = slim.conv2d(x, 512, [3, 3], scope=self._scope_name('feat_conv7'))
            # 1/16-resolution endpoint.
            endpoints['feat16'] = {'net': x, 'scale': 1.0 / 16.0, 'base_size': 16.0}
        return endpoints
class vgg16_very_small( network ):
    """Minimal VGG backbone: four convolutions, one pool, a single
    1/2-resolution endpoint."""

    def apply( self, input ):
        """Return {'feat2'} endpoint descriptor for `input`."""
        # Per-channel mean subtraction (ImageNet RGB means).
        mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32,
                           shape=[1, 1, 1, 3], name='img_mean')
        x = input - mean
        endpoints = {}
        with slim.arg_scope([slim.conv2d],
                            activation_fn=tf.nn.relu,
                            weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                            weights_regularizer=slim.l2_regularizer(0.05)):
            x = slim.conv2d(x, 64, [3, 3], scope=self._scope_name('feat_conv1'))
            x = slim.conv2d(x, 64, [3, 3], scope=self._scope_name('feat_conv2'))
            x = slim.max_pool2d(x, [2, 2])
            x = slim.conv2d(x, 128, [3, 3], scope=self._scope_name('feat_conv3'))
            x = slim.conv2d(x, 128, [3, 3], scope=self._scope_name('feat_conv4'))
            # 1/2-resolution endpoint (single pool).
            endpoints['feat2'] = {'net': x, 'scale': 1.0 / 2.0, 'base_size': 2.0}
        return endpoints
# Registry mapping configuration names to feature-network classes
# (insertion order preserved from the original assignments).
networks = {
    'feat_net0': feat_net0,
    'vgg16': vgg16,
    'vgg16_small': vgg16_small,
    'vgg16_very_small': vgg16_very_small,
    'vgg16_pose': vgg16_pose,
}
| 40.02451
| 109
| 0.53117
| 1,089
| 8,165
| 3.823691
| 0.078053
| 0.075648
| 0.121037
| 0.15562
| 0.912584
| 0.912584
| 0.912584
| 0.912584
| 0.912584
| 0.912584
| 0
| 0.108084
| 0.307655
| 8,165
| 203
| 110
| 40.221675
| 0.628516
| 0.055971
| 0
| 0.797297
| 0
| 0
| 0.072616
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033784
| false
| 0
| 0.02027
| 0
| 0.121622
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d249cffe11f84ff2b635555aa18c80d59860f69c
| 130
|
py
|
Python
|
python/testData/toxtest/toxPyTestXDist/test_foo.py
|
tgodzik/intellij-community
|
f5ef4191fc30b69db945633951fb160c1cfb7b6f
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/toxtest/toxPyTestXDist/test_foo.py
|
tgodzik/intellij-community
|
f5ef4191fc30b69db945633951fb160c1cfb7b6f
|
[
"Apache-2.0"
] | 2
|
2022-02-19T09:45:05.000Z
|
2022-02-27T20:32:55.000Z
|
python/testData/toxtest/toxPyTestXDist/test_foo.py
|
tgodzik/intellij-community
|
f5ef4191fc30b69db945633951fb160c1cfb7b6f
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
def test_doo():
    """Trivially passing test; fixture data for the tox/pytest-xdist run."""
    pass
def test_bar():
    """Trivially passing test; fixture data for the tox/pytest-xdist run."""
    pass
def test_only_2():
    """Passes only on Python 2: asserts the version string starts with "2"."""
    import sys
    assert str(sys.version).startswith("2")
| 13
| 43
| 0.638462
| 20
| 130
| 3.95
| 0.65
| 0.265823
| 0.278481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02
| 0.230769
| 130
| 9
| 44
| 14.444444
| 0.77
| 0
| 0
| 0.285714
| 0
| 0
| 0.007692
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.428571
| true
| 0.285714
| 0.142857
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 8
|
d24a3f6781fbcd657dddd37b8431da5bb6d80983
| 6,785
|
py
|
Python
|
j5e/network/SocketClient.py
|
jeuxcing/j5e
|
6b809596a3a80da757d431c6174febc1706d36f4
|
[
"MIT"
] | null | null | null |
j5e/network/SocketClient.py
|
jeuxcing/j5e
|
6b809596a3a80da757d431c6174febc1706d36f4
|
[
"MIT"
] | null | null | null |
j5e/network/SocketClient.py
|
jeuxcing/j5e
|
6b809596a3a80da757d431c6174febc1706d36f4
|
[
"MIT"
] | null | null | null |
import socket
import time
from threading import Thread
class SocketClient(Thread):
    """Threaded TCP client speaking a simple length-prefixed message protocol.

    Outgoing messages are queued via send(); the run loop sends the head of
    the mailbox, waits for a one-byte 0xFF acknowledgment before dequeuing it,
    and re-sends after 0.1 s without an ack. Incoming framed messages are
    passed to every registered handler.

    Changes vs. original: removed the per-iteration debug print ("POUET")
    and the fully commented-out duplicate of run().
    """

    def __init__(self, verbose=False):
        super().__init__()
        self.port = 6000          # server port; updated via port_event("connect", ...)
        self.stopped = False      # set by stop() to end the run loop
        self.mailbox = []         # outgoing messages; head is the in-flight one
        self.inbox = []           # received raw bytes, one 1-byte object per element
        self.msg_handlers = []    # callbacks invoked with each complete frame
        self.verbose = verbose

    def port_event(self, event_name, attrs):
        """React to a "connect" event by switching to the announced port."""
        if event_name == "connect":
            self.port = attrs[1]

    def register_msg_handler(self, function):
        """Register a callback receiving each complete incoming frame."""
        self.msg_handlers.append(function)

    def send(self, msg):
        """Queue `msg` (bytes) for transmission."""
        self.mailbox.append(msg)

    def run(self):
        """Connect, then pump receive/send until stop() or a port change.

        NOTE(review): after a port change the same socket object is reused for
        the next connect(), which a closed/used socket may not support — confirm
        reconnection actually works in practice.
        """
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            while not self.stopped:
                try:
                    current_port = self.port
                    sock.setblocking(1)
                    sock.connect(("127.0.0.1", current_port))
                    sock.settimeout(0.05)
                    if self.verbose:
                        print(f"Socket opened on port {current_port}")
                    acknowledged = True
                    last_send = 0.
                    while current_port == self.port and not self.stopped:
                        # Receive up to 100 bytes; the short timeout keeps the
                        # loop responsive.
                        data = None
                        try:
                            for i in range(100):
                                byte = sock.recv(1)
                                self.inbox.append(byte)
                        except socket.timeout:
                            pass
                        while len(self.inbox) > 0:
                            # First byte is the payload length; wait for a full frame.
                            size = int.from_bytes(self.inbox[0], "big")
                            if size > len(self.inbox) - 1:
                                break
                            # Extract the frame (length byte + payload).
                            data = self.inbox[:size + 1]
                            self.inbox = self.inbox[size + 1:]
                            val = int.from_bytes(data[1], "big")
                            if size == 1 and val == 0xFF:
                                # 0xFF is the ack: drop the in-flight message.
                                self.mailbox = self.mailbox[1:]
                                acknowledged = True
                            if self.verbose:
                                print("receiving:", data)
                            for function in self.msg_handlers:
                                function(data)
                        # Send the mailbox head: either the next fresh message,
                        # or a retransmission after 0.1 s without an ack.
                        msg = None
                        if len(self.mailbox) > 0 and acknowledged:
                            msg = self.mailbox[0]
                        elif not acknowledged and time.time() - last_send > .1:
                            msg = self.mailbox[0]
                        if msg is not None:
                            sock.setblocking(1)
                            if self.verbose:
                                print(f"sending: {msg}")
                            acknowledged = False
                            last_send = time.time()
                            sock.send(bytes([len(msg)]))
                            sock.send(msg)
                            sock.settimeout(0.05)
                except ConnectionRefusedError:
                    if self.verbose:
                        print(f"Connexion refused on port {self.port}")
                    time.sleep(1)
                    continue
            if self.verbose:
                print("Socket closed")

    def stop(self):
        """Ask the run loop to terminate."""
        self.stopped = True
| 39.678363
| 99
| 0.391452
| 586
| 6,785
| 4.462457
| 0.163823
| 0.051625
| 0.049713
| 0.068834
| 0.855449
| 0.855449
| 0.855449
| 0.855449
| 0.855449
| 0.855449
| 0
| 0.021927
| 0.522771
| 6,785
| 171
| 100
| 39.678363
| 0.78567
| 0.420929
| 0
| 0.194805
| 0
| 0
| 0.035492
| 0
| 0
| 0
| 0.001036
| 0
| 0
| 1
| 0.077922
| false
| 0.012987
| 0.038961
| 0
| 0.12987
| 0.077922
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d24d0bdd36a59f427b441c5723ee50ebc6c486c7
| 349
|
py
|
Python
|
MUNDO 1/ex009.py
|
athavus/Curso-em-video-Python-3
|
a32be95adbccfcbe512a1ed30d3859141a230b5e
|
[
"MIT"
] | 1
|
2020-11-12T14:03:32.000Z
|
2020-11-12T14:03:32.000Z
|
MUNDO 1/ex009.py
|
athavus/Curso-em-video-Python-3
|
a32be95adbccfcbe512a1ed30d3859141a230b5e
|
[
"MIT"
] | null | null | null |
MUNDO 1/ex009.py
|
athavus/Curso-em-video-Python-3
|
a32be95adbccfcbe512a1ed30d3859141a230b5e
|
[
"MIT"
] | 1
|
2021-01-05T22:18:46.000Z
|
2021-01-05T22:18:46.000Z
|
# ex009 — tabuada: read an integer and print its multiplication table 1..10.
n1 = int(input('Digite um número para ver sua tabuada: '))
print('-' * 12)
# One line per factor. The original hard-coded three long f-strings with an
# inconsistent multiplication sign ('X' vs 'x' at factor 4) and stray trailing
# spaces; a loop normalizes the format and generalizes to any range.
for fator in range(1, 11):
    print(f'{n1} X {fator} = {n1 * fator}')
print('-' * 12)
| 49.857143
| 101
| 0.43553
| 77
| 349
| 1.974026
| 0.324675
| 0.197368
| 0.184211
| 0.177632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.187251
| 0.280802
| 349
| 6
| 102
| 58.166667
| 0.418327
| 0
| 0
| 0.333333
| 0
| 0.5
| 0.769679
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.833333
| 0
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
962f1db9f2bd0cff9f53f5c5bd8eef1dc6560f63
| 10,428
|
py
|
Python
|
test/test_LensModel/test_Profiles/test_nie_potential.py
|
heather999/lenstronomy
|
8102fe026c1f3ba6e81d8a1f59cceb90e68430b4
|
[
"MIT"
] | 107
|
2017-08-25T20:03:51.000Z
|
2022-03-30T19:52:21.000Z
|
test/test_LensModel/test_Profiles/test_nie_potential.py
|
heather999/lenstronomy
|
8102fe026c1f3ba6e81d8a1f59cceb90e68430b4
|
[
"MIT"
] | 235
|
2017-06-07T13:30:53.000Z
|
2022-03-28T12:44:04.000Z
|
test/test_LensModel/test_Profiles/test_nie_potential.py
|
heather999/lenstronomy
|
8102fe026c1f3ba6e81d8a1f59cceb90e68430b4
|
[
"MIT"
] | 68
|
2018-02-01T15:47:20.000Z
|
2022-03-27T12:44:32.000Z
|
__author__ = 'gipagano'
import numpy as np
import numpy.testing as npt
import pytest
import lenstronomy.Util.param_util as param_util
from lenstronomy.Util import util
from lenstronomy.LensModel.Profiles.nie_potential import NIE_POTENTIAL
from lenstronomy.LensModel.Profiles.spep import SPEP
class TestNIE_POTENTIAL(object):
    """
    Tests the NIE_POTENTIAL profile against the equivalent SPEP profile for
    several ellipticity rotations. The NIE->SPEP parameter mapping, previously
    copy-pasted three times per test, is factored into _spep_equivalent().
    """

    # (e1, e2) pairs exercised by every test: no rotation, rotation 1, rotation 2.
    _ELLIPTICITIES = [(0.05, 0.0), (0.05, 0.1), (0.15, 0.13)]

    def setup(self):
        self.nie_potential = NIE_POTENTIAL()
        self.spep = SPEP()

    def _spep_equivalent(self, theta_E, e1, e2):
        """Map NIE_POTENTIAL (theta_E, e1, e2) to equivalent SPEP parameters.

        Returns (theta_E_spep, gamma_spep, e1_spep, e2_spep).
        """
        eps = np.sqrt(e1 ** 2 + e2 ** 2)
        phi_G, q = param_util.ellipticity2phi_q(e1, e2)
        gamma_spep = 2.  # isothermal slope
        q_spep = np.sqrt(q)
        e1_spep, e2_spep = param_util.phi_q2_ellipticity(phi_G, q_spep)
        theta_E_conv = self.nie_potential._theta_q_convert(theta_E, q)
        theta_E_spep = theta_E_conv * np.sqrt(1 - eps) / ((1 - eps) / (1 + eps)) ** 0.25
        return theta_E_spep, gamma_spep, e1_spep, e2_spep

    def test_function(self):
        """Potential differences agree between NIE_POTENTIAL and SPEP."""
        y = np.array([1., 2])
        x = np.array([0., 0.])
        theta_E = 1.
        theta_c = 0.
        for e1, e2 in self._ELLIPTICITIES:
            theta_E_spep, gamma_spep, e1_spep, e2_spep = \
                self._spep_equivalent(theta_E, e1, e2)
            values = self.nie_potential.function(x, y, theta_E, theta_c, e1, e2)
            delta_pot = values[1] - values[0]
            values = self.spep.function(x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep)
            delta_pot_spep = values[1] - values[0]
            npt.assert_almost_equal(delta_pot, delta_pot_spep, decimal=4)

    def test_derivatives(self):
        """Deflection angles agree between NIE_POTENTIAL and SPEP."""
        x = np.array([1])
        y = np.array([2])
        theta_E = 1.
        theta_c = 0.
        for e1, e2 in self._ELLIPTICITIES:
            theta_E_spep, gamma_spep, e1_spep, e2_spep = \
                self._spep_equivalent(theta_E, e1, e2)
            f_x, f_y = self.nie_potential.derivatives(x, y, theta_E, theta_c, e1, e2)
            f_x_nie, f_y_nie = self.spep.derivatives(
                x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep)
            npt.assert_almost_equal(f_x, f_x_nie, decimal=4)
            npt.assert_almost_equal(f_y, f_y_nie, decimal=4)

    def test_hessian(self):
        """Second derivatives agree between NIE_POTENTIAL and SPEP."""
        x = np.array([1])
        y = np.array([2])
        theta_E = 1.
        theta_c = 0.
        for e1, e2 in self._ELLIPTICITIES:
            theta_E_spep, gamma_spep, e1_spep, e2_spep = \
                self._spep_equivalent(theta_E, e1, e2)
            f_xx, f_xy, f_yx, f_yy = self.nie_potential.hessian(
                x, y, theta_E, theta_c, e1, e2)
            f_xx_nie, f_xy_nie, f_yx_nie, f_yy_nie = self.spep.hessian(
                x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep)
            npt.assert_almost_equal(f_xx, f_xx_nie, decimal=4)
            npt.assert_almost_equal(f_yy, f_yy_nie, decimal=4)
            npt.assert_almost_equal(f_xy, f_xy_nie, decimal=4)
            npt.assert_almost_equal(f_yx, f_yx_nie, decimal=4)

    def test_static(self):
        """set_static() freezes the profile; set_dynamic() releases it."""
        x, y = 1., 1.
        phi_G, q = 0.3, 0.8
        e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
        kwargs_lens = {'theta_E': 1., 'theta_c': .1, 'e1': e1, 'e2': e2}
        f_ = self.nie_potential.function(x, y, **kwargs_lens)
        self.nie_potential.set_static(**kwargs_lens)
        f_static = self.nie_potential.function(x, y, **kwargs_lens)
        npt.assert_almost_equal(f_, f_static, decimal=8)
        self.nie_potential.set_dynamic()
        # With different kwargs after set_dynamic(), the result must change.
        kwargs_lens = {'theta_E': 2., 'theta_c': .1, 'e1': e1, 'e2': e2}
        f_dyn = self.nie_potential.function(x, y, **kwargs_lens)
        assert f_dyn != f_static
# Allow running this test module directly; pytest discovers it otherwise.
if __name__ == '__main__':
    pytest.main()
| 40.107692
| 116
| 0.569333
| 1,607
| 10,428
| 3.372744
| 0.057872
| 0.065314
| 0.070849
| 0.081181
| 0.887454
| 0.879705
| 0.879705
| 0.879705
| 0.848339
| 0.848339
| 0
| 0.045361
| 0.302359
| 10,428
| 259
| 117
| 40.262548
| 0.699656
| 0.079593
| 0
| 0.798742
| 0
| 0
| 0.005572
| 0
| 0
| 0
| 0
| 0
| 0.144654
| 1
| 0.031447
| false
| 0
| 0.044025
| 0
| 0.081761
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
964520c7fffd2d0ae148d80f48a8ce2a762ad62b
| 38
|
py
|
Python
|
xiangmu/login.py
|
sunchaoyi/test
|
0e460805cf5eb7b813ece38fd9c356da2eb19754
|
[
"MIT"
] | null | null | null |
xiangmu/login.py
|
sunchaoyi/test
|
0e460805cf5eb7b813ece38fd9c356da2eb19754
|
[
"MIT"
] | null | null | null |
xiangmu/login.py
|
sunchaoyi/test
|
0e460805cf5eb7b813ece38fd9c356da2eb19754
|
[
"MIT"
] | null | null | null |
# Six placeholder constants, values 1 through 6 (unchanged).
a, b, c, d, e, f = 1, 2, 3, 4, 5, 6
| 4.222222
| 5
| 0.315789
| 12
| 38
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.526316
| 38
| 9
| 6
| 4.222222
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
9688b01bdc012d33f00a77a90ff0a564300fd02c
| 3,313
|
py
|
Python
|
clingine/label.py
|
avancayetano/clingine
|
55e8bd6366aad3ae8e7ac9537fa3ae85efab9ddc
|
[
"MIT"
] | 12
|
2020-04-10T09:10:29.000Z
|
2022-03-12T03:45:08.000Z
|
clingine/label.py
|
avancayetano/clingine
|
55e8bd6366aad3ae8e7ac9537fa3ae85efab9ddc
|
[
"MIT"
] | 6
|
2020-04-11T10:47:01.000Z
|
2020-10-19T14:15:55.000Z
|
clingine/label.py
|
avancayetano/clingine
|
55e8bd6366aad3ae8e7ac9537fa3ae85efab9ddc
|
[
"MIT"
] | 1
|
2021-09-04T00:40:34.000Z
|
2021-09-04T00:40:34.000Z
|
import math
class Label:
    """A multi-line text label drawn into a window's screen array.

    Each screen cell is a 3-element list [changed, char, color_pair]; the
    `changed` flag is set whenever the cell's visible content differs from
    what is being written (or was already pending).

    Changes vs. original: the mutable default argument ``text=[""]`` (shared
    across all instances) is replaced by a ``None`` sentinel, and the four
    near-identical render/unrender anchor branches are factored into _paint().
    """

    def __init__(self, window, text=None, x=0, y=0, anchor="left", color_pair=None, group=None):
        self.window = window
        # Fresh list per instance; the original default list was shared.
        self.text = [""] if text is None else text
        self.x = x
        self.y = y
        self.anchor = anchor  # "left" or "center"; any other value draws nothing
        if color_pair != None:
            self.color_pair = tuple(color_pair)
        else:
            self.color_pair = color_pair
        self.group = group
        if isinstance(self.group, list):
            self.group.append(self)

    def update(self, new_text=None):
        """Erase the current text and, if given, replace it (defensive copy)."""
        self.unrender()
        if new_text:
            self.text = new_text[:]

    def _paint(self, cell_for):
        """Write cells for every in-bounds character of self.text.

        `cell_for(ch)` returns the (char, color) pair to store for source
        character `ch`. Drawable area is [0, width-2] x [0, height-2].
        """
        if self.anchor not in ("center", "left"):
            return  # matches original behavior: unknown anchors draw nothing
        grid = self.window.screen_array
        fx = math.floor(self.x)
        fy = math.floor(self.y)
        max_x = self.window.width - 2
        max_y = self.window.height - 2
        for row, line in enumerate(self.text):
            gy = fy + row
            if not 0 <= gy <= max_y:
                continue
            # Center anchoring shifts each line left by half its length.
            base = fx - (len(line) - 1) // 2 if self.anchor == "center" else fx
            for col, ch in enumerate(line):
                gx = base + col
                if not 0 <= gx <= max_x:
                    continue
                char, color = cell_for(ch)
                # Mark changed if the visible content differs, or the cell was
                # already flagged as changed.
                changed = grid[gy][gx][1:] != [char, color]
                if not changed:
                    changed = grid[gy][gx][0]
                grid[gy][gx] = [changed, char, color]

    def unrender(self):
        """Overwrite the label's footprint with the window's background cell."""
        background = (self.window.char, self.window.color_pair)
        self._paint(lambda _ch: background)

    def render(self):
        """Draw the label's text with its own color pair."""
        self._paint(lambda ch: (ch, self.color_pair))

    def destroy(self):
        """Erase the label and detach it from its group, if any."""
        self.unrender()
        if self.group:
            self.group.remove(self)
| 47.328571
| 169
| 0.628433
| 568
| 3,313
| 3.579225
| 0.086268
| 0.141663
| 0.204624
| 0.110182
| 0.808657
| 0.808657
| 0.808657
| 0.808657
| 0.795376
| 0.795376
| 0
| 0.015458
| 0.179897
| 3,313
| 69
| 170
| 48.014493
| 0.732794
| 0
| 0
| 0.491803
| 0
| 0
| 0.007244
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081967
| false
| 0
| 0.016393
| 0
| 0.114754
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
96925491902a308fdcea70180b8469e7d1d22a89
| 7,067
|
py
|
Python
|
pyvac/tests/views/test_home.py
|
sayoun/pyvac
|
45ade8de2f29864d500e0358e38ebcbd2674a06d
|
[
"BSD-3-Clause"
] | 21
|
2015-11-19T17:36:46.000Z
|
2021-07-02T15:48:21.000Z
|
pyvac/tests/views/test_home.py
|
sayoun/pyvac
|
45ade8de2f29864d500e0358e38ebcbd2674a06d
|
[
"BSD-3-Clause"
] | 28
|
2015-07-03T07:54:48.000Z
|
2022-03-21T22:16:23.000Z
|
pyvac/tests/views/test_home.py
|
sayoun/pyvac
|
45ade8de2f29864d500e0358e38ebcbd2674a06d
|
[
"BSD-3-Clause"
] | 13
|
2015-07-03T07:30:04.000Z
|
2020-07-03T15:22:51.000Z
|
from datetime import datetime
from freezegun import freeze_time
from mock import patch, PropertyMock
from pyvac.tests import case
class HomeTestCase(case.ViewTestCase):
    """Tests for the Home view rendered for the different user profiles."""

    # Keys the Home view must expose regardless of the logged-in user.
    # (Previously duplicated six times across the test methods.)
    expected_view_keys = set(['matched_route', 'types', 'csrf_token',
                              'pyvac', 'holidays', 'sudo_users',
                              'exception_info_tooltip',
                              'recovered_info_tooltip', 'recovered_cp',
                              'futures_approved', 'futures_pending',
                              'futures_breakdown'])

    # Modules freeze_time must not patch (they manage time internally).
    freeze_ignore = ['celery', 'psycopg2', 'sqlalchemy', 'icalendar']

    def setUp(self):
        super(HomeTestCase, self).setUp()

    def tearDown(self):
        super(HomeTestCase, self).tearDown()

    def _render_home(self):
        """Render the Home view for a fresh request and return its context dict."""
        from pyvac.views import Home
        return Home(self.create_request())()

    def _check_rtt(self, frozen_day, arrival_date, expected):
        """Render Home at *frozen_day* for a user who arrived at *arrival_date*
        and assert the computed RTT usage equals *expected*."""
        with freeze_time(frozen_day, ignore=self.freeze_ignore):
            with patch('pyvac.models.User.arrival_date',
                       new_callable=PropertyMock) as mock_arrival:
                mock_arrival.return_value = arrival_date
                view = self._render_home()
                self.assertEqual(set(view.keys()), self.expected_view_keys)
                self.assertEqual(len(view['types']), 5)
                view_user = view['pyvac']['user']
                view_user.rtt = view_user.get_rtt_usage(self.session)
                self.assertTrue(view_user.rtt)
                self.assertEqual(view_user.rtt, expected)

    def test_render_admin_ok(self):
        self.config.testing_securitypolicy(userid='admin',
                                           permissive=True)
        view = self._render_home()
        self.assertEqual(set(view.keys()), self.expected_view_keys)
        self.assertEqual(len(view['types']), 6)

    def test_render_country_ok(self):
        self.config.testing_securitypolicy(userid='manager3',
                                           permissive=True)
        view = self._render_home()
        self.assertEqual(set(view.keys()), self.expected_view_keys)
        self.assertEqual(len(view['types']), 4)

    def test_render_holiday_ok(self):
        self.config.testing_securitypolicy(userid='manager2',
                                           permissive=True)
        with freeze_time('2015-12-25', ignore=self.freeze_ignore):
            view = self._render_home()
            self.assertEqual(set(view.keys()), self.expected_view_keys)
            self.assertEqual(len(view['types']), 5)
            self.assertEqual(len(view['holidays']), 22)

    def test_render_user_rtt_ok(self):
        self.config.testing_securitypolicy(userid='jdoe',
                                           permissive=True)
        self._check_rtt('2014-12-25', datetime(2014, 1, 1),
                        {'allowed': 10, 'left': 9.5, 'state': 'warning',
                         'taken': 0.5, 'year': 2014})
        self._check_rtt('2011-01-02', datetime(2011, 1, 1),
                        {'allowed': 1, 'left': 0.5, 'state': 'success',
                         'taken': 0.5, 'year': 2011})
        # testing that we take count of all type of requests:
        # PENDING, ACCEPTED_MANAGER, APPROVED_ADMIN
        self._check_rtt('2016-05-02', datetime(2016, 1, 1),
                        {'allowed': 5, 'left': 2.0, 'state': 'success',
                         'taken': 3.0, 'year': 2016})
| 50.841727
| 76
| 0.466959
| 599
| 7,067
| 5.288815
| 0.198664
| 0.075758
| 0.075758
| 0.048611
| 0.81029
| 0.81029
| 0.778093
| 0.708965
| 0.708965
| 0.708965
| 0
| 0.022593
| 0.423801
| 7,067
| 138
| 77
| 51.210145
| 0.755403
| 0.013018
| 0
| 0.736
| 0
| 0
| 0.197648
| 0.050775
| 0
| 0
| 0
| 0
| 0.152
| 1
| 0.048
| false
| 0
| 0.064
| 0
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
96a0c0b8b604897f799ec915e2637f6b08b1a190
| 18,332
|
py
|
Python
|
tests/test_api.py
|
w1ll1am23/simplisafe-python
|
8d87b6562e5f353fab438d69476079a9db031618
|
[
"MIT"
] | 3
|
2017-05-21T16:49:38.000Z
|
2018-07-05T16:16:45.000Z
|
tests/test_api.py
|
w1ll1am23/simplisafe-python
|
8d87b6562e5f353fab438d69476079a9db031618
|
[
"MIT"
] | 2
|
2017-07-20T11:57:23.000Z
|
2018-09-24T03:03:19.000Z
|
tests/test_api.py
|
w1ll1am23/simplisafe-python
|
8d87b6562e5f353fab438d69476079a9db031618
|
[
"MIT"
] | 7
|
2017-04-15T05:52:09.000Z
|
2018-08-19T01:49:54.000Z
|
"""Define base API tests."""
# pylint: disable=protected-access,too-many-arguments
import asyncio
from datetime import datetime, timedelta
from unittest.mock import AsyncMock, MagicMock, Mock, patch
import aiohttp
import pytest
from simplipy.api import API
from simplipy.errors import (
InvalidCredentialsError,
RequestError,
SimplipyError,
Verify2FAError,
)
from .common import (
TEST_ACCESS_TOKEN,
TEST_PASSWORD,
TEST_REFRESH_TOKEN,
TEST_SUBSCRIPTION_ID,
TEST_USERNAME,
)
def _add_failed_login_routes(aresponses, login_response_body):
    """Register the Auth0 authorize redirect followed by a login POST that
    fails (HTTP 400) with *login_response_body*.

    Shared by the bad-credentials/2FA-error tests, which previously
    duplicated this route plan verbatim.
    """
    aresponses.add(
        "auth.simplisafe.com",
        "/authorize",
        "get",
        response=aresponses.Response(
            text=None,
            status=302,
            headers={"Location": "/u/login?state=12345"},
        ),
    )
    aresponses.add(
        "auth.simplisafe.com",
        "/u/login",
        "post",
        response=aresponses.Response(
            text=login_response_body,
            status=400,
        ),
    )


@pytest.mark.asyncio
async def test_2fa_sms_exceeded(aresponses, login_resp_sms_exceeded):
    """Test that a "SMS limit exceeded" 2FA error is caught."""
    _add_failed_login_routes(aresponses, login_resp_sms_exceeded)

    async with aiohttp.ClientSession() as session:
        with pytest.raises(Verify2FAError):
            await API.async_from_credentials(
                TEST_USERNAME, TEST_PASSWORD, session=session
            )


@pytest.mark.asyncio
async def test_401_bad_credentials(aresponses, login_resp_invalid_username_password):
    """Test invalid credentials."""
    _add_failed_login_routes(aresponses, login_resp_invalid_username_password)

    async with aiohttp.ClientSession() as session:
        with pytest.raises(InvalidCredentialsError):
            await API.async_from_credentials(
                TEST_USERNAME, TEST_PASSWORD, session=session
            )

    aresponses.assert_plan_strictly_followed()
@pytest.mark.asyncio
async def test_401_refresh_token_failure(
    aresponses, invalid_refresh_token_response, server
):
    """Test that an error is raised when refresh token and reauth both fail."""
    # First API call 401s, triggering the token-refresh path.
    server.add(
        "api.simplisafe.com",
        f"/v1/users/{TEST_SUBSCRIPTION_ID}/subscriptions",
        "get",
        response=aresponses.Response(text="Unauthorized", status=401),
    )
    # The refresh attempt itself is then rejected with a 403.
    server.add(
        "auth.simplisafe.com",
        "/oauth/token",
        "post",
        response=aiohttp.web_response.json_response(
            invalid_refresh_token_response,
            status=403,
        ),
    )

    async with aiohttp.ClientSession() as session:
        simplisafe = await API.async_from_credentials(
            TEST_USERNAME, TEST_PASSWORD, session=session
        )
        await simplisafe.async_verify_2fa_email()

        # Manually set the expiration datetime to force a refresh token flow:
        simplisafe._token_last_refreshed = datetime.utcnow() - timedelta(seconds=30)

        with pytest.raises(InvalidCredentialsError):
            await simplisafe.async_get_systems()

    aresponses.assert_plan_strictly_followed()


@pytest.mark.asyncio
async def test_401_refresh_token_success(
    api_token_response,
    aresponses,
    auth_check_response,
    server,
    v2_settings_response,
    v2_subscriptions_response,
):
    """Test that a successful refresh token carries out the original request."""
    # First call 401s, forcing a token refresh before the retry.
    server.add(
        "api.simplisafe.com",
        f"/v1/users/{TEST_SUBSCRIPTION_ID}/subscriptions",
        "get",
        response=aresponses.Response(text="Unauthorized", status=401),
    )
    # The refresh succeeds and hands back new token values.
    api_token_response["access_token"] = "jjhhgg66"
    api_token_response["refresh_token"] = "aabbcc11"
    server.add(
        "auth.simplisafe.com",
        "/oauth/token",
        "post",
        response=aiohttp.web_response.json_response(api_token_response, status=200),
    )
    server.add(
        "api.simplisafe.com",
        "/v1/api/authCheck",
        "get",
        response=aiohttp.web_response.json_response(auth_check_response, status=200),
    )
    # The retried original request (plus its settings call) now succeeds.
    server.add(
        "api.simplisafe.com",
        f"/v1/users/{TEST_SUBSCRIPTION_ID}/subscriptions",
        "get",
        response=aiohttp.web_response.json_response(
            v2_subscriptions_response, status=200
        ),
    )
    server.add(
        "api.simplisafe.com",
        f"/v1/subscriptions/{TEST_SUBSCRIPTION_ID}/settings",
        "get",
        response=aiohttp.web_response.json_response(v2_settings_response, status=200),
    )

    async with aiohttp.ClientSession() as session:
        simplisafe = await API.async_from_credentials(
            TEST_USERNAME, TEST_PASSWORD, session=session
        )
        await simplisafe.async_verify_2fa_email()

        # Manually set the expiration datetime to force a refresh token flow:
        simplisafe._token_last_refreshed = datetime.utcnow() - timedelta(seconds=30)

        # If this succeeds without throwing an exception, the retry is successful:
        await simplisafe.async_get_systems()

        assert simplisafe.access_token == "jjhhgg66"
        assert simplisafe.refresh_token == "aabbcc11"

    aresponses.assert_plan_strictly_followed()
@pytest.mark.asyncio
async def test_403_bad_credentials(aresponses, login_resp_invalid_username_password):
    """Test that an InvalidCredentialsError is raised with a 403."""
    login_redirect = aresponses.Response(
        text=None,
        status=302,
        headers={"Location": "/u/login?state=12345"},
    )
    aresponses.add("auth.simplisafe.com", "/authorize", "get",
                   response=login_redirect)

    login_rejection = aresponses.Response(
        text=login_resp_invalid_username_password,
        status=400,
    )
    aresponses.add("auth.simplisafe.com", "/u/login", "post",
                   response=login_rejection)

    async with aiohttp.ClientSession() as session:
        with pytest.raises(InvalidCredentialsError):
            await API.async_from_credentials(
                TEST_USERNAME, TEST_PASSWORD, session=session
            )


@pytest.mark.asyncio
async def test_client_async_from_refresh_token(
    api_token_response, aresponses, auth_check_response
):
    """Test creating a client from a refresh token."""
    token_ok = aiohttp.web_response.json_response(api_token_response, status=200)
    aresponses.add("auth.simplisafe.com", "/oauth/token", "post",
                   response=token_ok)

    auth_ok = aiohttp.web_response.json_response(auth_check_response, status=200)
    aresponses.add("api.simplisafe.com", "/v1/api/authCheck", "get",
                   response=auth_ok)

    async with aiohttp.ClientSession() as session:
        simplisafe = await API.async_from_refresh_token(
            TEST_REFRESH_TOKEN, session=session
        )
        assert simplisafe.access_token == TEST_ACCESS_TOKEN
        assert simplisafe.refresh_token == TEST_REFRESH_TOKEN

    aresponses.assert_plan_strictly_followed()


@pytest.mark.asyncio
async def test_client_async_from_refresh_token_http_error(aresponses, server):
    """Test that an error is raised when refreshing a token yields an HTTP error."""
    # The original request 401s, then the refresh endpoint rejects us too.
    server.add(
        "api.simplisafe.com",
        f"/v1/users/{TEST_SUBSCRIPTION_ID}/subscriptions",
        "get",
        response=aresponses.Response(text="Unauthorized", status=401),
    )
    refresh_rejection = aiohttp.web_response.json_response("Bad Request", status=400)
    server.add("auth.simplisafe.com", "/oauth/token", "post",
               response=refresh_rejection)

    async with aiohttp.ClientSession() as session:
        simplisafe = await API.async_from_credentials(
            TEST_USERNAME, TEST_PASSWORD, session=session
        )
        await simplisafe.async_verify_2fa_email()

        with pytest.raises(RequestError):
            await API.async_from_refresh_token(TEST_REFRESH_TOKEN, session=session)

    aresponses.assert_plan_strictly_followed()


@pytest.mark.asyncio
async def test_client_async_from_refresh_token_unknown_error():
    """Test an unknown error while creating a client from a refresh token."""
    # Any non-HTTP exception raised by the session must surface as SimplipyError.
    broken_session = MagicMock(request=AsyncMock(side_effect=Exception))
    with patch("simplipy.api.ClientSession", broken_session) as session:
        with pytest.raises(SimplipyError):
            await API.async_from_refresh_token(TEST_REFRESH_TOKEN, session=session)
@pytest.mark.asyncio
async def test_refresh_token_callback(
    api_token_response,
    aresponses,
    server,
    v2_settings_response,
    v2_subscriptions_response,
):
    """Test that callbacks are executed correctly."""
    # Two 401s force the token-refresh machinery (which fires the callbacks).
    server.add(
        "api.simplisafe.com",
        f"/v1/users/{TEST_SUBSCRIPTION_ID}/subscriptions",
        "get",
        response=aresponses.Response(text="Unauthorized", status=401),
    )
    server.add(
        "api.simplisafe.com",
        f"/v1/subscriptions/{TEST_SUBSCRIPTION_ID}/settings",
        "get",
        response=aresponses.Response(text="Unauthorized", status=401),
    )
    # The refresh succeeds with fresh token values.
    api_token_response["access_token"] = "jjhhgg66"
    api_token_response["refresh_token"] = "aabbcc11"
    server.add(
        "auth.simplisafe.com",
        "/oauth/token",
        "post",
        response=aiohttp.web_response.json_response(api_token_response, status=200),
    )
    server.add(
        "api.simplisafe.com",
        f"/v1/users/{TEST_SUBSCRIPTION_ID}/subscriptions",
        "get",
        response=aiohttp.web_response.json_response(
            v2_subscriptions_response, status=200
        ),
    )
    server.add(
        "api.simplisafe.com",
        f"/v1/subscriptions/{TEST_SUBSCRIPTION_ID}/settings",
        "get",
        response=aiohttp.web_response.json_response(v2_settings_response, status=200),
    )

    mock_callback_1 = Mock()
    mock_callback_2 = Mock()

    async with aiohttp.ClientSession() as session:
        simplisafe = await API.async_from_credentials(
            TEST_USERNAME, TEST_PASSWORD, session=session
        )
        await simplisafe.async_verify_2fa_email()

        # Manually set the expiration datetime to force a refresh token flow:
        simplisafe._token_last_refreshed = datetime.utcnow() - timedelta(seconds=30)

        # We'll hang onto one callback:
        simplisafe.add_refresh_token_callback(mock_callback_1)
        assert mock_callback_1.call_count == 0

        # ..and delete the second one before ever using it:
        remove = simplisafe.add_refresh_token_callback(mock_callback_2)
        remove()

        await simplisafe.async_get_systems()
        await asyncio.sleep(1)
        # Only the still-registered callback sees the new refresh token.
        mock_callback_1.assert_called_once_with("aabbcc11")
        assert mock_callback_1.call_count == 1
        assert mock_callback_2.call_count == 0


@pytest.mark.asyncio
async def test_request_retry(
    api_token_response,
    aresponses,
    server,
    v2_settings_response,
    v2_subscriptions_response,
):
    """Test that request retries work."""
    # Two consecutive 409s: the first surfaces as an error while retries are
    # disabled; the second is absorbed once retries are re-enabled.
    server.add(
        "api.simplisafe.com",
        f"/v1/users/{TEST_SUBSCRIPTION_ID}/subscriptions",
        "get",
        response=aresponses.Response(text="Conflict", status=409),
    )
    server.add(
        "api.simplisafe.com",
        f"/v1/users/{TEST_SUBSCRIPTION_ID}/subscriptions",
        "get",
        response=aresponses.Response(text="Conflict", status=409),
    )
    server.add(
        "auth.simplisafe.com",
        "/oauth/token",
        "post",
        response=aiohttp.web_response.json_response(api_token_response, status=200),
    )
    server.add(
        "api.simplisafe.com",
        f"/v1/users/{TEST_SUBSCRIPTION_ID}/subscriptions",
        "get",
        response=aiohttp.web_response.json_response(
            v2_subscriptions_response, status=200
        ),
    )
    server.add(
        "api.simplisafe.com",
        f"/v1/subscriptions/{TEST_SUBSCRIPTION_ID}/settings",
        "get",
        response=aiohttp.web_response.json_response(v2_settings_response, status=200),
    )

    async with aiohttp.ClientSession() as session:
        simplisafe = await API.async_from_credentials(
            TEST_USERNAME, TEST_PASSWORD, session=session
        )
        await simplisafe.async_verify_2fa_email()

        simplisafe.disable_request_retries()

        with pytest.raises(RequestError):
            await simplisafe.async_get_systems()

        simplisafe.enable_request_retries()

        # If this succeeds without throwing an exception, the retry is successful:
        await simplisafe.async_get_systems()

    aresponses.assert_plan_strictly_followed()
@pytest.mark.asyncio
async def test_unknown_auth0_url(aresponses):
    """Test that an error while obtaining the Auth0 login URL is caught."""
    # The very first authorize call fails outright instead of redirecting.
    failed_authorize = aresponses.Response(text=None, status=400)
    aresponses.add("auth.simplisafe.com", "/authorize", "get",
                   response=failed_authorize)

    async with aiohttp.ClientSession() as session:
        with pytest.raises(SimplipyError):
            await API.async_from_credentials(
                TEST_USERNAME, TEST_PASSWORD, session=session
            )
def _add_email_2fa_routes(aresponses, pending_body, successful_body):
    """Register the shared Auth0 email-2FA route plan: authorize redirect,
    login redirect, resume redirect to the TSV check, two TSV polls (pending
    then successful: *pending_body*, *successful_body*), and the /continue
    redirect back to /authorize/resume.

    Extracted because the two tests below previously duplicated these six
    registrations verbatim; registration order is preserved exactly.
    """
    aresponses.add(
        "auth.simplisafe.com",
        "/authorize",
        "get",
        response=aresponses.Response(
            text=None,
            status=302,
            headers={"Location": "/u/login?state=12345"},
        ),
    )
    aresponses.add(
        "auth.simplisafe.com",
        "/u/login",
        "post",
        response=aresponses.Response(
            text=None,
            status=302,
            headers={"Location": "/authorize/resume?state=12345"},
        ),
    )
    aresponses.add(
        "auth.simplisafe.com",
        "/authorize/resume",
        "get",
        response=aresponses.Response(
            text=None,
            status=302,
            headers={
                "Location": (
                    "https://tsv.prd.platform.simplisafe.com/v1/tsv/check"
                    "?token=12345&state=12345"
                )
            },
        ),
    )
    # 2FA status polling: first pending, then verified.
    aresponses.add(
        "tsv.prd.platform.simplisafe.com",
        "/v1/tsv/check",
        "get",
        response=aresponses.Response(
            text=pending_body,
            status=200,
        ),
    )
    aresponses.add(
        "tsv.prd.platform.simplisafe.com",
        "/v1/tsv/check",
        "get",
        response=aresponses.Response(
            text=successful_body,
            status=200,
        ),
    )
    aresponses.add(
        "auth.simplisafe.com",
        "/continue",
        "post",
        response=aresponses.Response(
            text=None,
            status=302,
            headers={"Location": "/authorize/resume?state=12345"},
        ),
    )


@pytest.mark.asyncio
async def test_unknown_resume_url(
    aresponses,
    login_resp_verification_pending_email,
    login_resp_verification_successful,
):
    """Test that an error while obtaining the Auth0 post-auth resume URL is caught."""
    _add_email_2fa_routes(
        aresponses,
        login_resp_verification_pending_email,
        login_resp_verification_successful,
    )
    # The post-verification resume call fails instead of yielding the code.
    aresponses.add(
        "auth.simplisafe.com",
        "/authorize/resume",
        "get",
        response=aresponses.Response(
            text=None,
            status=400,
        ),
    )

    async with aiohttp.ClientSession() as session:
        with pytest.raises(SimplipyError):
            simplisafe = await API.async_from_credentials(
                TEST_USERNAME, TEST_PASSWORD, session=session
            )
            await simplisafe.async_verify_2fa_email()


@pytest.mark.asyncio
async def test_unknown_token_response(
    aresponses,
    login_resp_verification_pending_email,
    login_resp_verification_successful,
):
    """Test that an error while submitting the initial token request is handled."""
    _add_email_2fa_routes(
        aresponses,
        login_resp_verification_pending_email,
        login_resp_verification_successful,
    )
    aresponses.add(
        "auth.simplisafe.com",
        "/authorize/resume",
        "get",
        response=aresponses.Response(
            text=None,
            status=302,
            headers={"Location": "https://webapp.simplisafe.com/new?code=12345"},
        ),
    )
    # The final token exchange fails.
    aresponses.add(
        "auth.simplisafe.com",
        "/oauth/token",
        "post",
        response=aresponses.Response(
            text=None,
            status=400,
        ),
    )

    async with aiohttp.ClientSession() as session:
        with pytest.raises(SimplipyError):
            simplisafe = await API.async_from_credentials(
                TEST_USERNAME, TEST_PASSWORD, session=session
            )
            await simplisafe.async_verify_2fa_email()
| 29.425361
| 86
| 0.620772
| 1,891
| 18,332
| 5.813855
| 0.102062
| 0.054393
| 0.068583
| 0.079134
| 0.862198
| 0.845552
| 0.830544
| 0.803893
| 0.795525
| 0.774695
| 0
| 0.022017
| 0.274056
| 18,332
| 622
| 87
| 29.472669
| 0.804103
| 0.027657
| 0
| 0.772388
| 0
| 0
| 0.158065
| 0.054375
| 0
| 0
| 0
| 0
| 0.026119
| 1
| 0
| false
| 0.029851
| 0.014925
| 0
| 0.014925
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
73fe4d390052ed1f59daa775bebcd6f58712f064
| 27,936
|
py
|
Python
|
tests/unit/pypyr/steps/dsl/cmd_test.py
|
pypyr/pypyr-cli
|
dc0f694ac0c0e3c2844c1a20788c9af586a8a16e
|
[
"Apache-2.0"
] | 31
|
2017-03-24T11:27:34.000Z
|
2020-05-27T20:06:28.000Z
|
tests/unit/pypyr/steps/dsl/cmd_test.py
|
pypyr/pypyr-cli
|
dc0f694ac0c0e3c2844c1a20788c9af586a8a16e
|
[
"Apache-2.0"
] | 89
|
2017-04-12T09:50:32.000Z
|
2020-08-13T13:18:36.000Z
|
tests/unit/pypyr/steps/dsl/cmd_test.py
|
pypyr/pypyr-cli
|
dc0f694ac0c0e3c2844c1a20788c9af586a8a16e
|
[
"Apache-2.0"
] | 6
|
2017-06-04T14:19:59.000Z
|
2020-02-10T13:16:40.000Z
|
"""cmd.py unit tests."""
import logging
import subprocess
from unittest.mock import call, patch
import pytest
from pypyr.config import config
from pypyr.context import Context
from pypyr.dsl import SicString
from pypyr.errors import (ContextError,
KeyInContextHasNoValueError,
KeyNotInContextError)
from pypyr.steps.dsl.cmd import CmdStep
from pypyr.subproc import Command
from tests.common.utils import patch_logger
# True when the tests run on Windows (decides the expected subprocess arg form).
is_windows = config.is_windows

# Module whose loggers the subprocess tests patch.
sp_mod_name = 'pypyr.subproc'


def get_plat(posix, windows):
    """Return *windows* when running on Windows, otherwise *posix*."""
    if is_windows:
        return windows
    return posix
def test_cmdstep_name_required():
    """Cmd Step requires name."""
    with pytest.raises(AssertionError):
        CmdStep(None, None)


def test_cmdstep_context_required():
    """Cmd Step requires context."""
    with pytest.raises(AssertionError):
        CmdStep('blah', None)


def test_cmdstep_context_cmd_required():
    """Cmd Step requires cmd in context."""
    with pytest.raises(KeyNotInContextError) as err:
        CmdStep('blah', Context({'a': 'b'}))
    # The step name is interpolated into the error to aid pipeline debugging.
    assert str(err.value) == ("context['cmd'] doesn't exist. It must exist "
                              "for blah.")


def test_cmdstep_context_cmd_not_none():
    """Cmd Step requires cmd in context."""
    with pytest.raises(KeyInContextHasNoValueError) as err:
        CmdStep('blah', Context({'cmd': None}))
    assert str(err.value) == "context['cmd'] must have a value for blah."


def test_cmdstep_context_cmd_not_dict():
    """Cmd Step requires cmd in context to be a dict if not str."""
    with pytest.raises(ContextError) as err:
        CmdStep('blah', Context({'cmd': 1}))
    # The error enumerates the three accepted cmd input shapes.
    assert str(err.value) == (
        """blah cmd config should be either a simple string:
cmd: my-executable --arg
or a dictionary:
cmd:
run: subdir/my-executable --arg
cwd: ./mydir
or a list of commands:
cmd:
- my-executable --arg
- run: another-executable --arg value
cwd: ../mydir/subdir""")


def test_dsl_cmd_list_must_be_str_or_dict():
    """Each list input must be a string or a dict."""
    with pytest.raises(ContextError) as err:
        CmdStep('blah', Context({'cmd': ['cmd1', 123]}))
    # The offending list item (123) is echoed back in the message.
    assert str(err.value) == ("""\
123 in blah cmd config is wrong.
Each list item should be either a simple string or a dict for expanded syntax:
cmd:
- my-executable --arg
- run: another-executable --arg value
cwd: ../mydir/subdir
- run:
- arb-executable1 --arg value1
- arb-executable2 --arg value2
cwd: ../mydir/arbdir""")


def test_dsl_cmd_dict_run_must_exist():
    """Dict input run must exist."""
    with pytest.raises(ContextError) as err:
        CmdStep('blah', Context({'cmd': {'runs': 'abc'}}))
    # noqa is for line too long
    assert str(err.value) == ("""\
cmd.run doesn't exist for blah.
The input should look like this in the simplified syntax:
cmd: my-executable-here --arg1
Or in the expanded syntax:
cmd:
run: my-executable-here --arg1
If you're passing in a list of commands, each command should be a simple string,
or a dict with a `run` entry:
cmd:
- my-executable --arg
- run: another-executable --arg value
cwd: ../mydir/subdir
- run:
- arb-executable1 --arg value1
- arb-executable2 --arg value2
cwd: ../mydir/arbdir""")  # noqa: E501


def test_dsl_cmd_dict_run_must_have_value():
    """Dict input run must have value."""
    with pytest.raises(ContextError) as err:
        CmdStep('blah', Context({'cmd': {'run': ''}}))
    assert str(err.value) == ("""\
cmd.run must have a value for blah.
The `run` input should look something like this:
cmd:
run: my-executable-here --arg1
cwd: ./mydir/subdir
Or, `run` could be a list of commands:
cmd:
run:
- arb-executable1 --arg value1
- arb-executable2 --arg value2
cwd: ../mydir/arbdir""")


def test_dsl_cmd_must_be_str_or_list():
    """Input to cmd must be a str or a list."""
    with pytest.raises(ContextError) as err:
        cmd = CmdStep('blah', Context({'cmd': {'run': 123}}))
        # The type validation for `run` only fires when the step executes.
        cmd.run_step()
    assert str(err.value) == ("""\
123 cmd should be either a simple string:
cmd: my-executable --arg
Or in the expanded syntax, set `run` to a string:
cmd:
run: my-executable --arg
cwd: ./mydir
Or set run to a list of commands:
cmd:
run:
- my-executable --arg
- another-executable --arg2
cwd: ../mydir/subdir""")
def test_cmdstep_cmd_is_string():
    """Str command is always not is_save."""
    obj = CmdStep('blahname', Context({'cmd': 'blah'}))
    # A plain-string cmd is never a shell invocation and never saves output.
    assert not obj.is_shell
    assert obj.logger.name == 'blahname'
    assert obj.context == Context({'cmd': 'blah'})
    assert obj.commands == [Command('blah',
                                    cwd=None,
                                    is_shell=False,
                                    is_save=False)]


def test_cmdstep_cmd_is_dict_default_save_false():
    """Dict command defaults not is_save."""
    obj = CmdStep('blahname', Context({'cmd': {'run': 'blah'}}))
    assert not obj.is_shell
    assert obj.logger.name == 'blahname'
    assert obj.context == Context({'cmd': {'run': 'blah'}})
    assert obj.commands == [Command('blah',
                                    cwd=None,
                                    is_shell=False,
                                    is_save=False)]


def test_cmdstep_cmd_is_dict_default_save_true():
    """Dict command with is_save true."""
    obj = CmdStep('blahname', Context({'cmd': {'run': 'blah',
                                               'save': True}}),
                  is_shell=False)
    assert not obj.is_shell
    assert obj.logger.name == 'blahname'
    assert obj.context == Context({'cmd': {'run': 'blah', 'save': True}})
    # The 'save' input flips is_save on the built Command.
    assert obj.commands == [Command('blah',
                                    cwd=None,
                                    is_shell=False,
                                    is_save=True)]


def test_cmdstep_cmd_is_dict_cwd():
    """Cwd assigns."""
    obj = CmdStep('blahname', Context({'cmd': {'run': 'blah',
                                               'cwd': 'pathhere'}}),
                  is_shell=False)
    assert not obj.is_shell
    assert obj.logger.name == 'blahname'
    assert obj.context == Context({'cmd': {'run': 'blah', 'cwd': 'pathhere'}})
    assert obj.commands == [Command('blah',
                                    cwd='pathhere',
                                    is_shell=False,
                                    is_save=False)]


def test_dsl_cmd_shell_override():
    """Override shell arg from dict shell input."""
    obj = CmdStep('blahname', Context({'cmd': {'run': 'blah',
                                               'cwd': 'pathhere',
                                               'shell': True,
                                               }}),
                  is_shell=False)
    # The per-command 'shell' input wins over the step-level is_shell arg,
    # but only on the built Command — the step itself keeps is_shell=False.
    assert not obj.is_shell
    assert obj.logger.name == 'blahname'
    assert len(obj.commands) == 1
    cmd = obj.commands[0]
    assert cmd.is_shell
    assert not cmd.is_save


def test_cmdstep_cmd_is_dict_cwd_none():
    """Explicit None on cwd."""
    obj = CmdStep('blahname', Context({'cmd': {'run': 'blah',
                                               'cwd': None}}))
    assert not obj.is_shell
    assert obj.logger.name == 'blahname'
    assert obj.context == Context({'cmd': {'run': 'blah', 'cwd': None}})
    assert obj.commands == [Command('blah',
                                    cwd=None,
                                    is_shell=False,
                                    is_save=False)]
def test_cmdstep_runstep_cmd_is_string_shell_false():
"""Str command is always not is_save."""
obj = CmdStep('blahname', Context({'cmd': 'blah -blah1 --blah2'}),
is_shell=False)
assert not obj.is_shell
assert obj.logger.name == 'blahname'
assert obj.context == Context({'cmd': 'blah -blah1 --blah2'})
assert obj.commands == [Command('blah -blah1 --blah2',
cwd=None,
is_shell=False,
is_save=False)]
with patch_logger(sp_mod_name, logging.DEBUG) as mock_logger_debug:
with patch('subprocess.run') as mock_run:
obj.run_step()
assert mock_logger_debug.mock_calls == [
call('stdout & stderr inheriting from parent process.'),
call('Processing command string: blah -blah1 --blah2')]
expected_cmd = get_plat(['blah', '-blah1', '--blah2'],
'blah -blah1 --blah2')
mock_run.assert_called_once_with(expected_cmd,
cwd=None,
shell=False,
check=True,
stdout=None,
stderr=None)
def test_cmdstep_runstep_cmd_is_string_shell_false_force_no_win(monkeypatch):
"""Force not windows."""
monkeypatch.setattr('pypyr.subproc.config._is_windows', False)
obj = CmdStep('blahname', Context({'cmd': 'blah -blah1 --blah2'}),
is_shell=False)
assert not obj.is_shell
assert obj.logger.name == 'blahname'
assert obj.context == Context({'cmd': 'blah -blah1 --blah2'})
assert obj.commands == [Command('blah -blah1 --blah2',
cwd=None,
is_shell=False,
is_save=False)]
with patch_logger(sp_mod_name, logging.DEBUG) as mock_logger_debug:
with patch('subprocess.run') as mock_run:
obj.run_step()
assert mock_logger_debug.mock_calls == [
call('stdout & stderr inheriting from parent process.'),
call('Processing command string: blah -blah1 --blah2')]
mock_run.assert_called_once_with(['blah', '-blah1', '--blah2'],
cwd=None,
shell=False,
check=True,
stdout=None,
stderr=None)
def test_cmdstep_runstep_cmd_is_string_formatting_shell_false():
"""Str command is always not is_save and works with formatting."""
obj = CmdStep('blahname', Context({'k1': 'blah',
'cmd': '{k1} -{k1}1 --{k1}2'}),
is_shell=False)
assert not obj.is_shell
assert obj.logger.name == 'blahname'
assert obj.context == Context({'k1': 'blah',
'cmd': '{k1} -{k1}1 --{k1}2'})
assert obj.commands == [Command('blah -blah1 --blah2',
cwd=None,
is_shell=False,
is_save=False)]
with patch_logger(sp_mod_name, logging.DEBUG) as mock_logger_debug:
with patch('subprocess.run') as mock_run:
obj.run_step()
assert mock_logger_debug.mock_calls == [
call('stdout & stderr inheriting from parent process.'),
call('Processing command string: blah -blah1 --blah2')]
expected_cmd = get_plat(['blah', '-blah1', '--blah2'],
'blah -blah1 --blah2')
mock_run.assert_called_once_with(expected_cmd,
cwd=None,
shell=False,
check=True,
stdout=None,
stderr=None)
def test_cmdstep_runstep_cmd_is_string_formatting_shell_false_sic():
    """Sic tag directive: the command string is NOT formatted from context."""
    obj = CmdStep('blahname',
                  Context({'k1': 'blah',
                           'cmd': SicString('{k1} -{k1}1 --{k1}2')}),
                  is_shell=False)
    assert not obj.is_shell
    assert obj.logger.name == 'blahname'
    assert obj.context == Context({'k1': 'blah',
                                   'cmd': SicString('{k1} -{k1}1 --{k1}2')})
    # {k1} placeholders survive verbatim because of the sic directive
    assert obj.commands == [Command('{k1} -{k1}1 --{k1}2',
                                    cwd=None,
                                    is_shell=False,
                                    is_save=False)]
    with patch_logger(sp_mod_name, logging.DEBUG) as mock_logger_debug:
        with patch('subprocess.run') as mock_run:
            obj.run_step()
    assert mock_logger_debug.mock_calls == [
        call('stdout & stderr inheriting from parent process.'),
        call('Processing command string: {k1} -{k1}1 --{k1}2')]
    expected_cmd = get_plat(['{k1}', '-{k1}1', '--{k1}2'],
                            '{k1} -{k1}1 --{k1}2')
    mock_run.assert_called_once_with(expected_cmd,
                                     cwd=None,
                                     shell=False,
                                     check=True,
                                     stdout=None,
                                     stderr=None)
def test_cmdstep_runstep_cmd_is_string_shell_true():
    """Str command is always not is_save."""
    obj = CmdStep('blahname',
                  Context({'cmd': 'blah -blah1 --blah2'}),
                  is_shell=True)
    assert obj.is_shell
    assert obj.logger.name == 'blahname'
    assert obj.context == Context({'cmd': 'blah -blah1 --blah2'})
    assert obj.commands == [Command('blah -blah1 --blah2',
                                    cwd=None,
                                    is_shell=True,
                                    is_save=False)]
    with patch_logger(sp_mod_name, logging.DEBUG) as mock_logger_debug:
        with patch('subprocess.run') as mock_run:
            obj.run_step()
    assert mock_logger_debug.mock_calls == [
        call('stdout & stderr inheriting from parent process.'),
        call('Processing command string: blah -blah1 --blah2')]
    # cmd stays a single string (not split into a list) because shell == true
    mock_run.assert_called_once_with('blah -blah1 --blah2',
                                     cwd=None,
                                     shell=True,
                                     check=True,
                                     stdout=None,
                                     stderr=None)
def test_cmdstep_runstep_cmd_is_string_formatting_shell_true():
    """Str command is always not is_save and works with {key} formatting."""
    obj = CmdStep('blahname',
                  Context({'k1': 'blah',
                           'cmd': '{k1} -{k1}1 --{k1}2'}),
                  is_shell=True)
    assert obj.is_shell
    assert obj.logger.name == 'blahname'
    # input context is not mutated by formatting
    assert obj.context == Context({'k1': 'blah',
                                   'cmd': '{k1} -{k1}1 --{k1}2'})
    assert obj.commands == [Command('blah -blah1 --blah2',
                                    cwd=None,
                                    is_shell=True,
                                    is_save=False)]
    with patch_logger(sp_mod_name, logging.DEBUG) as mock_logger_debug:
        with patch('subprocess.run') as mock_run:
            obj.run_step()
    assert mock_logger_debug.mock_calls == [
        call('stdout & stderr inheriting from parent process.'),
        call('Processing command string: blah -blah1 --blah2')]
    # blah is a string because shell == true
    mock_run.assert_called_once_with('blah -blah1 --blah2',
                                     cwd=None,
                                     shell=True,
                                     check=True,
                                     stdout=None,
                                     stderr=None)
def test_cmdstep_runstep_cmd_is_dict_save_false_shell_false():
    """Dict command with save false and shell false."""
    # 'save' defaults to False when the cmd dict omits it
    obj = CmdStep('blahname', Context({'cmd': {
        'run': 'blah -blah1 --blah2'}}),
        is_shell=False)
    assert not obj.is_shell
    assert obj.logger.name == 'blahname'
    assert obj.context == Context({'cmd': {'run': 'blah -blah1 --blah2'}})
    assert obj.commands == [Command('blah -blah1 --blah2',
                                    cwd=None,
                                    is_shell=False,
                                    is_save=False)]
    with patch_logger(sp_mod_name, logging.DEBUG) as mock_logger_debug:
        with patch('subprocess.run') as mock_run:
            obj.run_step()
    assert mock_logger_debug.mock_calls == [
        call('stdout & stderr inheriting from parent process.'),
        call('Processing command string: blah -blah1 --blah2')]
    # windows is always str
    expected_cmd = get_plat(['blah', '-blah1', '--blah2'],
                            'blah -blah1 --blah2')
    mock_run.assert_called_once_with(expected_cmd,
                                     cwd=None,
                                     shell=False,
                                     check=True,
                                     stdout=None,
                                     stderr=None)
def test_cmdstep_runstep_cmd_is_dict_save_false_shell_true():
    """Dict command with save false and shell true."""
    obj = CmdStep('blahname',
                  Context({'cmd': {
                      'run': 'blah -blah1 --blah2'}}),
                  is_shell=True)
    assert obj.is_shell
    assert obj.logger.name == 'blahname'
    assert obj.context == Context({'cmd': {'run': 'blah -blah1 --blah2'}})
    assert obj.commands == [Command('blah -blah1 --blah2',
                                    cwd=None,
                                    is_shell=True,
                                    is_save=False)]
    with patch_logger(sp_mod_name, logging.DEBUG) as mock_logger_debug:
        with patch('subprocess.run') as mock_run:
            obj.run_step()
    assert mock_logger_debug.mock_calls == [
        call('stdout & stderr inheriting from parent process.'),
        call('Processing command string: blah -blah1 --blah2')]
    # shell commands pass through as a single string, never an arg list
    mock_run.assert_called_once_with('blah -blah1 --blah2',
                                     cwd=None,
                                     shell=True,
                                     check=True,
                                     stdout=None,
                                     stderr=None)
def test_cmdstep_runstep_cmd_is_dict_save_false_shell_true_cwd_formatting():
    """Dict command with save false and shell true, cwd formatting."""
    # cwd itself is subject to {key} context formatting
    obj = CmdStep('blahname', Context({
        'k1': 'v1',
        'k2': 'v2',
        'cmd': {
            'run': 'blah -blah1 --blah2', 'cwd': '/{k1}/{k2}'}}),
        is_shell=True)
    assert obj.is_shell
    assert obj.logger.name == 'blahname'
    assert obj.context == Context({'k1': 'v1',
                                   'k2': 'v2',
                                   'cmd': {
                                       'run': 'blah -blah1 --blah2',
                                       'cwd': '/{k1}/{k2}'}})
    assert obj.commands == [Command('blah -blah1 --blah2',
                                    cwd='/v1/v2',
                                    is_shell=True,
                                    is_save=False)]
    with patch('subprocess.run') as mock_run:
        with patch_logger(sp_mod_name, logging.DEBUG) as mock_logger_debug:
            obj.run_step()
    assert mock_logger_debug.mock_calls == [
        call('stdout & stderr inheriting from parent process.'),
        call('Processing command string in dir /v1/v2: blah -blah1 --blah2')]
    mock_run.assert_called_once_with('blah -blah1 --blah2',
                                     check=True,
                                     cwd='/v1/v2',
                                     shell=True,
                                     stdout=None,
                                     stderr=None)
def test_cmdstep_runstep_cmd_is_dict_save_true_shell_false():
    """Dict command with save true and shell false writes cmdOut."""
    context = Context({'cmd': {'run': 'blah -blah1 --blah2',
                               'save': True}})
    obj = CmdStep('blahname', context)
    assert obj.is_shell is False
    assert obj.logger.name == 'blahname'
    assert obj.context == Context({'cmd': {'run': 'blah -blah1 --blah2',
                                           'save': True}})
    assert obj.commands == [Command('blah -blah1 --blah2',
                                    is_shell=False,
                                    is_save=True)]
    with patch('subprocess.run') as mock_run:
        mock_run.return_value = subprocess.CompletedProcess(None,
                                                            0,
                                                            'std',
                                                            'err')
        with patch_logger(sp_mod_name, logging.DEBUG) as mock_logger_debug:
            with patch_logger(sp_mod_name, logging.ERROR) as mock_logger_error:
                obj.run_step()
    assert mock_logger_debug.mock_calls == [
        call('stdout & stderr inheriting from parent process.'),
        call('Processing command string: blah -blah1 --blah2')]
    # captured stderr is echoed at ERROR level
    mock_logger_error.assert_called_once_with('stderr: err')
    # blah is in a list because shell == false on posix.
    # windows is always str
    expected_cmd = get_plat(['blah', '-blah1', '--blah2'],
                            'blah -blah1 --blah2')
    mock_run.assert_called_once_with(expected_cmd,
                                     capture_output=True,
                                     cwd=None,
                                     encoding=None,
                                     shell=False,
                                     text=True)
    assert context['cmdOut']['returncode'] == 0
    assert context['cmdOut']['stdout'] == 'std'
    assert context['cmdOut']['stderr'] == 'err'
def test_cmdstep_runstep_cmd_is_dict_save_true_shell_true():
    """Dict command with save true and shell true writes cmdOut."""
    context = Context({'cmd': {'run': 'blah -blah1 --blah2',
                               'save': True}})
    obj = CmdStep('blahname', context, is_shell=True)
    assert obj.is_shell is True
    assert obj.logger.name == 'blahname'
    assert obj.context == Context({'cmd': {'run': 'blah -blah1 --blah2',
                                           'save': True}})
    assert obj.commands == [Command('blah -blah1 --blah2',
                                    is_shell=True,
                                    is_save=True)]
    with patch('subprocess.run') as mock_run:
        mock_run.return_value = subprocess.CompletedProcess(None,
                                                            0,
                                                            'std',
                                                            None)
        with patch_logger(sp_mod_name, logging.DEBUG) as mock_logger_debug:
            with patch_logger(sp_mod_name, logging.INFO) as mock_logger_info:
                obj.run_step()
    assert mock_logger_debug.mock_calls == [
        call('stdout & stderr inheriting from parent process.'),
        call('Processing command string: blah -blah1 --blah2')]
    # captured stdout is echoed at INFO level
    mock_logger_info.assert_called_once_with('stdout: std')
    # blah is in a str because shell == true
    mock_run.assert_called_once_with('blah -blah1 --blah2',
                                     capture_output=True,
                                     cwd=None,
                                     encoding=None,
                                     shell=True,
                                     text=True)
    assert context['cmdOut']['returncode'] == 0
    assert context['cmdOut']['stdout'] == 'std'
    assert context['cmdOut']['stderr'] is None
def test_cmdstep_runstep_cmd_is_dict_save_true_shell_true_cwd_set():
    """Dict command with save true and shell true with cwd set."""
    context = Context({'cmd': {'run': 'blah -blah1 --blah2',
                               'save': True,
                               'cwd': 'pathhere'}})
    obj = CmdStep('blahname', context, is_shell=True)
    assert obj.is_shell is True
    assert obj.logger.name == 'blahname'
    assert obj.context == Context({'cmd': {'run': 'blah -blah1 --blah2',
                                           'save': True,
                                           'cwd': 'pathhere'}})
    assert obj.commands == [Command('blah -blah1 --blah2',
                                    is_shell=True,
                                    cwd='pathhere',
                                    is_save=True)]
    with patch('subprocess.run') as mock_run:
        mock_run.return_value = subprocess.CompletedProcess(None,
                                                            0,
                                                            'std',
                                                            None)
        with patch_logger(sp_mod_name, logging.DEBUG) as mock_logger_debug:
            with patch_logger(sp_mod_name, logging.INFO) as mock_logger_info:
                obj.run_step()
    assert mock_logger_debug.mock_calls == [
        call('stdout & stderr inheriting from parent process.'),
        call('Processing command string in dir pathhere: blah -blah1 --blah2')]
    mock_logger_info.assert_called_once_with('stdout: std')
    # blah is in a str because shell is true
    mock_run.assert_called_once_with('blah -blah1 --blah2',
                                     capture_output=True,
                                     cwd='pathhere',
                                     encoding=None,
                                     shell=True,
                                     text=True)
    assert context['cmdOut']['returncode'] == 0
    assert context['cmdOut']['stdout'] == 'std'
    assert context['cmdOut']['stderr'] is None
def test_cmdstep_runstep_cmd_is_dict_save_true_shell_false_formatting():
    """Dict command with save true and shell false with formatting."""
    # both 'run' and 'save' are subject to {key} context formatting
    context = Context({'k1': 'blah',
                       'k2': True,
                       'cmd': {'run': '{k1} -{k1}1 --{k1}2',
                               'save': '{k2}'}})
    obj = CmdStep('blahname', context)
    assert obj.is_shell is False
    assert obj.logger.name == 'blahname'
    assert obj.context == Context({'k1': 'blah',
                                   'k2': True,
                                   'cmd': {'run': '{k1} -{k1}1 --{k1}2',
                                           'save': '{k2}'}})
    assert obj.commands == [Command('blah -blah1 --blah2', is_save=True)]
    with patch('subprocess.run') as mock_run:
        mock_run.return_value = subprocess.CompletedProcess(None,
                                                            0,
                                                            'std',
                                                            'err')
        with patch_logger(sp_mod_name, logging.DEBUG) as mock_logger_debug:
            with patch_logger(sp_mod_name,
                              logging.ERROR) as mock_logger_error:
                obj.run_step()
    assert mock_logger_debug.mock_calls == [
        call('stdout & stderr inheriting from parent process.'),
        call('Processing command string: blah -blah1 --blah2')]
    mock_logger_error.assert_called_once_with('stderr: err')
    # blah is in a list because shell == false on posix.
    # windows is always str
    expected_cmd = get_plat(['blah', '-blah1', '--blah2'],
                            'blah -blah1 --blah2')
    mock_run.assert_called_once_with(expected_cmd,
                                     capture_output=True,
                                     cwd=None,
                                     encoding=None,
                                     shell=False,
                                     text=True)
    assert context['cmdOut']['returncode'] == 0
    assert context['cmdOut']['stdout'] == 'std'
    assert context['cmdOut']['stderr'] == 'err'
| 37.802436
| 80
| 0.510954
| 2,978
| 27,936
| 4.612827
| 0.061786
| 0.041275
| 0.06013
| 0.034578
| 0.87712
| 0.849312
| 0.814588
| 0.801339
| 0.76647
| 0.748781
| 0
| 0.014319
| 0.37253
| 27,936
| 738
| 81
| 37.853659
| 0.769354
| 0.055233
| 0
| 0.766038
| 0
| 0
| 0.198482
| 0.001233
| 0
| 0
| 0
| 0
| 0.241509
| 1
| 0.054717
| false
| 0.001887
| 0.020755
| 0
| 0.077358
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fb8c7146f1f9efd8cb2b6e8f1785ee2edcebea3b
| 642
|
py
|
Python
|
pava/implementation/natives/com/sun/xml/internal/ws/policy/EffectiveAlternativeSelector.py
|
laffra/pava
|
54d10cf7f8def2f96e254c0356623d08f221536f
|
[
"MIT"
] | 4
|
2017-03-30T16:51:16.000Z
|
2020-10-05T12:25:47.000Z
|
pava/implementation/natives/com/sun/xml/internal/ws/policy/EffectiveAlternativeSelector.py
|
laffra/pava
|
54d10cf7f8def2f96e254c0356623d08f221536f
|
[
"MIT"
] | null | null | null |
pava/implementation/natives/com/sun/xml/internal/ws/policy/EffectiveAlternativeSelector.py
|
laffra/pava
|
54d10cf7f8def2f96e254c0356623d08f221536f
|
[
"MIT"
] | null | null | null |
def add_native_methods(clazz):
    """Attach stub Java-native methods to *clazz*.

    Generated glue for pava: each stub simply raises NotImplementedError
    because the underlying JVM native implementation is unavailable.
    """
    def __java_init______(a0):
        # Native constructor stub.
        raise NotImplementedError()

    def selectAlternatives__com_sun_xml_internal_ws_policy_EffectivePolicyModifier__com_sun_xml_internal_ws_policy_AssertionValidationProcessor__(a0, a1):
        # Static native method stub.
        raise NotImplementedError()

    clazz.__java_init______ = __java_init______
    # The static Java method is exposed as a Python staticmethod.
    clazz.selectAlternatives__com_sun_xml_internal_ws_policy_EffectivePolicyModifier__com_sun_xml_internal_ws_policy_AssertionValidationProcessor__ = staticmethod(
        selectAlternatives__com_sun_xml_internal_ws_policy_EffectivePolicyModifier__com_sun_xml_internal_ws_policy_AssertionValidationProcessor__)
| 58.363636
| 301
| 0.88785
| 68
| 642
| 7.161765
| 0.308824
| 0.073922
| 0.110883
| 0.209446
| 0.73306
| 0.73306
| 0.73306
| 0.73306
| 0.73306
| 0.73306
| 0
| 0.005085
| 0.080997
| 642
| 10
| 302
| 64.2
| 0.820339
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0.428571
| false
| 0
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
837833f6a4bf17de64199df47ab1d74b1ec9a4a3
| 10,748
|
py
|
Python
|
Experiments/ST_MGCN/Runner_features_analysis_120_STMGCN.py
|
TempAnonymous/Context_Analysis
|
bbeba1ed7ea7001c22a12721fc4f390d4cc01a6e
|
[
"MIT"
] | 3
|
2021-06-29T06:18:18.000Z
|
2021-09-07T03:11:35.000Z
|
Experiments/ST_MGCN/Runner_features_analysis_120_STMGCN.py
|
TempAnonymous/Context_Analysis
|
bbeba1ed7ea7001c22a12721fc4f390d4cc01a6e
|
[
"MIT"
] | null | null | null |
Experiments/ST_MGCN/Runner_features_analysis_120_STMGCN.py
|
TempAnonymous/Context_Analysis
|
bbeba1ed7ea7001c22a12721fc4f390d4cc01a6e
|
[
"MIT"
] | null | null | null |
import os
#############################################
# BenchMark Bike Chicago
#############################################
# Shared CLI prefix for every Bike/Chicago ST-MGCN run; each os.system call
# below appends the city plus one external-feature combination to sweep.
bike_shared_params_st_mgcn = ('python ST_MGCN_Obj.py '
                              '--Dataset Bike '
                              '--CT 6 '
                              '--PT 7 '
                              '--TT 4 '
                              '--K 1 '
                              '--L 1 '
                              '--Graph Distance-Correlation-Interaction '
                              '--LSTMUnits 64 '
                              '--LSTMLayers 3 '
                              '--DataRange All '
                              '--TrainDays 365 '
                              '--threshold_correlation 0 '
                              '--threshold_distance 1000 '
                              '--threshold_interaction 500 '
                              '--Epoch 10000 '
                              '--Train True '
                              '--lr 5e-4 '
                              '--patience 0.1 '
                              '--ESlength 100 '
                              '--BatchSize 16 '
                              '--MergeWay sum '
                              )
# Chicago
# Sweep single features (weather/holiday/tp) and their pairwise combos.
os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_method not-linear-gating '
                                       ' --external_use weather --MergeIndex 2 --CodeVersion gating_wa ')
os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_method not-linear-gating '
                                       ' --external_use holiday --MergeIndex 2 --CodeVersion gating_hi ')
os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_method not-linear-gating '
                                       ' --external_use tp --MergeIndex 2 --CodeVersion gating_tp ')
os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_method not-linear-gating '
                                       ' --external_use weather-holiday --MergeIndex 2 --CodeVersion gating_wa_hi ')
os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_method not-linear-gating '
                                       ' --external_use weather-tp --MergeIndex 2 --CodeVersion gating_wa_tp ')
os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_method not-linear-gating '
                                       ' --external_use holiday-tp --MergeIndex 2 --CodeVersion gating_hi_tp ')
#############################################
# BenchMark Metro Shanghai
#############################################
# Shared CLI prefix for every Metro/Shanghai ST-MGCN run; the sweep below
# also includes POI-based feature combinations (with --poi_distance 5000).
metro_shared_params_st_mgcn = ('python ST_MGCN_Obj.py '
                               '--Dataset Metro '
                               '--CT 6 '
                               '--PT 7 '
                               '--TT 4 '
                               '--K 1 '
                               '--L 1 '
                               '--Graph Distance-Correlation '
                               '--LSTMUnits 64 '
                               '--LSTMLayers 3 '
                               '--DataRange All '
                               '--TrainDays All '
                               '--threshold_correlation 0.7 '
                               '--threshold_distance 5000 '
                               '--threshold_interaction 30 '
                               '--Epoch 10000 '
                               '--Train True '
                               '--lr 1e-4 '
                               '--patience 0.1 '
                               '--ESlength 100 '
                               '--BatchSize 16 '
                               '--MergeWay sum '
                               )
# Shanghai
os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method not-linear-gating '
                                        ' --external_use weather --MergeIndex 2 --CodeVersion gating_wa ')
os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method not-linear-gating '
                                        ' --external_use holiday --MergeIndex 2 --CodeVersion gating_hi ')
os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method not-linear-gating '
                                        ' --external_use tp --MergeIndex 2 --CodeVersion gating_tp ')
os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method not-linear-gating '
                                        ' --external_use poi --poi_distance 5000 --MergeIndex 2 --CodeVersion gating_poi ')
os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method not-linear-gating '
                                        ' --external_use weather-holiday --MergeIndex 2 --CodeVersion gating_wa_hi ')
os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method not-linear-gating '
                                        ' --external_use weather-tp --MergeIndex 2 --CodeVersion gating_wa_tp ')
os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method not-linear-gating '
                                        ' --external_use holiday-tp --MergeIndex 2 --CodeVersion gating_hi_tp ')
os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method not-linear-gating '
                                        ' --external_use poi-weather --poi_distance 5000 --MergeIndex 2 --CodeVersion gating_poi_wa ')
os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method not-linear-gating '
                                        ' --external_use poi-holiday --poi_distance 5000 --MergeIndex 2 --CodeVersion gating_poi_hi ')
os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method not-linear-gating '
                                        ' --external_use poi-tp --poi_distance 5000 --MergeIndex 2 --CodeVersion gating_poi_tp ')
os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method not-linear-gating '
                                        ' --external_use poi-weather-holiday --poi_distance 5000 --MergeIndex 2 --CodeVersion gating_poi_wa_hi ')
os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method not-linear-gating '
                                        ' --external_use poi-weather-tp --poi_distance 5000 --MergeIndex 2 --CodeVersion gating_poi_wa_tp ')
os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method not-linear-gating '
                                        ' --external_use poi-holiday-tp --poi_distance 5000 --MergeIndex 2 --CodeVersion gating_poi_hi_tp ')
os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method not-linear-gating '
                                        ' --external_use poi-weather-holiday-tp --poi_distance 5000 --MergeIndex 2 --CodeVersion gating_poi_wa_hi_tp ')
#############################################
# BenchMark ChargeStation
#############################################
# Shared CLI prefix for every ChargeStation/Beijing ST-MGCN run; note this
# dataset merges with max (--MergeWay max) unlike the bike/metro sweeps.
cs_shared_params_st_mgcn = ('python ST_MGCN_Obj.py '
                            '--Dataset ChargeStation '
                            '--CT 6 '
                            '--PT 7 '
                            '--TT 4 '
                            '--K 1 '
                            '--L 1 '
                            '--Graph Distance-Correlation '
                            '--LSTMUnits 64 '
                            '--LSTMLayers 3 '
                            '--DataRange All '
                            '--TrainDays All '
                            '--threshold_correlation 0.1 '
                            '--threshold_distance 1000 '
                            '--threshold_interaction 500 '
                            '--Epoch 10000 '
                            '--Train True '
                            '--lr 5e-4 '
                            '--patience 0.1 '
                            '--ESlength 100 '
                            '--BatchSize 16 '
                            '--MergeWay max '
                            )
# Beijing
os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method not-linear-gating '
                                     ' --external_use weather --MergeIndex 2 --CodeVersion gating_wa ')
os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method not-linear-gating '
                                     ' --external_use holiday --MergeIndex 2 --CodeVersion gating_hi ')
os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method not-linear-gating '
                                     ' --external_use tp --MergeIndex 2 --CodeVersion gating_tp ')
os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method not-linear-gating '
                                     ' --external_use poi --poi_distance 5000 --MergeIndex 2 --CodeVersion gating_poi ')
os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method not-linear-gating '
                                     ' --external_use weather-holiday --MergeIndex 2 --CodeVersion gating_wa_hi ')
os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method not-linear-gating '
                                     ' --external_use weather-tp --MergeIndex 2 --CodeVersion gating_wa_tp ')
os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method not-linear-gating '
                                     ' --external_use holiday-tp --MergeIndex 2 --CodeVersion gating_hi_tp ')
os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method not-linear-gating '
                                     ' --external_use poi-weather --poi_distance 5000 --MergeIndex 2 --CodeVersion gating_poi_wa ')
os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method not-linear-gating '
                                     ' --external_use poi-holiday --poi_distance 5000 --MergeIndex 2 --CodeVersion gating_poi_hi ')
os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method not-linear-gating '
                                     ' --external_use poi-tp --poi_distance 5000 --MergeIndex 2 --CodeVersion gating_poi_tp ')
os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method not-linear-gating '
                                     ' --external_use poi-weather-holiday --poi_distance 5000 --MergeIndex 2 --CodeVersion gating_poi_wa_hi ')
os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method not-linear-gating '
                                     ' --external_use poi-weather-tp --poi_distance 5000 --MergeIndex 2 --CodeVersion gating_poi_wa_tp ')
os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method not-linear-gating '
                                     ' --external_use poi-holiday-tp --poi_distance 5000 --MergeIndex 2 --CodeVersion gating_poi_hi_tp ')
os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method not-linear-gating '
                                     ' --external_use poi-weather-holiday-tp --poi_distance 5000 --MergeIndex 2 --CodeVersion gating_poi_wa_hi_tp ')
| 53.74
| 149
| 0.524749
| 1,052
| 10,748
| 5.062738
| 0.073194
| 0.045062
| 0.097259
| 0.125047
| 0.962261
| 0.956252
| 0.956252
| 0.948179
| 0.948179
| 0.948179
| 0
| 0.027654
| 0.354019
| 10,748
| 199
| 150
| 54.01005
| 0.73945
| 0.008932
| 0
| 0.862319
| 0
| 0
| 0.532106
| 0.020632
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.007246
| 0
| 0.007246
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8393450a8909ee42421d4535053458143f1e7974
| 2,397
|
py
|
Python
|
tests/unitary/LiquidityGaugeV5/test_deposit_withdraw.py
|
hedgx/ribbonomics
|
84a212a82eaaa2824ebe3c072413e143eaca02a2
|
[
"MIT"
] | 2
|
2022-01-13T21:11:30.000Z
|
2022-03-10T08:20:42.000Z
|
tests/unitary/LiquidityGaugeV5/test_deposit_withdraw.py
|
hedgx/ribbonomics
|
84a212a82eaaa2824ebe3c072413e143eaca02a2
|
[
"MIT"
] | null | null | null |
tests/unitary/LiquidityGaugeV5/test_deposit_withdraw.py
|
hedgx/ribbonomics
|
84a212a82eaaa2824ebe3c072413e143eaca02a2
|
[
"MIT"
] | 2
|
2022-01-30T20:54:55.000Z
|
2022-03-05T17:49:19.000Z
|
import brownie
import pytest
@pytest.fixture(scope="module", autouse=True)
def deposit_setup(accounts, gauge_v5, mock_lp_token):
    """Approve the gauge to pull unlimited LP tokens from accounts[0]."""
    mock_lp_token.approve(gauge_v5, 2 ** 256 - 1, {"from": accounts[0]})
def test_deposit(accounts, gauge_v5, mock_lp_token):
    """Deposit moves LP tokens to the gauge and mints gauge balance 1:1."""
    balance = mock_lp_token.balanceOf(accounts[0])
    gauge_v5.deposit(100000, {"from": accounts[0]})
    assert mock_lp_token.balanceOf(gauge_v5) == 100000
    assert mock_lp_token.balanceOf(accounts[0]) == balance - 100000
    assert gauge_v5.totalSupply() == 100000
    assert gauge_v5.balanceOf(accounts[0]) == 100000
def test_deposit_zero(accounts, gauge_v5, mock_lp_token):
    """Depositing zero is a no-op: no transfer, no supply change."""
    balance = mock_lp_token.balanceOf(accounts[0])
    gauge_v5.deposit(0, {"from": accounts[0]})
    assert mock_lp_token.balanceOf(gauge_v5) == 0
    assert mock_lp_token.balanceOf(accounts[0]) == balance
    assert gauge_v5.totalSupply() == 0
    assert gauge_v5.balanceOf(accounts[0]) == 0
def test_deposit_insufficient_balance(accounts, gauge_v5, mock_lp_token):
    """Depositing more LP tokens than the sender holds reverts."""
    # accounts[1] holds no LP tokens, so the transferFrom must fail.
    with brownie.reverts():
        gauge_v5.deposit(100000, {"from": accounts[1]})
def test_withdraw(accounts, gauge_v5, mock_lp_token):
    """Full withdraw returns LP tokens and burns the gauge balance."""
    balance = mock_lp_token.balanceOf(accounts[0])
    gauge_v5.deposit(100000, {"from": accounts[0]})
    gauge_v5.withdraw(100000, {"from": accounts[0]})
    assert mock_lp_token.balanceOf(gauge_v5) == 0
    assert mock_lp_token.balanceOf(accounts[0]) == balance
    assert gauge_v5.totalSupply() == 0
    assert gauge_v5.balanceOf(accounts[0]) == 0
def test_withdraw_zero(accounts, gauge_v5, mock_lp_token):
    """Withdrawing zero leaves the deposited position untouched."""
    balance = mock_lp_token.balanceOf(accounts[0])
    gauge_v5.deposit(100000, {"from": accounts[0]})
    gauge_v5.withdraw(0, {"from": accounts[0]})
    assert mock_lp_token.balanceOf(gauge_v5) == 100000
    assert mock_lp_token.balanceOf(accounts[0]) == balance - 100000
    assert gauge_v5.totalSupply() == 100000
    assert gauge_v5.balanceOf(accounts[0]) == 100000
def test_withdraw_new_epoch(accounts, chain, gauge_v5, mock_lp_token):
    """Withdraw still works after time advances past an epoch boundary."""
    balance = mock_lp_token.balanceOf(accounts[0])
    gauge_v5.deposit(100000, {"from": accounts[0]})
    # fast-forward well over a year so a new epoch has started
    chain.sleep(86400 * 400)
    gauge_v5.withdraw(100000, {"from": accounts[0]})
    assert mock_lp_token.balanceOf(gauge_v5) == 0
    assert mock_lp_token.balanceOf(accounts[0]) == balance
    assert gauge_v5.totalSupply() == 0
    assert gauge_v5.balanceOf(accounts[0]) == 0
| 34.73913
| 73
| 0.722153
| 339
| 2,397
| 4.837758
| 0.117994
| 0.136585
| 0.154268
| 0.182927
| 0.845122
| 0.845122
| 0.793902
| 0.793902
| 0.793902
| 0.793902
| 0
| 0.083415
| 0.144764
| 2,397
| 68
| 74
| 35.25
| 0.716585
| 0
| 0
| 0.659574
| 0
| 0
| 0.019191
| 0
| 0
| 0
| 0
| 0
| 0.425532
| 1
| 0.148936
| false
| 0
| 0.042553
| 0
| 0.191489
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
83eafef10492fd0e1f9cdadabf226525f115b0a1
| 386
|
py
|
Python
|
pymtl3/passes/backends/yosys/__init__.py
|
kevinyuan/pymtl3
|
5949e6a4acc625c0ccbbb25be3af1d0db683df3c
|
[
"BSD-3-Clause"
] | 152
|
2020-06-03T02:34:11.000Z
|
2022-03-30T04:16:45.000Z
|
pymtl3/passes/backends/yosys/__init__.py
|
kevinyuan/pymtl3
|
5949e6a4acc625c0ccbbb25be3af1d0db683df3c
|
[
"BSD-3-Clause"
] | 139
|
2019-05-29T00:37:09.000Z
|
2020-05-17T16:49:26.000Z
|
pymtl3/passes/backends/yosys/__init__.py
|
kevinyuan/pymtl3
|
5949e6a4acc625c0ccbbb25be3af1d0db683df3c
|
[
"BSD-3-Clause"
] | 22
|
2020-05-18T13:42:05.000Z
|
2022-03-11T08:37:51.000Z
|
from ..verilog.VerilogPlaceholder import VerilogPlaceholder as YosysPlaceholder
from ..verilog.VerilogPlaceholderPass import (
VerilogPlaceholderPass as YosysPlaceholderPass,
)
from .import_.YosysVerilatorImportPass import YosysVerilatorImportPass
from .translation.YosysTranslationPass import YosysTranslationPass
from .YosysTranslationImportPass import YosysTranslationImportPass
| 48.25
| 79
| 0.88601
| 28
| 386
| 12.178571
| 0.428571
| 0.064516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07772
| 386
| 7
| 80
| 55.142857
| 0.957865
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.714286
| 0.714286
| 0
| 0.714286
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
f7b00a6c6681ea8d88909b0a9d53e8f53a756eab
| 2,762
|
py
|
Python
|
tests/unit/test_punit_parser.py
|
mike0615/curie
|
e25691f465c23cf53c39be157fcfa2eea4978b26
|
[
"MIT"
] | 4
|
2019-02-26T05:18:13.000Z
|
2020-07-15T00:34:41.000Z
|
tests/unit/test_punit_parser.py
|
nutanix/curie
|
e25691f465c23cf53c39be157fcfa2eea4978b26
|
[
"MIT"
] | 3
|
2021-03-31T18:55:50.000Z
|
2021-04-20T17:13:31.000Z
|
tests/unit/test_punit_parser.py
|
mike0615/curie
|
e25691f465c23cf53c39be157fcfa2eea4978b26
|
[
"MIT"
] | 2
|
2020-01-09T02:24:00.000Z
|
2020-11-04T23:09:02.000Z
|
#
# Copyright (c) 2016 Nutanix Inc. All rights reserved.
#
#
# pylint: disable=pointless-statement
import unittest
from curie.punit_parser import PUnit
class TestCuriePUnitParser(unittest.TestCase):
  """Tests for curie.punit_parser.PUnit string parsing and serialization."""

  # Representative base unit names, including a parenthesized simple-name.
  UNIT_NAMES = ["byte", "decibel", "decibel ( simple-name_ )"]

  def setUp(self):
    pass

  def test_base_name(self):
    """A bare unit name serializes as '<name>*1'."""
    for text in self.UNIT_NAMES:
      self.assertEqual(str(PUnit.from_string(text)), "%s*1" % text)

  def test_multiplied_unit_only(self):
    """A leading '*' unit is equivalent to the bare unit."""
    for text in self.UNIT_NAMES:
      self.assertEqual(str(PUnit.from_string("* %s" % text)), "%s*1" % text)

  def test_divided_unit_only(self):
    """A leading '/' unit serializes with exponent -1."""
    for text in self.UNIT_NAMES:
      self.assertEqual(str(PUnit.from_string("/ %s" % text)), "%s^-1*1" % text)

  def test_base_and_multiplied_unit(self):
    """unit * unit: same unit squares; different units sort then join."""
    for text in self.UNIT_NAMES:
      for text2 in self.UNIT_NAMES:
        result = str(PUnit.from_string("%s * %s" % (text, text2)))
        if text == text2:
          self.assertEqual(result, "%s^2*1" % text)
        else:
          self.assertEqual(result, "%s*1" % "*".join(sorted([text, text2])))

  def test_base_and_divided_unit(self):
    """unit / unit: identical units cancel to '1'; otherwise keep ^-1."""
    for text in self.UNIT_NAMES:
      for text2 in self.UNIT_NAMES:
        result = str(PUnit.from_string("%s / %s" % (text, text2)))
        if text == text2:
          self.assertEqual(result, "1")
        else:
          self.assertEqual(result, "%s*%s^-1*1" % (text, text2))

  def test_multiplied_and_divided_unit(self):
    """'* unit / unit': identical units cancel; otherwise keep ^-1."""
    for text in self.UNIT_NAMES:
      for text2 in self.UNIT_NAMES:
        result = str(PUnit.from_string("* %s / %s" % (text, text2)))
        if text == text2:
          self.assertEqual(result, "1")
        else:
          self.assertEqual(result, "%s*%s^-1*1" % (text, text2))

  def test_modifier1(self):
    """Plain numeric modifiers scale the unit (multiply or divide)."""
    self.assertEqual(str(PUnit.from_string("* 10")), "10")
    self.assertEqual(str(PUnit.from_string("/ 10")), "0.1")

  def test_modifier2(self):
    """Base-10 exponent modifiers honor sign and division."""
    self.assertEqual(str(PUnit.from_string("* 10 ^ 2")), "100")
    self.assertEqual(str(PUnit.from_string("/ 10 ^ -2")), "100")
    self.assertEqual(str(PUnit.from_string("/ 10 ^ 2")), "0.01")
    self.assertEqual(str(PUnit.from_string("* 10 ^ -2")), "0.01")

  def test_modifier1_and_modifier2(self):
    """Scalar and base-10 exponent modifiers compose multiplicatively."""
    self.assertEqual(str(PUnit.from_string("* 2 * 10 ^ 2")), "200")
    self.assertEqual(str(PUnit.from_string("* 2 / 10 ^ -2")), "200")
    self.assertEqual(str(PUnit.from_string("* 2 / 10 ^ 2")), "0.02")
    self.assertEqual(str(PUnit.from_string("* 2 * 10 ^ -2")), "0.02")
    self.assertEqual(str(PUnit.from_string("/ 2 * 10 ^ 2")), "50.0")
    self.assertEqual(str(PUnit.from_string("/ 2 / 10 ^ -2")), "50.0")
    self.assertEqual(str(PUnit.from_string("/ 2 / 10 ^ 2")), "0.005")
    self.assertEqual(str(PUnit.from_string("/ 2 * 10 ^ -2")), "0.005")
| 36.342105
| 79
| 0.625272
| 396
| 2,762
| 4.217172
| 0.156566
| 0.206587
| 0.143713
| 0.215569
| 0.78503
| 0.769461
| 0.752695
| 0.731737
| 0.692814
| 0.692814
| 0
| 0.055331
| 0.195148
| 2,762
| 75
| 80
| 36.826667
| 0.695906
| 0.031861
| 0
| 0.339286
| 0
| 0
| 0.115067
| 0
| 0
| 0
| 0
| 0
| 0.410714
| 1
| 0.178571
| false
| 0.017857
| 0.035714
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f7c333bc5bab62b0e9dcb340485753d188612224
| 7,376
|
py
|
Python
|
py_hcl/firrtl_ir/expr/prim_ops.py
|
zhongzc/py-hcl
|
5a2be0208f915377a1dae12509f1af016df6412b
|
[
"MIT"
] | null | null | null |
py_hcl/firrtl_ir/expr/prim_ops.py
|
zhongzc/py-hcl
|
5a2be0208f915377a1dae12509f1af016df6412b
|
[
"MIT"
] | null | null | null |
py_hcl/firrtl_ir/expr/prim_ops.py
|
zhongzc/py-hcl
|
5a2be0208f915377a1dae12509f1af016df6412b
|
[
"MIT"
] | null | null | null |
from . import Expression
from ..utils import serialize_num
class Add(Expression):
    """FIRRTL ``add`` primitive over two operand expressions."""

    def __init__(self, args, tpe):
        self.args = args  # operand expressions; the first two serialize
        self.tpe = tpe    # result type of this op

    def serialize(self, output):
        """Write ``add(<arg0>, <arg1>)`` onto the binary *output* stream."""
        lhs, rhs = self.args[0], self.args[1]
        output.write(b"add(")
        lhs.serialize(output)
        output.write(b", ")
        rhs.serialize(output)
        output.write(b")")
class Sub(Expression):
    """FIRRTL ``sub`` primitive over two operand expressions."""

    def __init__(self, args, tpe):
        self.args = args  # operand expressions; the first two serialize
        self.tpe = tpe    # result type of this op

    def serialize(self, output):
        """Write ``sub(<arg0>, <arg1>)`` onto the binary *output* stream."""
        lhs, rhs = self.args[0], self.args[1]
        output.write(b"sub(")
        lhs.serialize(output)
        output.write(b", ")
        rhs.serialize(output)
        output.write(b")")
class Mul(Expression):
    """FIRRTL ``mul`` primitive over two operand expressions."""

    def __init__(self, args, tpe):
        self.args = args  # operand expressions; the first two serialize
        self.tpe = tpe    # result type of this op

    def serialize(self, output):
        """Write ``mul(<arg0>, <arg1>)`` onto the binary *output* stream."""
        lhs, rhs = self.args[0], self.args[1]
        output.write(b"mul(")
        lhs.serialize(output)
        output.write(b", ")
        rhs.serialize(output)
        output.write(b")")
class Div(Expression):
    """FIRRTL ``div`` primitive over two operand expressions."""

    def __init__(self, args, tpe):
        self.args = args  # operand expressions; the first two serialize
        self.tpe = tpe    # result type of this op

    def serialize(self, output):
        """Write ``div(<arg0>, <arg1>)`` onto the binary *output* stream."""
        lhs, rhs = self.args[0], self.args[1]
        output.write(b"div(")
        lhs.serialize(output)
        output.write(b", ")
        rhs.serialize(output)
        output.write(b")")
class Rem(Expression):
    """FIRRTL ``rem`` (remainder) primitive over two operand expressions."""

    def __init__(self, args, tpe):
        self.args = args  # operand expressions; the first two serialize
        self.tpe = tpe    # result type of this op

    def serialize(self, output):
        """Write ``rem(<arg0>, <arg1>)`` onto the binary *output* stream."""
        lhs, rhs = self.args[0], self.args[1]
        output.write(b"rem(")
        lhs.serialize(output)
        output.write(b", ")
        rhs.serialize(output)
        output.write(b")")
class Lt(Expression):
    """Primitive ``lt`` comparison: serializes as ``lt(args[0], args[1])``."""

    def __init__(self, args, tpe):
        self.args = args
        self.tpe = tpe

    def serialize(self, output):
        write = output.write
        write(b"lt(")
        self.args[0].serialize(output)
        write(b", ")
        self.args[1].serialize(output)
        write(b")")
class Leq(Expression):
    """Primitive ``leq`` comparison: serializes as ``leq(args[0], args[1])``."""

    def __init__(self, args, tpe):
        self.args = args
        self.tpe = tpe

    def serialize(self, output):
        write = output.write
        write(b"leq(")
        self.args[0].serialize(output)
        write(b", ")
        self.args[1].serialize(output)
        write(b")")
class Gt(Expression):
    """Primitive ``gt`` comparison: serializes as ``gt(args[0], args[1])``."""

    def __init__(self, args, tpe):
        self.args = args
        self.tpe = tpe

    def serialize(self, output):
        write = output.write
        write(b"gt(")
        self.args[0].serialize(output)
        write(b", ")
        self.args[1].serialize(output)
        write(b")")
class Geq(Expression):
    """Primitive ``geq`` comparison: serializes as ``geq(args[0], args[1])``."""

    def __init__(self, args, tpe):
        self.args = args
        self.tpe = tpe

    def serialize(self, output):
        write = output.write
        write(b"geq(")
        self.args[0].serialize(output)
        write(b", ")
        self.args[1].serialize(output)
        write(b")")
class Eq(Expression):
    """Primitive ``eq`` comparison: serializes as ``eq(args[0], args[1])``."""

    def __init__(self, args, tpe):
        self.args = args
        self.tpe = tpe

    def serialize(self, output):
        write = output.write
        write(b"eq(")
        self.args[0].serialize(output)
        write(b", ")
        self.args[1].serialize(output)
        write(b")")
class Neq(Expression):
    """Primitive ``neq`` comparison: serializes as ``neq(args[0], args[1])``."""

    def __init__(self, args, tpe):
        self.args = args
        self.tpe = tpe

    def serialize(self, output):
        write = output.write
        write(b"neq(")
        self.args[0].serialize(output)
        write(b", ")
        self.args[1].serialize(output)
        write(b")")
class And(Expression):
    """Primitive bitwise ``and``: serializes as ``and(args[0], args[1])``."""

    def __init__(self, args, tpe):
        self.args = args
        self.tpe = tpe

    def serialize(self, output):
        write = output.write
        write(b"and(")
        self.args[0].serialize(output)
        write(b", ")
        self.args[1].serialize(output)
        write(b")")
class Or(Expression):
    """Primitive bitwise ``or``: serializes as ``or(args[0], args[1])``."""

    def __init__(self, args, tpe):
        self.args = args
        self.tpe = tpe

    def serialize(self, output):
        write = output.write
        write(b"or(")
        self.args[0].serialize(output)
        write(b", ")
        self.args[1].serialize(output)
        write(b")")
class Xor(Expression):
    """Primitive bitwise ``xor``: serializes as ``xor(args[0], args[1])``."""

    def __init__(self, args, tpe):
        self.args = args
        self.tpe = tpe

    def serialize(self, output):
        write = output.write
        write(b"xor(")
        self.args[0].serialize(output)
        write(b", ")
        self.args[1].serialize(output)
        write(b")")
class Not(Expression):
    """Primitive unary ``not``: serializes as ``not(arg)``."""

    def __init__(self, arg, tpe):
        # arg: the single operand expression; tpe: result type.
        self.arg = arg
        self.tpe = tpe

    def serialize(self, output):
        write = output.write
        write(b"not(")
        self.arg.serialize(output)
        write(b")")
class Neg(Expression):
    """Primitive unary ``neg``: serializes as ``neg(arg)``."""

    def __init__(self, arg, tpe):
        self.arg = arg
        self.tpe = tpe

    def serialize(self, output):
        write = output.write
        write(b"neg(")
        self.arg.serialize(output)
        write(b")")
class Cat(Expression):
    """Primitive ``cat`` (concatenate): serializes as ``cat(args[0], args[1])``."""

    def __init__(self, args, tpe):
        self.args = args
        self.tpe = tpe

    def serialize(self, output):
        write = output.write
        write(b"cat(")
        self.args[0].serialize(output)
        write(b", ")
        self.args[1].serialize(output)
        write(b")")
class Bits(Expression):
    """Primitive ``bits``: one IR operand plus two constant operands.

    Serializes as ``bits(ir_arg, const_args[0], const_args[1])``.
    """

    def __init__(self, ir_arg, const_args, tpe):
        # ir_arg: operand expression; const_args: two numeric constants.
        self.ir_arg = ir_arg
        self.const_args = const_args
        self.tpe = tpe

    def serialize(self, output):
        write = output.write
        write(b"bits(")
        self.ir_arg.serialize(output)
        write(b", ")
        write(serialize_num(self.const_args[0]))
        write(b", ")
        write(serialize_num(self.const_args[1]))
        write(b")")
class AsUInt(Expression):
    """Primitive cast ``asUInt``: serializes as ``asUInt(arg)``."""

    def __init__(self, arg, tpe):
        self.arg = arg
        self.tpe = tpe

    def serialize(self, output):
        write = output.write
        write(b"asUInt(")
        self.arg.serialize(output)
        write(b")")
class AsSInt(Expression):
    """Primitive cast ``asSInt``: serializes as ``asSInt(arg)``."""

    def __init__(self, arg, tpe):
        self.arg = arg
        self.tpe = tpe

    def serialize(self, output):
        write = output.write
        write(b"asSInt(")
        self.arg.serialize(output)
        write(b")")
class Shl(Expression):
    """Static shift-left: serializes as ``shl(ir_arg, const_arg)``."""

    def __init__(self, ir_arg, const_arg, tpe):
        # ir_arg: operand expression; const_arg: constant shift amount.
        self.ir_arg = ir_arg
        self.const_arg = const_arg
        self.tpe = tpe

    def serialize(self, output):
        write = output.write
        write(b"shl(")
        self.ir_arg.serialize(output)
        write(b", ")
        write(serialize_num(self.const_arg))
        write(b")")
class Shr(Expression):
    """Static shift-right: serializes as ``shr(ir_arg, const_arg)``."""

    def __init__(self, ir_arg, const_arg, tpe):
        self.ir_arg = ir_arg
        self.const_arg = const_arg
        self.tpe = tpe

    def serialize(self, output):
        write = output.write
        write(b"shr(")
        self.ir_arg.serialize(output)
        write(b", ")
        write(serialize_num(self.const_arg))
        write(b")")
class Dshl(Expression):
    """Dynamic shift-left: serializes as ``dshl(args[0], args[1])``."""

    def __init__(self, args, tpe):
        self.args = args
        self.tpe = tpe

    def serialize(self, output):
        write = output.write
        write(b"dshl(")
        self.args[0].serialize(output)
        write(b", ")
        self.args[1].serialize(output)
        write(b")")
class Dshr(Expression):
    """Dynamic shift-right: serializes as ``dshr(args[0], args[1])``."""

    def __init__(self, args, tpe):
        self.args = args
        self.tpe = tpe

    def serialize(self, output):
        write = output.write
        write(b"dshr(")
        self.args[0].serialize(output)
        write(b", ")
        self.args[1].serialize(output)
        write(b")")
| 23.641026
| 55
| 0.570363
| 932
| 7,376
| 4.378755
| 0.04721
| 0.196766
| 0.202891
| 0.286694
| 0.941681
| 0.941681
| 0.941681
| 0.934085
| 0.889488
| 0.889488
| 0
| 0.006822
| 0.284572
| 7,376
| 311
| 56
| 23.717042
| 0.766534
| 0
| 0
| 0.774059
| 0
| 0
| 0.022641
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.200837
| false
| 0
| 0.008368
| 0
| 0.309623
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
e3910063b0e0521e6c33b21905d8e6ade7cdd324
| 176
|
py
|
Python
|
file_path.py
|
oushu1zhangxiangxuan1/pylayground
|
22590b10a5de7e07149e4a6029a094d51d2e48a4
|
[
"Apache-2.0"
] | null | null | null |
file_path.py
|
oushu1zhangxiangxuan1/pylayground
|
22590b10a5de7e07149e4a6029a094d51d2e48a4
|
[
"Apache-2.0"
] | null | null | null |
file_path.py
|
oushu1zhangxiangxuan1/pylayground
|
22590b10a5de7e07149e4a6029a094d51d2e48a4
|
[
"Apache-2.0"
] | null | null | null |
import os

# Playground: compare the different ways of getting this file's directory.
_here = os.path.dirname(__file__)
print(_here)
print(os.path.abspath(__file__))
# The next two are equivalent expressions for the absolute directory path.
print(os.path.abspath(_here))
print(os.path.dirname(os.path.abspath(__file__)))
| 29.333333
| 49
| 0.784091
| 28
| 176
| 4.357143
| 0.25
| 0.295082
| 0.360656
| 0.368852
| 0.696721
| 0.459016
| 0.459016
| 0
| 0
| 0
| 0
| 0
| 0.034091
| 176
| 5
| 50
| 35.2
| 0.717647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.2
| 0
| 0.2
| 0.8
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
e3a9ca5754eef4d5cd991b5977c2449e9061b787
| 38,159
|
py
|
Python
|
nipyapi/registry/apis/access_api.py
|
Zyrix/nipyapi
|
d00221ba50bd83e21133d6e4d4b56741ead6822a
|
[
"Apache-2.0"
] | null | null | null |
nipyapi/registry/apis/access_api.py
|
Zyrix/nipyapi
|
d00221ba50bd83e21133d6e4d4b56741ead6822a
|
[
"Apache-2.0"
] | null | null | null |
nipyapi/registry/apis/access_api.py
|
Zyrix/nipyapi
|
d00221ba50bd83e21133d6e4d4b56741ead6822a
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Apache NiFi Registry REST API
The REST API provides an interface to a registry with operations for saving, versioning, reading NiFi flows and components.
OpenAPI spec version: 0.7.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class AccessApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Create the API wrapper.

    Uses the given `api_client` if supplied; otherwise falls back to the
    client held by the shared Configuration, creating one on first use.
    """
    config = Configuration()
    if api_client:
        chosen = api_client
    else:
        if not config.api_client:
            config.api_client = ApiClient()
        chosen = config.api_client
    self.api_client = chosen
def create_access_token_by_trying_all_providers(self, **kwargs):
    """
    Create token trying all providers.

    Synchronous by default; pass a `callback` kwarg to run asynchronously,
    in which case the request thread is returned and the callback receives
    the response.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both delegate to the raw-HTTP helper and return
    # its result unchanged.
    return self.create_access_token_by_trying_all_providers_with_http_info(**kwargs)
def create_access_token_by_trying_all_providers_with_http_info(self, **kwargs):
    """
    Create token trying all providers (raw variant of the public wrapper).

    Issues POST /access/token; the identity-verification method is
    auto-detected by the server. The response is a base64-encoded JSON Web
    Token (JWT) usable as 'Authorization: Bearer <token>'. Pass a
    `callback` kwarg to run asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only framework kwargs are accepted; anything else is a caller error.
    all_params = ['callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = locals()
    for name, value in iteritems(params['kwargs']):
        if name not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_access_token_by_trying_all_providers" % name
            )
        params[name] = value
    del params['kwargs']

    # This endpoint takes no path, query, form or body parameters.
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    header_params['Accept'] = self.api_client.select_header_accept(
        ['text/plain'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['*/*'])

    auth_settings = ['tokenAuth', 'basicAuth']

    return self.api_client.call_api(
        '/access/token', 'POST',
        path_params, query_params, header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='str',
        auth_settings=auth_settings,
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def create_access_token_using_basic_auth_credentials(self, **kwargs):
    """
    Create token using basic auth.

    Credentials are passed in standard HTTP Basic Auth format
    ('Authorization: Basic <credentials>'). Synchronous by default; pass a
    `callback` kwarg to run asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both delegate to the raw-HTTP helper and return
    # its result unchanged.
    return self.create_access_token_using_basic_auth_credentials_with_http_info(**kwargs)
def create_access_token_using_basic_auth_credentials_with_http_info(self, **kwargs):
    """
    Create token using basic auth (raw variant of the public wrapper).

    Issues POST /access/token/login with HTTP Basic Auth credentials
    ('Authorization: Basic <credentials>', base64 '<username>:<password>').
    The response is a base64-encoded JWT usable as
    'Authorization: Bearer <token>'. Pass a `callback` kwarg to run
    asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only framework kwargs are accepted; anything else is a caller error.
    all_params = ['callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = locals()
    for name, value in iteritems(params['kwargs']):
        if name not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_access_token_using_basic_auth_credentials" % name
            )
        params[name] = value
    del params['kwargs']

    # This endpoint takes no path, query, form or body parameters.
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    header_params['Accept'] = self.api_client.select_header_accept(
        ['text/plain'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['*/*'])

    # NOTE(review): the generated list carries both 'basicAuth' and
    # 'BasicAuth' — preserved verbatim from the swagger spec.
    auth_settings = ['tokenAuth', 'basicAuth', 'BasicAuth']

    return self.api_client.call_api(
        '/access/token/login', 'POST',
        path_params, query_params, header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='str',
        auth_settings=auth_settings,
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def create_access_token_using_identity_provider_credentials(self, **kwargs):
    """
    Create token using identity provider.

    Credentials must be in the format the custom identity provider expects
    (discoverable via GET /access/token/identity-provider/usage).
    Synchronous by default; pass a `callback` kwarg to run asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both delegate to the raw-HTTP helper and return
    # its result unchanged.
    return self.create_access_token_using_identity_provider_credentials_with_http_info(**kwargs)
def create_access_token_using_identity_provider_credentials_with_http_info(self, **kwargs):
    """
    Create token using identity provider (raw variant of the public wrapper).

    Issues POST /access/token/identity-provider; credentials must be in the
    format understood by the custom identity provider (see
    GET /access/token/identity-provider/usage). The response is a
    base64-encoded JWT usable as 'Authorization: Bearer <token>'. Pass a
    `callback` kwarg to run asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only framework kwargs are accepted; anything else is a caller error.
    all_params = ['callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = locals()
    for name, value in iteritems(params['kwargs']):
        if name not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_access_token_using_identity_provider_credentials" % name
            )
        params[name] = value
    del params['kwargs']

    # This endpoint takes no path, query, form or body parameters.
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    header_params['Accept'] = self.api_client.select_header_accept(
        ['text/plain'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['*/*'])

    auth_settings = ['tokenAuth', 'basicAuth']

    return self.api_client.call_api(
        '/access/token/identity-provider', 'POST',
        path_params, query_params, header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='str',
        auth_settings=auth_settings,
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def create_access_token_using_kerberos_ticket(self, **kwargs):
    """
    Create token using kerberos.

    Accepts Kerberos Service Tickets or SPNEGO Tokens. Synchronous by
    default; pass a `callback` kwarg to run asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both delegate to the raw-HTTP helper and return
    # its result unchanged.
    return self.create_access_token_using_kerberos_ticket_with_http_info(**kwargs)
def create_access_token_using_kerberos_ticket_with_http_info(self, **kwargs):
    """
    Create token using kerberos (raw variant of the public wrapper).

    Issues POST /access/token/kerberos with a Kerberos Service Ticket or
    SPNEGO Token. The response is a base64-encoded JWT usable as
    'Authorization: Bearer <token>'. Pass a `callback` kwarg to run
    asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only framework kwargs are accepted; anything else is a caller error.
    all_params = ['callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = locals()
    for name, value in iteritems(params['kwargs']):
        if name not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_access_token_using_kerberos_ticket" % name
            )
        params[name] = value
    del params['kwargs']

    # This endpoint takes no path, query, form or body parameters.
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    header_params['Accept'] = self.api_client.select_header_accept(
        ['text/plain'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['*/*'])

    auth_settings = ['tokenAuth', 'basicAuth']

    return self.api_client.call_api(
        '/access/token/kerberos', 'POST',
        path_params, query_params, header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='str',
        auth_settings=auth_settings,
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_access_status(self, **kwargs):
    """
    Get access status.

    Returns the current client's authenticated identity and permissions to
    top-level resources. Synchronous by default; pass a `callback` kwarg to
    run asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: CurrentUser
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both delegate to the raw-HTTP helper and return
    # its result unchanged.
    return self.get_access_status_with_http_info(**kwargs)
def get_access_status_with_http_info(self, **kwargs):
    """
    Get access status (raw variant of the public wrapper).

    Issues GET /access and returns the current client's authenticated
    identity and permissions to top-level resources. Pass a `callback`
    kwarg to run asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: CurrentUser
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only framework kwargs are accepted; anything else is a caller error.
    all_params = ['callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = locals()
    for name, value in iteritems(params['kwargs']):
        if name not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_access_status" % name
            )
        params[name] = value
    del params['kwargs']

    # This endpoint takes no path, query, form or body parameters.
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['*/*'])

    # NOTE(review): 'Authorization' entry preserved verbatim from the
    # swagger spec alongside the usual token/basic auth settings.
    auth_settings = ['tokenAuth', 'basicAuth', 'Authorization']

    return self.api_client.call_api(
        '/access', 'GET',
        path_params, query_params, header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='CurrentUser',
        auth_settings=auth_settings,
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_identity_provider_usage_instructions(self, **kwargs):
    """
    Get identity provider usage.

    Describes how the configured identity provider expects credentials to
    be passed to POST /access/token/identity-provider. Synchronous by
    default; pass a `callback` kwarg to run asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both delegate to the raw-HTTP helper and return
    # its result unchanged.
    return self.get_identity_provider_usage_instructions_with_http_info(**kwargs)
def get_identity_provider_usage_instructions_with_http_info(self, **kwargs):
    """
    Get identity provider usage (raw variant of the public wrapper).

    Issues GET /access/token/identity-provider/usage; the plain-text
    response describes the credential format the configured identity
    provider expects. Pass a `callback` kwarg to run asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only framework kwargs are accepted; anything else is a caller error.
    all_params = ['callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = locals()
    for name, value in iteritems(params['kwargs']):
        if name not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_identity_provider_usage_instructions" % name
            )
        params[name] = value
    del params['kwargs']

    # This endpoint takes no path, query, form or body parameters.
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    header_params['Accept'] = self.api_client.select_header_accept(
        ['text/plain'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['*/*'])

    auth_settings = ['tokenAuth', 'basicAuth']

    return self.api_client.call_api(
        '/access/token/identity-provider/usage', 'GET',
        path_params, query_params, header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='str',
        auth_settings=auth_settings,
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def log_out(self, **kwargs):
    """
    Performs a logout for other providers that have been issued a JWT.

    NOTE: This endpoint is subject to change as NiFi Registry and its REST
    API evolve. Synchronous by default; pass a `callback` kwarg to run
    asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both delegate to the raw-HTTP helper and return
    # its result unchanged.
    return self.log_out_with_http_info(**kwargs)
def log_out_with_http_info(self, **kwargs):
    """
    Logout for JWT-issued sessions (raw variant of the public wrapper).

    Issues DELETE /access/logout; no response body is expected. Pass a
    `callback` kwarg to run asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only framework kwargs are accepted; anything else is a caller error.
    all_params = ['callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = locals()
    for name, value in iteritems(params['kwargs']):
        if name not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method log_out" % name
            )
        params[name] = value
    del params['kwargs']

    # This endpoint takes no path, query, form or body parameters.
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    header_params['Accept'] = self.api_client.select_header_accept(
        ['*/*'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['*/*'])

    auth_settings = ['tokenAuth', 'basicAuth']

    return self.api_client.call_api(
        '/access/logout', 'DELETE',
        path_params, query_params, header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,
        auth_settings=auth_settings,
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def test_identity_provider_recognizes_credentials_format(self, **kwargs):
    """
    Test identity provider
    Tests the format of the credentials against this identity provider without performing authentication on the credentials to validate them. The user credentials should be passed in a format understood by the custom identity provider as defined by 'GET /access/token/identity-provider/usage'.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.test_identity_provider_recognizes_credentials_format(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the lower-level variant for just the deserialized data.
    kwargs['_return_http_data_only'] = True
    delegate = self.test_identity_provider_recognizes_credentials_format_with_http_info
    # With a callback the delegate returns the request thread; without one
    # it returns the response data directly. Either way, pass it through.
    return delegate(**kwargs)
def test_identity_provider_recognizes_credentials_format_with_http_info(self, **kwargs):
    """
    Test identity provider
    Tests the format of the credentials against this identity provider without performing authentication on the credentials to validate them. The user credentials should be passed in a format understood by the custom identity provider as defined by 'GET /access/token/identity-provider/usage'.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.test_identity_provider_recognizes_credentials_format_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject anything that is not a framework-level keyword argument.
    accepted = ('callback', '_return_http_data_only',
                '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method test_identity_provider_recognizes_credentials_format" % key
            )

    # This endpoint responds with plain text (the recognized username).
    header_params = {
        'Accept': self.api_client.select_header_accept(['text/plain']),
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }

    return self.api_client.call_api(
        '/access/token/identity-provider/test', 'POST',
        {},            # path params (none for this endpoint)
        [],            # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='str',
        auth_settings=['tokenAuth', 'basicAuth'],
        callback=kwargs.get('callback'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
| 46.197337
| 687
| 0.59425
| 4,040
| 38,159
| 5.383416
| 0.063614
| 0.058853
| 0.020599
| 0.026484
| 0.968136
| 0.963355
| 0.957929
| 0.943768
| 0.932411
| 0.926387
| 0
| 0.001024
| 0.334914
| 38,159
| 825
| 688
| 46.253333
| 0.855944
| 0.404308
| 0
| 0.8075
| 0
| 0
| 0.130146
| 0.045241
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0425
| false
| 0
| 0.0175
| 0
| 0.1225
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e3d220542d4be1d8b49cc6571b43347d70406fde
| 38,715
|
py
|
Python
|
three_player_games/rationale_3players_text_matching_models.py
|
Gorov/three_player_for_emnlp
|
d8cd74efeaf8c36304a9179690b384dbe88dbc6b
|
[
"MIT"
] | 10
|
2019-09-19T12:01:58.000Z
|
2021-02-14T04:33:33.000Z
|
three_player_games/rationale_3players_text_matching_models.py
|
Gorov/three_player_for_emnlp
|
d8cd74efeaf8c36304a9179690b384dbe88dbc6b
|
[
"MIT"
] | null | null | null |
three_player_games/rationale_3players_text_matching_models.py
|
Gorov/three_player_for_emnlp
|
d8cd74efeaf8c36304a9179690b384dbe88dbc6b
|
[
"MIT"
] | 3
|
2019-12-17T16:06:58.000Z
|
2020-11-15T08:59:08.000Z
|
# coding: utf-8
# In[ ]:
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import copy
# from models.models import CnnModel, RnnModel
# from basic_nlp_models import BasicNLPModel
# from models.encoder import Encoder, ClassificationEncoder
from models.rnn_model import RnnModel
from models.generator import Generator, DepGenerator
from utils.utils import single_regularization_loss_batch, bao_regularization_hinge_loss_batch
from utils.utils import bao_regularization_loss_batch, count_regularization_loss_batch
from utils.utils import count_regularization_hinge_loss_batch
from utils.utils import bao_regularization_hinge_loss_batch_with_none_loss
from collections import deque
# In[ ]:
class MatchingClassifierModule(nn.Module):
    '''
    Classifier used for both the E and E_anti players of the 3-player game.

    Encodes a rationale-masked question and passage with a shared RNN
    encoder, max-pools each over time, and predicts a label from the
    standard matching features [q, p, q*p, |q - p|].
    '''
    def __init__(self, args):
        super(MatchingClassifierModule, self).__init__()
        self.args = args
        self.num_labels = args.num_labels
        self.hidden_dim = args.hidden_dim
        self.mlp_hidden_dim = args.mlp_hidden_dim  # 50 in the authors' setup
        self.input_dim = args.embedding_dim
        # dropout is only instantiated when requested; forward() guards on the
        # same condition before using it
        if self.args.dropout > 0:
            self.dropout_layer = nn.Dropout(self.args.dropout)
        # one encoder shared between question and passage
        self.encoder = RnnModel(self.args, self.input_dim)
        # 4 concatenated matching features -> hidden_dim * 4 input width
        self.predictor = nn.Linear(self.hidden_dim * 4, self.num_labels)
        # large negative constant used to exclude masked positions from max-pooling
        self.NEG_INF = -1.0e6
    def forward(self, q_embeddings, p_embeddings, z_q, z_p, q_mask, p_mask, p_sort_idx=None, revert_p_idx=None):
        """
        Inputs:
            q_embeddings, p_embeddings -- torch Variables, (batch_size, length, embed_dim)
            z_q, z_p -- rationale masks, (batch_size, length)
            q_mask, p_mask -- padding masks, (batch_size, length)
            p_sort_idx, revert_p_idx -- index permutations used to sort the
                passage batch by length for the encoder and to restore the
                original order afterwards (presumably required by a
                length-sorted RNN encoder -- confirm against RnnModel)
        Outputs:
            predict -- (batch_size, num_label)
        """
        # zero out non-rationale tokens before encoding
        q_masked_input = q_embeddings * z_q.unsqueeze(-1)
        q_hiddens = self.encoder(q_masked_input, q_mask)
        p_masked_input = p_embeddings * z_p.unsqueeze(-1)
        # encode the passage in length-sorted order, then undo the permutation
        p_hiddens_sort_ = self.encoder(p_masked_input[p_sort_idx,:,:], p_mask[p_sort_idx,:])
        p_hiddens = p_hiddens_sort_[revert_p_idx, :, :]
        if self.args.dropout > 0:
            q_hiddens = self.dropout_layer(q_hiddens)
            p_hiddens = self.dropout_layer(p_hiddens)
        # max-pool over time; positions outside mask*z get NEG_INF so they
        # never win the max. NOTE(review): pooling over dim=2 and unsqueeze(1)
        # imply the encoder returns (batch, hidden_dim, length) -- confirm.
        q_max_hidden = torch.max(q_hiddens + (1 - q_mask * z_q).unsqueeze(1) * self.NEG_INF, dim=2)[0]
        p_max_hidden = torch.max(p_hiddens + (1 - p_mask * z_p).unsqueeze(1) * self.NEG_INF, dim=2)[0]
        # standard matching-feature vector: [q, p, q*p, |q-p|]
        predict = self.predictor(torch.cat([q_max_hidden, p_max_hidden, q_max_hidden * p_max_hidden,
                                            torch.abs(q_max_hidden - p_max_hidden)], dim=1))
        return predict
# In[ ]:
class IntrospectionGeneratorModule(nn.Module):
    """
    Rationale generator conditioned on a classifier's predicted label.

    The predicted label is embedded, transformed into an initial hidden
    state for the (bidirectional) dependent generator, and the generator
    then scores each token as rationale / non-rationale.
    """
    def __init__(self, args):
        super(IntrospectionGeneratorModule, self).__init__()
        self.args = args
        self.num_labels = args.num_labels
        self.hidden_dim = args.hidden_dim
        self.mlp_hidden_dim = args.mlp_hidden_dim  # 50 in the authors' setup
        self.label_embedding_dim = args.label_embedding_dim
        self.fixed_classifier = args.fixed_classifier
        self.input_dim = args.embedding_dim
        # large negative constant used to suppress masked positions
        self.NEG_INF = -1.0e6
        # should be shared with the Classifier_pred weights
        self.lab_embed_layer = self._create_label_embed_layer()
        # maps the label embedding to an initial hidden state; half of
        # hidden_dim because forward() expands it over 2 directions.
        self.Transformation = nn.Sequential()
        # BUG FIX: use integer floor division. Under Python 3, `/` yields a
        # float, which nn.Linear rejects as a dimension; `//` matches the
        # original Python 2 integer-division behavior exactly.
        self.Transformation.add_module('linear_layer', nn.Linear(self.label_embedding_dim, self.hidden_dim // 2))
        self.Transformation.add_module('tanh_layer', nn.Tanh())
        self.Generator = DepGenerator(args, self.input_dim)
    def _create_label_embed_layer(self):
        """Build a trainable (num_labels, label_embedding_dim) embedding, N(0, 0.1) init."""
        embed_layer = nn.Embedding(self.num_labels, self.label_embedding_dim)
        embed_layer.weight.data.normal_(mean=0, std=0.1)
        embed_layer.weight.requires_grad = True
        return embed_layer
    def forward(self, word_embeddings, cls_pred, mask):
        """
        Inputs:
            word_embeddings -- (batch_size, length, embed_dim)
            cls_pred -- predicted label indices, (batch_size,)
            mask -- padding mask, (batch_size, length)
        Outputs:
            z_scores_ -- per-token rationale scores, (batch_size, length, 2);
                the "select" score of padded positions is pushed to NEG_INF
        """
        cls_lab_embeddings = self.lab_embed_layer(cls_pred)  # (batch_size, lab_emb_dim)
        init_h0 = self.Transformation(cls_lab_embeddings)  # (batch_size, hidden_dim / 2)
        # replicate across the 2 RNN directions: (2, batch_size, hidden_dim / 2)
        init_h0 = init_h0.unsqueeze(0).expand(2, init_h0.size(0), init_h0.size(1)).contiguous()
        z_scores_ = self.Generator(word_embeddings, h0=init_h0, mask=mask)  # (batch_size, length, 2)
        # padded positions must never be selected as rationale
        z_scores_[:, :, 1] = z_scores_[:, :, 1] + (1 - mask) * self.NEG_INF
        return z_scores_
# In[ ]:
class Rationale3PlayerMatchingModel(nn.Module):
    """
    Base class of the 3-player rationale matching game.

    Holds the shared embedding layer plus the two classifier players:
    E_model (reads the rationale) and E_anti_model (reads its complement).
    Subclasses implement the actual forward game.
    """
    def __init__(self, embeddings, args):
        super(Rationale3PlayerMatchingModel, self).__init__()
        self.args = args
        # game configuration and regularizer weights
        self.model_type = args.model_type
        self.use_cuda = args.cuda
        self.lambda_sparsity = args.lambda_sparsity
        self.lambda_continuity = args.lambda_continuity
        self.lambda_anti = args.lambda_anti
        self.NEG_INF = -1.0e6
        # embedding table initialized from the pre-trained matrix
        self.vocab_size, self.embedding_dim = embeddings.shape
        self.embed_layer = self._create_embed_layer(embeddings)
        # model dimensions
        self.num_labels = args.num_labels
        self.hidden_dim = args.hidden_dim
        self.mlp_hidden_dim = args.mlp_hidden_dim  # 50 in the authors' setup
        self.input_dim = args.embedding_dim
        # the two classifier players share the same architecture
        self.E_model = MatchingClassifierModule(args)
        self.E_anti_model = MatchingClassifierModule(args)
        self.loss_func = nn.CrossEntropyLoss()
    def _create_embed_layer(self, embeddings):
        """Build an nn.Embedding initialized from the given numpy matrix."""
        layer = nn.Embedding(self.vocab_size, self.embedding_dim)
        layer.weight.data = torch.from_numpy(embeddings)
        # embeddings are only updated when fine-tuning is requested
        layer.weight.requires_grad = self.args.fine_tuning
        return layer
    def forward(self, x, mask):
        # abstract: subclasses define the full game forward pass
        pass
# In[ ]:
class HardRationale3PlayerMatchingModel(Rationale3PlayerMatchingModel):
    """
    3-player matching model with hard (sampled) rationales trained by REINFORCE.

    A Generator samples binary rationale masks for question and passage;
    E_model classifies from the rationale, E_anti_model from its complement.
    The generator is rewarded for E_model accuracy minus E_anti_model
    accuracy (in '3player' game modes) minus sparsity/continuity penalties,
    with a moving-average baseline kept per side in a bounded deque.
    """
    def __init__(self, embeddings, args):
        super(HardRationale3PlayerMatchingModel, self).__init__(embeddings, args)
        self.generator = Generator(args, self.input_dim)
        self.highlight_percentage = args.highlight_percentage
        self.highlight_count = args.highlight_count
        self.exploration_rate = args.exploration_rate
        # per-example losses are needed for the RL reward computation
        # NOTE(review): `reduce=False` is the deprecated spelling of
        # `reduction='none'` in newer PyTorch.
        self.loss_func = nn.CrossEntropyLoss(reduce=False)
        self.game_mode = args.game_mode
        if args.margin is not None:
            self.margin = args.margin
    def init_optimizers(self):
        """Create Adam optimizers for the two classifier players (trainable params only)."""
        self.opt_E = torch.optim.Adam(filter(lambda x: x.requires_grad, self.E_model.parameters()), lr=self.args.lr)
        self.opt_E_anti = torch.optim.Adam(filter(lambda x: x.requires_grad, self.E_anti_model.parameters()), lr=self.args.lr)
    def init_rl_optimizers(self):
        """Create the generator's RL optimizer; 10x smaller learning rate than the classifiers."""
        self.opt_G_rl = torch.optim.Adam(filter(lambda x: x.requires_grad, self.generator.parameters()), lr=self.args.lr * 0.1)
    def init_reward_queue(self):
        """Initialize the moving-average reward baselines (one deque per side), seeded with 0."""
        queue_length = 200
        self.z_q_history_rewards = deque(maxlen=queue_length)
        self.z_q_history_rewards.append(0.)
        self.z_p_history_rewards = deque(maxlen=queue_length)
        self.z_p_history_rewards.append(0.)
    def _generate_rationales(self, z_prob_):
        '''
        Turn per-token selection probabilities into a hard rationale mask.

        Input:
            z_prob_ -- (num_rows, length, 2)
        Output:
            z -- (num_rows, length) float mask in {0, 1}
            neg_log_probs -- (num_rows, length) negative log-probabilities of
                the chosen actions, used as the REINFORCE surrogate
        '''
        z_prob__ = z_prob_.view(-1, 2)  # (num_rows * length, 2)
        # sample actions during training; greedy argmax at evaluation time
        sampler = torch.distributions.Categorical(z_prob__)
        if self.training:
            z_ = sampler.sample()  # (num_rows * p_length,)
        else:
            z_ = torch.max(z_prob__, dim=-1)[1]
        # (num_rows, length)
        z = z_.view(z_prob_.size(0), z_prob_.size(1))
        if self.use_cuda == True:
            z = z.type(torch.cuda.FloatTensor)
        else:
            z = z.type(torch.FloatTensor)
        # (num_rows * length,)
        neg_log_probs_ = -sampler.log_prob(z_)
        # (num_rows, length)
        neg_log_probs = neg_log_probs_.view(z_prob_.size(0), z_prob_.size(1))
        return z, neg_log_probs
    def train_cls_one_step(self, q, p, label, q_mask, p_mask, p_sort_idx=None, revert_p_idx=None):
        """One supervised step on E_model alone, with full (all-ones) rationales."""
        self.opt_E.zero_grad()
        self.opt_E_anti.zero_grad()
        predict = self.forward_cls(q, p, q_mask, p_mask, p_sort_idx, revert_p_idx)
        e_loss = torch.mean(self.loss_func(predict, label))
        losses = {'e_loss':e_loss.cpu().data}
        e_loss.backward()
        self.opt_E.step()
        self.opt_E.zero_grad()
        return losses, predict
    def train_gen_one_step(self, q, p, label, q_mask, p_mask, p_sort_idx=None, revert_p_idx=None):
        """One REINFORCE step on the generator only; classifiers are not updated."""
        # baselines = moving average of past rewards, one per side
        z_q_baseline = Variable(torch.FloatTensor([float(np.mean(self.z_q_history_rewards))]))
        if self.args.cuda:
            z_q_baseline = z_q_baseline.cuda()
        z_p_baseline = Variable(torch.FloatTensor([float(np.mean(self.z_p_history_rewards))]))
        if self.args.cuda:
            z_p_baseline = z_p_baseline.cuda()
        self.opt_G_rl.zero_grad()
        predict, anti_predict, z_q, z_p, q_neg_log_probs, p_neg_log_probs = self.forward(q, p, q_mask, p_mask,
                                                                                         p_sort_idx, revert_p_idx)
        # classifier losses are computed here only for logging
        e_loss_anti = torch.mean(self.loss_func(anti_predict, label))
        e_loss = torch.mean(self.loss_func(predict, label))
        rl_loss, q_rewards, p_rewards, continuity_loss, sparsity_loss = self.get_loss(predict, anti_predict,
                                                                                      z_q, z_p, q_neg_log_probs, p_neg_log_probs,
                                                                                      z_q_baseline, z_p_baseline,
                                                                                      q_mask, p_mask, label)
        losses = {'e_loss':e_loss.cpu().data, 'e_loss_anti':e_loss_anti.cpu().data,
                  'g_loss':rl_loss.cpu().data}
        rl_loss.backward()
        self.opt_G_rl.step()
        self.opt_G_rl.zero_grad()
        # push this batch's mean rewards into the baseline queues
        z_q_batch_reward = np.mean(q_rewards.cpu().data.numpy())
        self.z_q_history_rewards.append(z_q_batch_reward)
        z_p_batch_reward = np.mean(p_rewards.cpu().data.numpy())
        self.z_p_history_rewards.append(z_p_batch_reward)
        rewards = (q_rewards + p_rewards) / 2
        return losses, predict, anti_predict, z_q, z_p, rewards, continuity_loss, sparsity_loss
    def train_one_step(self, q, p, label, q_mask, p_mask, p_sort_idx=None, revert_p_idx=None):
        """One joint step: update E_anti, E, and the generator from a single forward pass."""
        z_q_baseline = Variable(torch.FloatTensor([float(np.mean(self.z_q_history_rewards))]))
        if self.args.cuda:
            z_q_baseline = z_q_baseline.cuda()
        z_p_baseline = Variable(torch.FloatTensor([float(np.mean(self.z_p_history_rewards))]))
        if self.args.cuda:
            z_p_baseline = z_p_baseline.cuda()
        self.opt_G_rl.zero_grad()
        self.opt_E.zero_grad()
        self.opt_E_anti.zero_grad()
        predict, anti_predict, z_q, z_p, q_neg_log_probs, p_neg_log_probs = self.forward(q, p, q_mask, p_mask,
                                                                                         p_sort_idx, revert_p_idx)
        e_loss_anti = torch.mean(self.loss_func(anti_predict, label))
        e_loss = torch.mean(self.loss_func(predict, label))
        rl_loss, q_rewards, p_rewards, continuity_loss, sparsity_loss = self.get_loss(predict, anti_predict,
                                                                                      z_q, z_p, q_neg_log_probs, p_neg_log_probs,
                                                                                      z_q_baseline, z_p_baseline,
                                                                                      q_mask, p_mask, label)
        losses = {'e_loss':e_loss.cpu().data, 'e_loss_anti':e_loss_anti.cpu().data,
                  'g_loss':rl_loss.cpu().data}
        # NOTE(review): three separate backward() calls over one forward pass;
        # this relies on the three losses touching disjoint (or frozen)
        # sub-graphs -- confirm against the installed PyTorch version.
        e_loss_anti.backward()
        self.opt_E_anti.step()
        self.opt_E_anti.zero_grad()
        e_loss.backward()
        self.opt_E.step()
        self.opt_E.zero_grad()
        rl_loss.backward()
        self.opt_G_rl.step()
        self.opt_G_rl.zero_grad()
        z_q_batch_reward = np.mean(q_rewards.cpu().data.numpy())
        self.z_q_history_rewards.append(z_q_batch_reward)
        z_p_batch_reward = np.mean(p_rewards.cpu().data.numpy())
        self.z_p_history_rewards.append(z_p_batch_reward)
        rewards = (q_rewards + p_rewards) / 2
        return losses, predict, anti_predict, z_q, z_p, rewards, continuity_loss, sparsity_loss
    def train_one_step_predictors(self, q, p, label, q_mask, p_mask, p_sort_idx=None, revert_p_idx=None):
        """Like train_one_step, but updates only E and E_anti -- the generator is frozen."""
        z_q_baseline = Variable(torch.FloatTensor([float(np.mean(self.z_q_history_rewards))]))
        if self.args.cuda:
            z_q_baseline = z_q_baseline.cuda()
        z_p_baseline = Variable(torch.FloatTensor([float(np.mean(self.z_p_history_rewards))]))
        if self.args.cuda:
            z_p_baseline = z_p_baseline.cuda()
        self.opt_G_rl.zero_grad()
        self.opt_E.zero_grad()
        self.opt_E_anti.zero_grad()
        predict, anti_predict, z_q, z_p, q_neg_log_probs, p_neg_log_probs = self.forward(q, p, q_mask, p_mask,
                                                                                         p_sort_idx, revert_p_idx)
        e_loss_anti = torch.mean(self.loss_func(anti_predict, label))
        e_loss = torch.mean(self.loss_func(predict, label))
        # rl_loss is computed for logging; no generator step is taken here
        rl_loss, q_rewards, p_rewards, continuity_loss, sparsity_loss = self.get_loss(predict, anti_predict,
                                                                                      z_q, z_p, q_neg_log_probs, p_neg_log_probs,
                                                                                      z_q_baseline, z_p_baseline,
                                                                                      q_mask, p_mask, label)
        losses = {'e_loss':e_loss.cpu().data, 'e_loss_anti':e_loss_anti.cpu().data,
                  'g_loss':rl_loss.cpu().data}
        e_loss_anti.backward()
        self.opt_E_anti.step()
        self.opt_E_anti.zero_grad()
        e_loss.backward()
        self.opt_E.step()
        self.opt_E.zero_grad()
        # baselines still track rewards so later generator steps stay calibrated
        z_q_batch_reward = np.mean(q_rewards.cpu().data.numpy())
        self.z_q_history_rewards.append(z_q_batch_reward)
        z_p_batch_reward = np.mean(p_rewards.cpu().data.numpy())
        self.z_p_history_rewards.append(z_p_batch_reward)
        rewards = (q_rewards + p_rewards) / 2
        return losses, predict, anti_predict, z_q, z_p, rewards, continuity_loss, sparsity_loss
    def forward_cls(self, q, p, q_mask, p_mask, p_sort_idx=None, revert_p_idx=None):
        """
        Classify with E_model using full (all-ones) rationales -- no generator.
        Inputs:
            q, p -- torch Variables in shape of (batch_size, length)
            q_mask, p_mask -- torch Variables in shape of (batch_size, length)
        Outputs:
            predict -- (batch_size, num_label)
        """
        q_embeddings = self.embed_layer(q)  # (batch_size, length, embedding_dim)
        p_embeddings = self.embed_layer(p)  # (batch_size, length, embedding_dim)
        neg_inf = -1.0e6
        # every token is "selected": rationale masks of all ones
        z_q = torch.ones_like(q_mask)
        z_p = torch.ones_like(p_mask)
        predict = self.E_model(q_embeddings, p_embeddings, z_q, z_p, q_mask, p_mask, p_sort_idx, revert_p_idx)
        return predict
    def forward(self, q, p, q_mask, p_mask, p_sort_idx=None, revert_p_idx=None):
        """
        Full game forward: sample rationales, classify with E on the
        rationale and with E_anti on its complement.
        Inputs:
            q, p -- torch Variables in shape of (batch_size, length)
            q_mask, p_mask -- torch Variables in shape of (batch_size, length)
        Outputs:
            predict, anti_predict -- (batch_size, num_label)
            z_q, z_p -- sampled rationales, (batch_size, length)
            q_neg_log_probs, p_neg_log_probs -- REINFORCE terms, (batch_size, length)
        """
        q_embeddings = self.embed_layer(q)  # (batch_size, length, embedding_dim)
        p_embeddings = self.embed_layer(p)  # (batch_size, length, embedding_dim)
        neg_inf = -1.0e6
        # --- question side ---
        z_scores_ = self.generator(q_embeddings, q_mask)  # (batch_size, length, 2)
        # padded positions must never be selected
        z_scores_[:, :, 1] = z_scores_[:, :, 1] + (1 - q_mask) * neg_inf
        z_probs_ = F.softmax(z_scores_, dim=-1)
        # epsilon-greedy style exploration mixed in on real (unpadded) tokens only
        z_probs_ = (q_mask.unsqueeze(-1) * ( (1 - self.exploration_rate) * z_probs_ + self.exploration_rate / z_probs_.size(-1) ) ) + ((1 - q_mask.unsqueeze(-1)) * z_probs_)
        z_q, q_neg_log_probs = self._generate_rationales(z_probs_)
        # --- passage side (encoded in length-sorted order, then restored) ---
        z_scores_sort_ = self.generator(p_embeddings[p_sort_idx,:,:], p_mask[p_sort_idx,:])
        z_scores_ = z_scores_sort_[revert_p_idx, :, :]
        z_scores_[:, :, 1] = z_scores_[:, :, 1] + (1 - p_mask) * neg_inf
        z_probs_ = F.softmax(z_scores_, dim=-1)
        z_probs_ = (p_mask.unsqueeze(-1) * ( (1 - self.exploration_rate) * z_probs_ + self.exploration_rate / z_probs_.size(-1) ) ) + ((1 - p_mask.unsqueeze(-1)) * z_probs_)
        z_p, p_neg_log_probs = self._generate_rationales(z_probs_)
        # E sees the rationale; E_anti sees the complement
        predict = self.E_model(q_embeddings, p_embeddings, z_q, z_p, q_mask, p_mask, p_sort_idx, revert_p_idx)
        anti_predict = self.E_anti_model(q_embeddings, p_embeddings, 1 - z_q, 1 - z_p, q_mask, p_mask, p_sort_idx, revert_p_idx)
        return predict, anti_predict, z_q, z_p, q_neg_log_probs, p_neg_log_probs
    def get_advantages(self, predict, anti_predict, label, z_q, z_p,
                       q_neg_log_probs, p_neg_log_probs, q_baseline, p_baseline, q_mask, p_mask):
        '''
        Compute per-example REINFORCE rewards and baseline-subtracted advantages.
        Input:
            z_q, z_p -- (batch_size, length)
        NOTE(review): only the question-side continuity/sparsity losses are
        returned (for reporting); the passage-side ones affect the rewards
        but are discarded.
        '''
        # accuracy-based reward: 1 for a correct prediction, 0 otherwise,
        # scaled up by (lambda_anti + 0.2) in the adversarial game modes
        _, y_pred = torch.max(predict, dim=1)
        if self.game_mode.startswith('3player'):
            prediction = (y_pred == label).type(torch.FloatTensor) * (self.lambda_anti + 0.2)
        else:
            prediction = (y_pred == label).type(torch.FloatTensor)
        _, y_anti_pred = torch.max(anti_predict, dim=1)
        prediction_anti = (y_anti_pred == label).type(torch.FloatTensor) * self.lambda_anti
        if self.use_cuda:
            prediction = prediction.cuda()  # (batch_size,)
            prediction_anti = prediction_anti.cuda()
        # sparsity/continuity regularizers per side (hinge/count variants exist in utils)
        q_continuity_loss, q_sparsity_loss = bao_regularization_loss_batch(z_q, self.highlight_percentage, q_mask)
        p_continuity_loss, p_sparsity_loss = bao_regularization_loss_batch(z_p, self.highlight_percentage, p_mask)
        q_continuity_loss = q_continuity_loss * self.lambda_continuity
        p_continuity_loss = p_continuity_loss * self.lambda_continuity
        q_sparsity_loss = q_sparsity_loss * self.lambda_sparsity
        p_sparsity_loss = p_sparsity_loss * self.lambda_sparsity
        # batch RL reward: in '3player' modes the adversary's accuracy is penalized
        if self.game_mode.startswith('3player'):
            q_rewards = prediction - prediction_anti - q_sparsity_loss - q_continuity_loss
            p_rewards = prediction - prediction_anti - p_sparsity_loss - p_continuity_loss
        else:
            q_rewards = prediction - q_sparsity_loss - q_continuity_loss
            p_rewards = prediction - p_sparsity_loss - p_continuity_loss
        # advantages are detached so gradients flow only through the log-probs
        q_advantages = q_rewards - q_baseline  # (batch_size,)
        q_advantages = Variable(q_advantages.data, requires_grad=False)
        if self.use_cuda:
            q_advantages = q_advantages.cuda()
        p_advantages = p_rewards - p_baseline  # (batch_size,)
        p_advantages = Variable(p_advantages.data, requires_grad=False)
        if self.use_cuda:
            p_advantages = p_advantages.cuda()
        return q_advantages, p_advantages, q_rewards, p_rewards, q_continuity_loss, q_sparsity_loss
    def get_listwise_advantages(self, predict, anti_predict, label, z_q, z_p,
                                q_neg_log_probs, p_neg_log_probs, q_baseline, p_baseline, q_mask, p_mask):
        '''
        Listwise variant of get_advantages: the batch is viewed as
        (batch_size, num_samples) candidate lists and the reward is 1 when
        the first candidate (assumed to be the positive) scores highest.
        Input:
            z_q, z_p -- (batch_size, length)
        '''
        # predict -- (batch * sample, 2) -> (batch, sample); score of class 1 per candidate
        predict_2d = predict[:,1].contiguous().view(self.args.batch_size, -1)
        anti_predict_2d = anti_predict[:,1].contiguous().view(self.args.batch_size, -1)
        _, y_pred = torch.max(predict_2d, dim=1)
        # reward 1 iff the argmax candidate is index 0; broadcast back per candidate
        prediction = (y_pred == 0).type(torch.FloatTensor) * (self.lambda_anti + 0.2)
        prediction = prediction.unsqueeze(1).expand_as(predict_2d).contiguous().view(predict.size(0))
        _, y_anti_pred = torch.max(anti_predict_2d, dim=1)
        prediction_anti = (y_anti_pred == 0).type(torch.FloatTensor) * self.lambda_anti
        prediction_anti = prediction_anti.unsqueeze(1).expand_as(anti_predict_2d).contiguous().view(anti_predict.size(0))
        if self.use_cuda:
            prediction = prediction.cuda()  # (batch_size,)
            prediction_anti = prediction_anti.cuda()
        q_continuity_loss, q_sparsity_loss = bao_regularization_loss_batch(z_q, self.highlight_percentage, q_mask)
        p_continuity_loss, p_sparsity_loss = bao_regularization_loss_batch(z_p, self.highlight_percentage, p_mask)
        q_continuity_loss = q_continuity_loss * self.lambda_continuity
        p_continuity_loss = p_continuity_loss * self.lambda_continuity
        q_sparsity_loss = q_sparsity_loss * self.lambda_sparsity
        p_sparsity_loss = p_sparsity_loss * self.lambda_sparsity
        # batch RL reward
        if self.game_mode.startswith('3player'):
            q_rewards = prediction - prediction_anti - q_sparsity_loss - q_continuity_loss
            p_rewards = prediction - prediction_anti - p_sparsity_loss - p_continuity_loss
        else:
            q_rewards = prediction - q_sparsity_loss - q_continuity_loss
            p_rewards = prediction - p_sparsity_loss - p_continuity_loss
        q_advantages = q_rewards - q_baseline  # (batch_size,)
        q_advantages = Variable(q_advantages.data, requires_grad=False)
        if self.use_cuda:
            q_advantages = q_advantages.cuda()
        p_advantages = p_rewards - p_baseline  # (batch_size,)
        p_advantages = Variable(p_advantages.data, requires_grad=False)
        if self.use_cuda:
            p_advantages = p_advantages.cuda()
        return q_advantages, p_advantages, q_rewards, p_rewards, q_continuity_loss, q_sparsity_loss
    def get_loss(self, predict, anti_predict, z_q, z_p, q_neg_log_probs, p_neg_log_probs,
                 q_baseline, p_baseline, q_mask, p_mask, label):
        """REINFORCE surrogate loss: sum of advantage-weighted neg-log-probs, averaged over the two sides."""
        reward_tuple = self.get_advantages(predict, anti_predict, label, z_q, z_p,
                                           q_neg_log_probs, p_neg_log_probs,
                                           q_baseline, p_baseline, q_mask, p_mask)
        q_advantages, p_advantages, q_rewards, p_rewards, continuity_loss, sparsity_loss = reward_tuple
        # broadcast the per-example advantage over every token position
        # (batch_size, q_length)
        q_advantages_expand_ = q_advantages.unsqueeze(-1).expand_as(q_neg_log_probs)
        p_advantages_expand_ = p_advantages.unsqueeze(-1).expand_as(p_neg_log_probs)
        q_rl_loss = torch.sum(q_neg_log_probs * q_advantages_expand_ * q_mask)
        p_rl_loss = torch.sum(p_neg_log_probs * p_advantages_expand_ * p_mask)
        rl_loss = (q_rl_loss + p_rl_loss) / 2
        return rl_loss, q_rewards, p_rewards, continuity_loss, sparsity_loss
# In[ ]:
class HardIntrospection3PlayerMatchingModel(HardRationale3PlayerMatchingModel):
def __init__(self, embeddings, args):
super(HardIntrospection3PlayerMatchingModel, self).__init__(embeddings, args)
self.generator = IntrospectionGeneratorModule(args)
self.classifier = MatchingClassifierModule(args)
def train_gen_one_step(self, q, p, label, q_mask, p_mask, p_sort_idx=None, revert_p_idx=None):
z_q_baseline = Variable(torch.FloatTensor([float(np.mean(self.z_q_history_rewards))]))
if self.args.cuda:
z_q_baseline = z_q_baseline.cuda()
z_p_baseline = Variable(torch.FloatTensor([float(np.mean(self.z_p_history_rewards))]))
if self.args.cuda:
z_p_baseline = z_p_baseline.cuda()
self.opt_G_rl.zero_grad()
predict, anti_predict, z_q, z_p, q_neg_log_probs, p_neg_log_probs = self.forward(q, p, q_mask, p_mask,
p_sort_idx, revert_p_idx)
e_loss_anti = torch.mean(self.loss_func(anti_predict, label))
e_loss = torch.mean(self.loss_func(predict, label))
rl_loss, q_rewards, p_rewards, continuity_loss, sparsity_loss = self.get_loss(predict, anti_predict,
z_q, z_p, q_neg_log_probs, p_neg_log_probs,
z_q_baseline, z_p_baseline,
q_mask, p_mask, label)
# losses = {'g_rl_loss':rl_loss.cpu().data}
losses = {'e_loss':e_loss.cpu().data, 'e_loss_anti':e_loss_anti.cpu().data,
'g_loss':rl_loss.cpu().data}
rl_loss.backward()
self.opt_G_rl.step()
self.opt_G_rl.zero_grad()
z_q_batch_reward = np.mean(q_rewards.cpu().data.numpy())
self.z_q_history_rewards.append(z_q_batch_reward)
z_p_batch_reward = np.mean(p_rewards.cpu().data.numpy())
self.z_p_history_rewards.append(z_p_batch_reward)
rewards = (q_rewards + p_rewards) / 2
return losses, predict, anti_predict, z_q, z_p, rewards, continuity_loss, sparsity_loss
def train_one_step(self, q, p, label, q_mask, p_mask, p_sort_idx=None, revert_p_idx=None):
z_q_baseline = Variable(torch.FloatTensor([float(np.mean(self.z_q_history_rewards))]))
if self.args.cuda:
z_q_baseline = z_q_baseline.cuda()
z_p_baseline = Variable(torch.FloatTensor([float(np.mean(self.z_p_history_rewards))]))
if self.args.cuda:
z_p_baseline = z_p_baseline.cuda()
self.opt_G_rl.zero_grad()
self.opt_E.zero_grad()
self.opt_E_anti.zero_grad()
predict, anti_predict, z_q, z_p, q_neg_log_probs, p_neg_log_probs = self.forward(q, p, q_mask, p_mask,
p_sort_idx, revert_p_idx)
e_loss_anti = torch.mean(self.loss_func(anti_predict, label))
e_loss = torch.mean(self.loss_func(predict, label))
rl_loss, q_rewards, p_rewards, continuity_loss, sparsity_loss = self.get_loss(predict, anti_predict,
z_q, z_p, q_neg_log_probs, p_neg_log_probs,
z_q_baseline, z_p_baseline,
q_mask, p_mask, label)
losses = {'e_loss':e_loss.cpu().data, 'e_loss_anti':e_loss_anti.cpu().data,
'g_loss':rl_loss.cpu().data}
e_loss_anti.backward()
self.opt_E_anti.step()
self.opt_E_anti.zero_grad()
e_loss.backward()
self.opt_E.step()
self.opt_E.zero_grad()
rl_loss.backward()
self.opt_G_rl.step()
self.opt_G_rl.zero_grad()
z_q_batch_reward = np.mean(q_rewards.cpu().data.numpy())
self.z_q_history_rewards.append(z_q_batch_reward)
z_p_batch_reward = np.mean(p_rewards.cpu().data.numpy())
self.z_p_history_rewards.append(z_p_batch_reward)
rewards = (q_rewards + p_rewards) / 2
return losses, predict, anti_predict, z_q, z_p, rewards, continuity_loss, sparsity_loss
def train_one_step_predictors(self, q, p, label, q_mask, p_mask, p_sort_idx=None, revert_p_idx=None):
z_q_baseline = Variable(torch.FloatTensor([float(np.mean(self.z_q_history_rewards))]))
if self.args.cuda:
z_q_baseline = z_q_baseline.cuda()
z_p_baseline = Variable(torch.FloatTensor([float(np.mean(self.z_p_history_rewards))]))
if self.args.cuda:
z_p_baseline = z_p_baseline.cuda()
self.opt_G_rl.zero_grad()
self.opt_E.zero_grad()
self.opt_E_anti.zero_grad()
predict, anti_predict, z_q, z_p, q_neg_log_probs, p_neg_log_probs = self.forward(q, p, q_mask, p_mask,
p_sort_idx, revert_p_idx)
e_loss_anti = torch.mean(self.loss_func(anti_predict, label))
e_loss = torch.mean(self.loss_func(predict, label))
rl_loss, q_rewards, p_rewards, continuity_loss, sparsity_loss = self.get_loss(predict, anti_predict,
z_q, z_p, q_neg_log_probs, p_neg_log_probs,
z_q_baseline, z_p_baseline,
q_mask, p_mask, label)
# losses = {'e_loss':e_loss.cpu().data, 'e_loss_anti':e_loss_anti.cpu().data}
losses = {'e_loss':e_loss.cpu().data, 'e_loss_anti':e_loss_anti.cpu().data,
'g_loss':rl_loss.cpu().data}
e_loss_anti.backward()
self.opt_E_anti.step()
self.opt_E_anti.zero_grad()
e_loss.backward()
self.opt_E.step()
self.opt_E.zero_grad()
z_q_batch_reward = np.mean(q_rewards.cpu().data.numpy())
self.z_q_history_rewards.append(z_q_batch_reward)
z_p_batch_reward = np.mean(p_rewards.cpu().data.numpy())
self.z_p_history_rewards.append(z_p_batch_reward)
rewards = (q_rewards + p_rewards) / 2
return losses, predict, anti_predict, z_q, z_p, rewards, continuity_loss, sparsity_loss
def forward(self, q, p, q_mask, p_mask, p_sort_idx=None, revert_p_idx=None):
"""
Inputs:
x -- torch Variable in shape of (batch_size, length)
mask -- torch Variable in shape of (batch_size, length)
Outputs:
predict -- (batch_size, num_label)
z -- rationale (batch_size, length)
"""
q_embeddings = self.embed_layer(q) #(batch_size, length, embedding_dim)
p_embeddings = self.embed_layer(p) #(batch_size, length, embedding_dim)
z_q_ = torch.ones_like(q_mask)
z_p_ = torch.ones_like(p_mask)
cls_predict = self.classifier(q_embeddings, p_embeddings, z_q_, z_p_, q_mask, p_mask, p_sort_idx, revert_p_idx)
_, cls_predict = torch.max(cls_predict, dim=1) # (batch_size,)
neg_inf = -1.0e6
z_scores_ = self.generator(q_embeddings, cls_predict, q_mask) #(batch_size, length, 2)
z_scores_[:, :, 1] = z_scores_[:, :, 1] + (1 - q_mask) * neg_inf
z_probs_ = F.softmax(z_scores_, dim=-1)
z_probs_ = (q_mask.unsqueeze(-1) * ( (1 - self.exploration_rate) * z_probs_ + self.exploration_rate / z_probs_.size(-1) ) ) + ((1 - q_mask.unsqueeze(-1)) * z_probs_)
z_q, q_neg_log_probs = self._generate_rationales(z_probs_)
z_scores_sort_ = self.generator(p_embeddings[p_sort_idx,:,:], cls_predict, p_mask[p_sort_idx,:])
z_scores_ = z_scores_sort_[revert_p_idx, :, :]
z_scores_[:, :, 1] = z_scores_[:, :, 1] + (1 - p_mask) * neg_inf
z_probs_ = F.softmax(z_scores_, dim=-1)
z_probs_ = (p_mask.unsqueeze(-1) * ( (1 - self.exploration_rate) * z_probs_ + self.exploration_rate / z_probs_.size(-1) ) ) + ((1 - p_mask.unsqueeze(-1)) * z_probs_)
z_p, p_neg_log_probs = self._generate_rationales(z_probs_)
predict = self.E_model(q_embeddings, p_embeddings, z_q, z_p, q_mask, p_mask, p_sort_idx, revert_p_idx)
anti_predict = self.E_anti_model(q_embeddings, p_embeddings, 1 - z_q, 1 - z_p, q_mask, p_mask, p_sort_idx, revert_p_idx)
return predict, anti_predict, z_q, z_p, q_neg_log_probs, p_neg_log_probs
def get_advantages(self, predict, anti_predict, label, z_q, z_p,
        q_neg_log_probs, p_neg_log_probs, q_baseline, p_baseline, q_mask, p_mask):
    '''
    Compute per-example RL rewards and baseline-subtracted advantages for
    the question-side and passage-side rationale generators.
    Input:
        predict -- classifier logits on the rationales (batch_size, num_label)
        anti_predict -- adversarial classifier logits on the complements
        label -- gold labels (batch_size,)
        z_q, z_p -- sampled rationale masks, each (batch_size, length)
        q_neg_log_probs, p_neg_log_probs -- unused here; kept for signature
            symmetry with get_loss
        q_baseline, p_baseline -- reward baselines for variance reduction
        q_mask, p_mask -- padding masks (batch_size, length)
    Output:
        q_advantages, p_advantages -- detached advantages (batch_size,)
        q_rewards, p_rewards -- raw rewards (batch_size,)
        q_continuity_loss, q_sparsity_loss -- question-side regularizers
            only.  NOTE(review): the passage-side losses are computed but
            never returned -- confirm this is intentional.
    '''
    # total loss of accuracy (not batchwise)
    _, y_pred = torch.max(predict, dim=1)
    if self.game_mode.startswith('3player'):
        # 3-player game: accuracy reward rescaled relative to the
        # anti-predictor weight.  (.type(torch.FloatTensor) also moves the
        # tensor to CPU; it is sent back to the GPU below when use_cuda.)
        prediction = (y_pred == label).type(torch.FloatTensor) * (self.lambda_anti + 0.2)
        # prediction = (y_pred == label).type(torch.FloatTensor) * 0.2
        # prediction = (y_pred == label).type(torch.FloatTensor)
    else:
        prediction = (y_pred == label).type(torch.FloatTensor)
    _, y_anti_pred = torch.max(anti_predict, dim=1)
    prediction_anti = (y_anti_pred == label).type(torch.FloatTensor) * self.lambda_anti
    if self.use_cuda:
        prediction = prediction.cuda() #(batch_size,)
        prediction_anti = prediction_anti.cuda()
    # Rationale shape regularizers (continuity + sparsity), padding-masked.
    q_continuity_loss, q_sparsity_loss = bao_regularization_loss_batch(z_q, self.highlight_percentage, q_mask)
    p_continuity_loss, p_sparsity_loss = bao_regularization_loss_batch(z_p, self.highlight_percentage, p_mask)
    q_continuity_loss = q_continuity_loss * self.lambda_continuity
    p_continuity_loss = p_continuity_loss * self.lambda_continuity
    q_sparsity_loss = q_sparsity_loss * self.lambda_sparsity
    p_sparsity_loss = p_sparsity_loss * self.lambda_sparsity
    # batch RL reward
    if self.game_mode.startswith('3player'):
        # q_rewards = prediction - prediction_anti - q_sparsity_loss - q_continuity_loss
        # p_rewards = prediction - prediction_anti - p_sparsity_loss - p_continuity_loss
        # Purely adversarial reward: penalize the anti-predictor's accuracy
        # plus the regularizers (the main accuracy term is commented out).
        q_rewards = - prediction_anti - q_sparsity_loss - q_continuity_loss
        p_rewards = - prediction_anti - p_sparsity_loss - p_continuity_loss
    else:
        q_rewards = prediction - q_sparsity_loss - q_continuity_loss
        p_rewards = prediction - p_sparsity_loss - p_continuity_loss
    # Subtract the baseline and detach so the advantage is treated as a
    # constant by autograd (REINFORCE).
    q_advantages = q_rewards - q_baseline # (batch_size,)
    q_advantages = Variable(q_advantages.data, requires_grad=False)
    if self.use_cuda:
        q_advantages = q_advantages.cuda()
    p_advantages = p_rewards - p_baseline # (batch_size,)
    p_advantages = Variable(p_advantages.data, requires_grad=False)
    if self.use_cuda:
        p_advantages = p_advantages.cuda()
    return q_advantages, p_advantages, q_rewards, p_rewards, q_continuity_loss, q_sparsity_loss
def get_loss(self, predict, anti_predict, z_q, z_p, q_neg_log_probs, p_neg_log_probs,
        q_baseline, p_baseline, q_mask, p_mask, label):
    """Combine both generators' REINFORCE losses into one scalar.

    Delegates reward/advantage computation to ``get_advantages`` and
    returns the averaged policy-gradient loss together with the raw
    rewards and the regularizer terms it passes through.
    """
    (q_advantages, p_advantages, q_rewards, p_rewards,
     continuity_loss, sparsity_loss) = self.get_advantages(
        predict, anti_predict, label, z_q, z_p,
        q_neg_log_probs, p_neg_log_probs,
        q_baseline, p_baseline, q_mask, p_mask)
    # Broadcast each per-example advantage over its sequence positions and
    # weight the sampled negative log-probs, ignoring padded positions.
    q_rl_loss = torch.sum(
        q_neg_log_probs * q_advantages.unsqueeze(-1).expand_as(q_neg_log_probs) * q_mask)
    p_rl_loss = torch.sum(
        p_neg_log_probs * p_advantages.unsqueeze(-1).expand_as(p_neg_log_probs) * p_mask)
    rl_loss = (q_rl_loss + p_rl_loss) / 2
    return rl_loss, q_rewards, p_rewards, continuity_loss, sparsity_loss
| 44.044369
| 173
| 0.605011
| 5,122
| 38,715
| 4.157946
| 0.042952
| 0.009203
| 0.032023
| 0.018312
| 0.820726
| 0.79861
| 0.766352
| 0.759591
| 0.749167
| 0.734986
| 0
| 0.006889
| 0.298851
| 38,715
| 878
| 174
| 44.094533
| 0.777675
| 0.114762
| 0
| 0.73913
| 0
| 0
| 0.00595
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057312
| false
| 0.001976
| 0.025692
| 0
| 0.132411
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e3e31e5a8742fd5a924868f474069a97f6dc43ce
| 118
|
py
|
Python
|
commonlibs/__init__.py
|
floatingstarZ/CommonLibs
|
9609d5a655a13fad27ae7828977815e982ae1de4
|
[
"CNRI-Python"
] | null | null | null |
commonlibs/__init__.py
|
floatingstarZ/CommonLibs
|
9609d5a655a13fad27ae7828977815e982ae1de4
|
[
"CNRI-Python"
] | null | null | null |
commonlibs/__init__.py
|
floatingstarZ/CommonLibs
|
9609d5a655a13fad27ae7828977815e982ae1de4
|
[
"CNRI-Python"
] | null | null | null |
from .math_tools import *
from .transform_tools import *
from .drawing_tools import *
from .common_tools import *
| 13.111111
| 30
| 0.762712
| 16
| 118
| 5.375
| 0.4375
| 0.511628
| 0.523256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169492
| 118
| 8
| 31
| 14.75
| 0.877551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5816906b1a3a753511319b6f2fd9e8d0229f51ce
| 7,287
|
py
|
Python
|
test/test_compute_inventory.py
|
jhdark/divHretention
|
702c4b58f1721917d665134b9bc85287cb002c23
|
[
"MIT"
] | 9
|
2021-06-08T16:23:36.000Z
|
2021-12-16T16:58:10.000Z
|
test/test_compute_inventory.py
|
jhdark/divHretention
|
702c4b58f1721917d665134b9bc85287cb002c23
|
[
"MIT"
] | 15
|
2021-06-10T08:12:51.000Z
|
2021-06-24T08:08:16.000Z
|
test/test_compute_inventory.py
|
jhdark/divHretention
|
702c4b58f1721917d665134b9bc85287cb002c23
|
[
"MIT"
] | 1
|
2021-06-11T14:59:13.000Z
|
2021-06-11T14:59:13.000Z
|
import time
import pytest
import numpy as np
import divHretention
def test_fecth_inventory_and_error():
    """Checks that fetch_inventory_and_error adds entries to database_inv_sig
    and that the execution time is smaller when fetching an already existing
    entry
    """
    # build: start from an empty cache.
    # BUG FIX: the original deleted keys while iterating the mapping itself,
    # which raises RuntimeError in Python 3; iterate over a snapshot instead.
    for key in list(divHretention.database_inv_sig):
        del divHretention.database_inv_sig[key]

    # test
    test_time = 1e3

    start_time = time.time()
    inv, sig = divHretention.fetch_inventory_and_error(test_time)
    long_time = time.time() - start_time

    # Second fetch of the same time should hit the cache and be faster.
    start_time = time.time()
    inv, sig = divHretention.fetch_inventory_and_error(test_time)
    short_time = time.time() - start_time

    assert test_time in divHretention.database_inv_sig
    assert short_time < long_time
def test_compute_inventory():
    """Checks that compute_inventory runs correctly
    """
    T = [1000]
    c_max = [1e20]
    time = 1e3

    inv, sig = divHretention.compute_inventory(T, c_max, time)

    # One inventory and one error per (T, c_max) point.
    assert len(inv) == len(sig) == len(T)
def test_compute_inventory_float():
    """Checks that compute_inventory raises a TypeError when a float is given
    """
    # Scalars instead of lists must be rejected.
    with pytest.raises(TypeError):
        divHretention.compute_inventory(1000, 1e20, 1e3)
def test_compute_c_max_h():
    """Runs compute_c_max with isotope H and checks that the correct value is
    produced
    """
    # build
    T = np.array([600, 500])
    E_ion = np.array([20, 10])
    E_atom = np.array([30, 40])
    angles_ion = np.array([60, 60])
    angles_atom = np.array([60, 60])
    ion_flux = np.array([1e21, 1e20])
    atom_flux = np.array([2e21, 2e20])

    # run
    c_max = divHretention.compute_c_max(
        T, E_ion, E_atom, angles_ion, angles_atom,
        ion_flux, atom_flux, full_export=False, isotope="H")

    # test: recompute the expected surface concentration independently
    D_0_W = 1.9e-7
    E_D_W = 0.2
    k_B = 8.617e-5
    D = D_0_W * np.exp(-E_D_W / k_B / T)

    # implantation ranges
    implantation_range_ions = [
        float(divHretention.implantation_range(energy, angle))
        for energy, angle in zip(E_ion, angles_ion)]
    implantation_range_atoms = [
        float(divHretention.implantation_range(energy, angle))
        for energy, angle in zip(E_atom, angles_atom)]

    # reflection coefficients
    reflection_coeff_ions = np.array([
        float(divHretention.reflection_coeff(energy, angle))
        for energy, angle in zip(E_ion, angles_ion)])
    reflection_coeff_atoms = np.array([
        float(divHretention.reflection_coeff(energy, angle))
        for energy, angle in zip(E_atom, angles_atom)])

    c_max_ions = (1 - reflection_coeff_ions) * \
        ion_flux * implantation_range_ions / D
    c_max_atoms = (1 - reflection_coeff_atoms) * \
        atom_flux * implantation_range_atoms / D
    c_max_expected = c_max_ions + c_max_atoms

    # BUG FIX: the original `c_max.all() == c_max_expected.all()` compared two
    # scalar booleans and passed for any pair of all-nonzero arrays; compare
    # the values element-wise with a float tolerance instead.
    assert np.allclose(c_max, c_max_expected)
def test_compute_c_max_D():
    """Runs compute_c_max with isotope D and checks that the correct value is
    produced
    """
    # build
    T = np.array([600, 500])
    E_ion = np.array([20, 10])
    E_atom = np.array([30, 40])
    angles_ion = np.array([60, 60])
    angles_atom = np.array([60, 60])
    ion_flux = np.array([1e21, 1e20])
    atom_flux = np.array([2e21, 2e20])

    # run
    c_max = divHretention.compute_c_max(
        T, E_ion, E_atom, angles_ion, angles_atom,
        ion_flux, atom_flux, full_export=False, isotope="D")

    # test: recompute the expected surface concentration independently
    D_0_W = 1.9e-7
    E_D_W = 0.2
    k_B = 8.617e-5
    D = D_0_W * np.exp(-E_D_W / k_B / T)
    # Deuterium diffusivity scaled by 1/sqrt(mass ratio) relative to H.
    D *= 1 / 2 ** 0.5

    # implantation ranges
    implantation_range_ions = [
        float(divHretention.implantation_range(energy, angle))
        for energy, angle in zip(E_ion, angles_ion)]
    implantation_range_atoms = [
        float(divHretention.implantation_range(energy, angle))
        for energy, angle in zip(E_atom, angles_atom)]

    # reflection coefficients
    reflection_coeff_ions = np.array([
        float(divHretention.reflection_coeff(energy, angle))
        for energy, angle in zip(E_ion, angles_ion)])
    reflection_coeff_atoms = np.array([
        float(divHretention.reflection_coeff(energy, angle))
        for energy, angle in zip(E_atom, angles_atom)])

    c_max_ions = (1 - reflection_coeff_ions) * \
        ion_flux * implantation_range_ions / D
    c_max_atoms = (1 - reflection_coeff_atoms) * \
        atom_flux * implantation_range_atoms / D
    c_max_expected = c_max_ions + c_max_atoms

    # BUG FIX: the original `c_max.all() == c_max_expected.all()` compared two
    # scalar booleans and passed for any pair of all-nonzero arrays; compare
    # the values element-wise with a float tolerance instead.
    assert np.allclose(c_max, c_max_expected)
def test_compute_c_max_T():
    """Runs compute_c_max with isotope T and checks that the correct value is
    produced
    """
    # BUG FIX: this function was named test_compute_c_max_D, shadowing the
    # deuterium test above so that it never ran; renamed to match its
    # docstring (tritium).
    # build
    T = np.array([600, 500])
    E_ion = np.array([20, 10])
    E_atom = np.array([30, 40])
    angles_ion = np.array([60, 60])
    angles_atom = np.array([60, 60])
    ion_flux = np.array([1e21, 1e20])
    atom_flux = np.array([2e21, 2e20])

    # run
    c_max = divHretention.compute_c_max(
        T, E_ion, E_atom, angles_ion, angles_atom,
        ion_flux, atom_flux, full_export=False, isotope="T")

    # test: recompute the expected surface concentration independently
    D_0_W = 1.9e-7
    E_D_W = 0.2
    k_B = 8.617e-5
    D = D_0_W * np.exp(-E_D_W / k_B / T)
    # Tritium diffusivity scaled by 1/sqrt(mass ratio) relative to H.
    D *= 1 / 3 ** 0.5

    # implantation ranges
    implantation_range_ions = [
        float(divHretention.implantation_range(energy, angle))
        for energy, angle in zip(E_ion, angles_ion)]
    implantation_range_atoms = [
        float(divHretention.implantation_range(energy, angle))
        for energy, angle in zip(E_atom, angles_atom)]

    # reflection coefficients
    reflection_coeff_ions = np.array([
        float(divHretention.reflection_coeff(energy, angle))
        for energy, angle in zip(E_ion, angles_ion)])
    reflection_coeff_atoms = np.array([
        float(divHretention.reflection_coeff(energy, angle))
        for energy, angle in zip(E_atom, angles_atom)])

    c_max_ions = (1 - reflection_coeff_ions) * \
        ion_flux * implantation_range_ions / D
    c_max_atoms = (1 - reflection_coeff_atoms) * \
        atom_flux * implantation_range_atoms / D
    c_max_expected = c_max_ions + c_max_atoms

    # BUG FIX: the original asserted `c_max.all() == c_max_expected.all()`
    # (twice), which compared two scalar booleans and passed for any pair of
    # all-nonzero arrays; compare element-wise with a float tolerance.
    assert np.allclose(c_max, c_max_expected)
def test_compute_c_max_output():
    """compute_c_max returns 3 values with full_export=True and 2 without."""
    # build
    T = np.array([600, 500])
    E_ion = np.array([20, 10])
    E_atom = np.array([30, 40])
    angles_ion = np.array([60, 60])
    angles_atom = np.array([60, 60])
    ion_flux = np.array([1e21, 1e20])
    atom_flux = np.array([2e21, 2e20])

    # run + test: full export carries one extra value
    output = divHretention.compute_c_max(
        T, E_ion, E_atom, angles_ion, angles_atom,
        ion_flux, atom_flux, full_export=True)
    assert len(output) == 3

    # run + test: compact export
    output = divHretention.compute_c_max(
        T, E_ion, E_atom, angles_ion, angles_atom,
        ion_flux, atom_flux, full_export=False)
    assert len(output) == 2
| 30.48954
| 77
| 0.668725
| 1,073
| 7,287
| 4.235788
| 0.105312
| 0.037844
| 0.031463
| 0.052805
| 0.862266
| 0.831243
| 0.825523
| 0.816722
| 0.816722
| 0.79736
| 0
| 0.036899
| 0.226431
| 7,287
| 238
| 78
| 30.617647
| 0.769381
| 0.111706
| 0
| 0.79085
| 0
| 0
| 0.000471
| 0
| 0
| 0
| 0
| 0
| 0.065359
| 1
| 0.045752
| false
| 0
| 0.026144
| 0
| 0.071895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5865d2095f5aef7b5d410f84cf5c4f6a5e52d643
| 12,518
|
py
|
Python
|
Core/Solvers/MSSP/Deterministic_Solver.py
|
zztcok/SNAC_PSNAC
|
9119c325c2114ac7034362b5349ffc5b2ce895d6
|
[
"Apache-2.0"
] | 1
|
2020-12-22T23:04:59.000Z
|
2020-12-22T23:04:59.000Z
|
Core/Solvers/MSSP/Deterministic_Solver.py
|
zztcok/SNAC_PSNAC
|
9119c325c2114ac7034362b5349ffc5b2ce895d6
|
[
"Apache-2.0"
] | null | null | null |
Core/Solvers/MSSP/Deterministic_Solver.py
|
zztcok/SNAC_PSNAC
|
9119c325c2114ac7034362b5349ffc5b2ce895d6
|
[
"Apache-2.0"
] | 1
|
2020-12-21T21:46:04.000Z
|
2020-12-21T21:46:04.000Z
|
import os
import sys
import Core.DataImport.parse_data_cmds as parse_data_cmds
import Core.DataImport.import_data_class as import_data_class
from pyomo.environ import *
from pyomo.opt import SolverFactory
import itertools
from pyutilib.misc import Options
import time as timer
import pdb
import Core.scenario_class as scenario_class
import Core.Solvers.MSSP.defunction as defunction
import Core.Valuation as Valuation
import Core.Solvers.MTSSP.M2S_item as M2S_item
import gc
import random
#import resource
def Deterministic_PRDP_Solve(mipgap, model_data, output_directory):
    """Build and solve the deterministic-equivalent PRDP model with CPLEX.

    Enumerates every trial-outcome scenario, derives the non-anticipativity
    pairing (phi/phii/phij), solves the resulting model and writes a
    "Deterministic_Solution" file plus an "Output" summary into
    *output_directory* (created if missing).

    Parameters
    ----------
    mipgap : float
        Relative MIP gap tolerance passed to CPLEX.
    model_data :
        Imported data object; its ``_data`` mapping supplies products,
        trials, time steps, resources, costs and revenues.
    output_directory : str
        Directory for the result files.
    """
    ### Start Solution Timer
    # NOTE: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for elapsed-time measurement.
    start_time = timer.perf_counter()
    # init_mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    ## Solver Choice
    opt = SolverFactory("cplex")
    options = Options()
    opt.options.mip_tolerances_mipgap = mipgap
    ##########################################
    ### Generate Scenario
    ##########################################
    #### Problem Info For Scenario Generation
    num_product = len(model_data._data['product'][None])
    prod = model_data._data['product'][None]
    num_trial = len(model_data._data['trial'][None])
    sg = model_data._data['trial'][None]
    prob = model_data._data['probability']
    num_ts = len(model_data._data['time_step'][None])
    ### Generate all possible outcomes
    Outcomes = itertools.product(range(num_trial + 1), repeat=num_product)
    Outcomes = tuple(Outcomes)
    ### From Outcomes Name and Generate Scenarios
    scenario = 1
    List_of_Scenarios = {}
    SS = []
    for items in Outcomes:
        scenario_name = scenario
        List_of_Scenarios[scenario_name] = scenario_class.scenario(items, prob, prod, sg)
        SS.append(scenario_name)
        scenario += 1
    ##########################################################
    ### Input Parameters to Solver
    ##########################################################
    rev_max = {}
    gammaL = {}
    gammaD = {}
    duration = {}
    trial_cost = {}
    revenue_max = {}
    success = {}
    rev_run = {}
    rev_open = {}
    discounting_factor = {}
    ## Set product
    product = model_data._data['product'][None]
    ## Set stage_gate
    stage_gate = model_data._data['trial'][None]
    ## Set time step
    time_step = model_data._data['time_step'][None]
    ## Set resource type
    resource_type = model_data._data['resource_type'][None]
    ## Set duration
    duration = model_data._data['trial_duration']
    ## Set trial cost
    trial_cost = model_data._data['trial_cost']
    ## Set Discount Values
    for items in model_data._data['gammaL']:
        gammaL[items[0]] = model_data._data['gammaL'][items]
    for items in model_data._data['gammaD']:
        gammaD[items[0]] = model_data._data['gammaD'][items]
    ## Set Maximum Revenue
    for items in model_data._data['maximum_revenue']:
        revenue_max[items[0]] = model_data._data['maximum_revenue'][items]
    ## Set Last Trial
    last_trial = len(stage_gate)
    last_time_step = len(time_step)
    ## Calculate Success matrix
    success = M2S_item.calc_success(product, num_trial, List_of_Scenarios)
    ## Calculate running rev
    rev_run = M2S_item.calc_rr(revenue_max, gammaL, duration, product, stage_gate, time_step)
    ## Calculate open rev
    rev_open = M2S_item.calc_openrev(revenue_max, gammaL, duration, product, stage_gate, time_step, last_time_step)
    ## Calculate Discounting Factor
    discounting_factor = M2S_item.calc_discounting_factor(revenue_max, gammaL, trial_cost, product, stage_gate, last_time_step)
    ## Set Probabilities and Outcomes
    pb = {}
    outcome = {}
    for s in SS:
        pb[s] = List_of_Scenarios[s].probability
        outcome[s] = List_of_Scenarios[s].outcome
    resource_max = {}
    for items in model_data._data['max_resource']:
        resource_max[items[0]] = model_data._data['max_resource'][items]
    resource_required = {}
    resource_required = model_data._data['resource_requirement']
    #######################################################################
    ### Generate Non-Anticipativity Constraints
    #######################################################################
    # OC[s] is the outcome vector of scenario s, indexed by product order.
    OC = {}
    for s in SS:
        OC[s] = []
        for i in prod:
            OC[s].append(List_of_Scenarios[s].outcome[prod.index(i)])
    # Two scenarios are paired (phi) when their outcome vectors differ by
    # exactly one trial step of one product; phii/phij record which
    # product/trial distinguishes them.
    phi = {}
    phii = {}
    phij = {}
    for s in SS:
        for sp in SS:
            if sp > s:
                for i in prod:
                    OCtest = list(OC[s])
                    OCtest[prod.index(i)] += 1
                    OCtest2 = list(OC[s])
                    OCtest2[prod.index(i)] += -1
                    if OCtest == OC[sp]:
                        trl = OC[s][prod.index(i)] + 1
                        phi[(s, sp)] = 1
                        phii[(s, sp)] = i
                        phij[(s, sp)] = trl
                    if OCtest2 == OC[sp]:
                        trl = OC[sp][prod.index(i)] + 1
                        phi[(s, sp)] = 1
                        phii[(s, sp)] = i
                        phij[(s, sp)] = trl
    ############################################
    ### Solve Model
    ############################################
    print("Generating Model")
    model = defunction.de(prod, sg, time_step, resource_type, SS, resource_max, gammaL, gammaD, duration, trial_cost, resource_required, revenue_max, pb, success, last_time_step, last_trial, rev_run, rev_open, discounting_factor, phi, phii, phij, outcome)
    print("Solving Model")
    sttmr = timer.perf_counter()
    results = opt.solve(model)
    fttmr = timer.perf_counter()
    # fin_mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    model.solutions.load_from(results)
    print("Solve Complete")
    print("Generating Results")
    ### Make Output Directory
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    save_file = "Deterministic_Solution"
    results.write(filename=os.path.join(output_directory, save_file))
    full_model_NAC_count = len(model.NAC_Constraint) + len(model.NAC2_Constraint) + len(model.NAC3_Constraint)
    print('full_model_NAC_count', full_model_NAC_count)
    print('scenario pairs', len(phi))
    Finish_Time = timer.perf_counter()
    Total_Solve_Time = fttmr - sttmr
    Total_Time = Finish_Time - start_time
    Objective_Value = results['Problem'][0]['Lower bound']
    ### Generate New File Name
    save_file = "Output"
    ### Open save file (context manager guarantees it is closed)
    with open(os.path.join(output_directory, save_file), "w") as f:
        ### Generate file contents
        algorithm_time = 'Total Solve Time:' + ' ' + str(Total_Solve_Time)
        f.write(algorithm_time + '\n')
        algorithm_time = 'Total Time:' + ' ' + str(Total_Time)
        f.write(algorithm_time + '\n')
        objective = "ENPV:" + " " + str(Objective_Value)
        f.write(objective + '\n')
        # BUG FIX: the original concatenated ints directly into the strings
        # ('...' + full_model_NAC_count, '...' + len(phi)), which raises
        # TypeError; wrap both values in str().
        f.write('full_model_NAC_count' + ' ' + str(full_model_NAC_count) + '\n')
        f.write('scenario pairs' + ' ' + str(len(phi)) + '\n')
        # total_resource = "Total Memory:" + " " + str(fin_mem-init_mem)
        # f.write(total_resource + "\n")
def deterministic_PRDP_solve_with_return(mipgap, model_data, output_directory):
    """Solve the deterministic-equivalent PRDP model and return a results object.

    Same pipeline as Deterministic_PRDP_Solve, but additionally records peak
    memory usage, extracts the Decision_X schedule, and returns an
    MSSP_Results_Object(ENPV, solve time, memory delta, total time).

    Parameters
    ----------
    mipgap : float
        Relative MIP gap tolerance passed to CPLEX.
    model_data :
        Imported data object; its ``_data`` mapping supplies products,
        trials, time steps, resources, costs and revenues.
    output_directory : str
        Directory for the result files (created if missing).
    """
    # BUG FIX: the module-level `import resource` is commented out, so the
    # getrusage calls below raised NameError.  Import it here instead.
    # NOTE: the resource module is Unix-only.
    import resource
    ### Start Solution Timer
    # NOTE: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for elapsed-time measurement.
    start_time = timer.perf_counter()
    init_mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    ## Solver Choice
    opt = SolverFactory("cplex")
    options = Options()
    opt.options.mip_tolerances_mipgap = mipgap
    ##########################################
    ### Generate Scenario
    ##########################################
    #### Problem Info For Scenario Generation
    num_product = len(model_data._data['product'][None])
    prod = model_data._data['product'][None]
    num_trial = len(model_data._data['trial'][None])
    sg = model_data._data['trial'][None]
    prob = model_data._data['probability']
    num_ts = len(model_data._data['time_step'][None])
    ### Generate all possible outcomes
    Outcomes = itertools.product(range(num_trial + 1), repeat=num_product)
    Outcomes = tuple(Outcomes)
    ### From Outcomes Name and Generate Scenarios
    scenario = 1
    List_of_Scenarios = {}
    SS = []
    for items in Outcomes:
        scenario_name = scenario
        List_of_Scenarios[scenario_name] = scenario_class.scenario(items, prob, prod, sg)
        SS.append(scenario_name)
        scenario += 1
    ##########################################################
    ### Input Parameters to Solver
    ##########################################################
    rev_max = {}
    gammaL = {}
    gammaD = {}
    duration = {}
    trial_cost = {}
    revenue_max = {}
    success = {}
    rev_run = {}
    rev_open = {}
    discounting_factor = {}
    ## Set product
    product = model_data._data['product'][None]
    ## Set stage_gate
    stage_gate = model_data._data['trial'][None]
    ## Set time step
    time_step = model_data._data['time_step'][None]
    ## Set resource type
    resource_type = model_data._data['resource_type'][None]
    ## Set duration
    duration = model_data._data['trial_duration']
    ## Set trial cost
    trial_cost = model_data._data['trial_cost']
    ## Set Discount Values
    for items in model_data._data['gammaL']:
        gammaL[items[0]] = model_data._data['gammaL'][items]
    for items in model_data._data['gammaD']:
        gammaD[items[0]] = model_data._data['gammaD'][items]
    ## Set Maximum Revenue
    for items in model_data._data['maximum_revenue']:
        revenue_max[items[0]] = model_data._data['maximum_revenue'][items]
    ## Set Last Trial
    last_trial = len(stage_gate)
    last_time_step = len(time_step)
    ## Calculate Success matrix
    success = M2S_item.calc_success(product, num_trial, List_of_Scenarios)
    ## Calculate running rev
    rev_run = M2S_item.calc_rr(revenue_max, gammaL, duration, product, stage_gate, time_step)
    ## Calculate open rev
    rev_open = M2S_item.calc_openrev(revenue_max, gammaL, duration, product, stage_gate, time_step, last_time_step)
    ## Calculate Discounting Factor
    discounting_factor = M2S_item.calc_discounting_factor(revenue_max, gammaL, trial_cost, product, stage_gate, last_time_step)
    ## Set Probabilities and Outcomes
    pb = {}
    outcome = {}
    for s in SS:
        pb[s] = List_of_Scenarios[s].probability
        outcome[s] = List_of_Scenarios[s].outcome
    resource_max = {}
    for items in model_data._data['max_resource']:
        resource_max[items[0]] = model_data._data['max_resource'][items]
    resource_required = {}
    resource_required = model_data._data['resource_requirement']
    #######################################################################
    ### Generate Non-Anticipativity Constraints
    #######################################################################
    # OC[s] is the outcome vector of scenario s, indexed by product order.
    OC = {}
    for s in SS:
        OC[s] = []
        for i in prod:
            OC[s].append(List_of_Scenarios[s].outcome[prod.index(i)])
    # Two scenarios are paired (phi) when their outcome vectors differ by
    # exactly one trial step of one product; phii/phij record which
    # product/trial distinguishes them.
    phi = {}
    phii = {}
    phij = {}
    for s in SS:
        for sp in SS:
            if sp > s:
                for i in prod:
                    OCtest = list(OC[s])
                    OCtest[prod.index(i)] += 1
                    OCtest2 = list(OC[s])
                    OCtest2[prod.index(i)] += -1
                    if OCtest == OC[sp]:
                        trl = OC[s][prod.index(i)] + 1
                        phi[(s, sp)] = 1
                        phii[(s, sp)] = i
                        phij[(s, sp)] = trl
                    if OCtest2 == OC[sp]:
                        trl = OC[sp][prod.index(i)] + 1
                        phi[(s, sp)] = 1
                        phii[(s, sp)] = i
                        phij[(s, sp)] = trl
    ############################################
    ### Solve Model
    ############################################
    model = defunction.de(prod, sg, time_step, resource_type, SS, resource_max, gammaL, gammaD, duration, trial_cost, resource_required, revenue_max, pb, success, last_time_step, last_trial, rev_run, rev_open, discounting_factor, phi, phii, phij, outcome)
    sttmr = timer.perf_counter()
    results = opt.solve(model)
    fttmr = timer.perf_counter()
    fin_mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    model.solutions.load_from(results)
    # Record every (product, trial, time) that is started in any scenario.
    # (The original used a bare try/except to set the value only once; since
    # the stored value is always 1, a plain assignment is equivalent.)
    Scenario_Results = {}
    for t in time_step:
        for s in SS:
            for i in product:
                for j in stage_gate:
                    if model.Decision_X[i, j, t, s].value == 1:
                        Scenario_Results[(i, j, t)] = 1
    ### Make Output Directory
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    save_file = "Deterministic_Solution"
    results.write(filename=os.path.join(output_directory, save_file))
    Finish_Time = timer.perf_counter()
    Total_Solve_Time = fttmr - sttmr
    Total_Time = Finish_Time - start_time
    Objective_Value = results['Problem'][0]['Lower bound']
    ### Generate New File Name
    save_file = "Output"
    ### Open save file (context manager guarantees it is closed)
    with open(os.path.join(output_directory, save_file), "w") as f:
        ### Generate file contents
        algorithm_time = 'Total Solve Time:' + ' ' + str(Total_Solve_Time)
        f.write(algorithm_time + '\n')
        algorithm_time = 'Total Time:' + ' ' + str(Total_Time)
        f.write(algorithm_time + '\n')
        objective = "ENPV:" + " " + str(Objective_Value)
        f.write(objective + '\n')
        total_resource = "Total Memory:" + " " + str(fin_mem - init_mem)
        f.write(total_resource + "\n")
        f.write(str(Scenario_Results) + "\n")
    from Core.Solvers.MSSP.MSSP_Results_Object import MSSP_Results_Object
    return_object = MSSP_Results_Object(Objective_Value, Total_Solve_Time, (fin_mem - init_mem), Total_Time)
    return return_object
| 28.193694
| 240
| 0.650983
| 1,655
| 12,518
| 4.680363
| 0.116616
| 0.051123
| 0.070488
| 0.023238
| 0.866383
| 0.860315
| 0.860315
| 0.860315
| 0.851536
| 0.851536
| 0
| 0.004547
| 0.156654
| 12,518
| 443
| 241
| 28.257336
| 0.729184
| 0.112478
| 0
| 0.819231
| 0
| 0
| 0.073287
| 0.004352
| 0.026923
| 0
| 0
| 0
| 0
| 1
| 0.007692
| false
| 0
| 0.065385
| 0
| 0.076923
| 0.023077
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
54630eb486a7e3d0ba796897e18771a8c0bff018
| 2,512
|
py
|
Python
|
AdventOfCode2016/Day13/Day13.py
|
MattTitmas/AdventOfCode
|
36be4f6bf973f77ff93b08dc69c977bb11951f27
|
[
"MIT"
] | null | null | null |
AdventOfCode2016/Day13/Day13.py
|
MattTitmas/AdventOfCode
|
36be4f6bf973f77ff93b08dc69c977bb11951f27
|
[
"MIT"
] | null | null | null |
AdventOfCode2016/Day13/Day13.py
|
MattTitmas/AdventOfCode
|
36be4f6bf973f77ff93b08dc69c977bb11951f27
|
[
"MIT"
] | null | null | null |
from math import prod
def part1(wanted=(31, 39), favorite=None):
    """Return the fewest steps from (1, 1) to *wanted* (AoC 2016 day 13).

    Parameters
    ----------
    wanted : tuple[int, int]
        Target (x, y) coordinate; defaults to the puzzle target (31, 39).
    favorite : int | None
        The office designer's favorite number.  When None (the original
        behaviour) it is read from ``input.txt``.

    Fixes vs. the original: the input file is closed via a context manager,
    and the returned key uses *wanted* instead of a duplicated hard-coded
    (31, 39) literal.
    """
    if favorite is None:
        with open("input.txt", "r") as f:
            favorite = int(f.read())

    def is_open(x, y):
        # A cell is open space when the bit count of the puzzle polynomial
        # is even; negative coordinates are outside the maze.
        return bin(x * x + 3 * x + 2 * x * y + y + y * y + favorite).count("1") % 2 == 0

    visited = {}                 # coordinate -> settled shortest distance
    frontier = {(1, 1): 0}       # coordinate -> best tentative distance
    while wanted not in visited:
        # Settle the pending cell with the smallest tentative distance
        # (uniform-cost search, as in the original).
        (cx, cy), dist = min(frontier.items(), key=lambda kv: kv[1])
        del frontier[(cx, cy)]
        visited[(cx, cy)] = dist
        # Orthogonal neighbours only.
        for nx, ny in ((cx - 1, cy), (cx + 1, cy), (cx, cy - 1), (cx, cy + 1)):
            if nx < 0 or ny < 0 or (nx, ny) in visited or not is_open(nx, ny):
                continue
            frontier[(nx, ny)] = min(frontier.get((nx, ny), dist + 1), dist + 1)
    return visited[wanted]
def part2(favorite=None, max_steps=50):
    """Count the locations reachable from (1, 1) in at most *max_steps* steps.

    Parameters
    ----------
    favorite : int | None
        The office designer's favorite number.  When None (the original
        behaviour) it is read from ``input.txt``.
    max_steps : int
        Distance budget; defaults to the puzzle's 50.

    Fixes vs. the original: the input file is closed via a context manager,
    and the hard-coded ``< 51`` bound is the parameter ``max_steps``.
    """
    if favorite is None:
        with open("input.txt", "r") as f:
            favorite = int(f.read())

    def is_open(x, y):
        # A cell is open space when the bit count of the puzzle polynomial
        # is even; negative coordinates are outside the maze.
        return bin(x * x + 3 * x + 2 * x * y + y + y * y + favorite).count("1") % 2 == 0

    visited = {}                 # coordinate -> settled shortest distance
    frontier = {(1, 1): 0}       # coordinate -> best tentative distance
    # Keep settling cells while the nearest pending one is within budget
    # (the original looped while the minimum pending distance was < 51).
    while frontier and min(frontier.values()) <= max_steps:
        (cx, cy), dist = min(frontier.items(), key=lambda kv: kv[1])
        del frontier[(cx, cy)]
        visited[(cx, cy)] = dist
        for nx, ny in ((cx - 1, cy), (cx + 1, cy), (cx, cy - 1), (cx, cy + 1)):
            if nx < 0 or ny < 0 or (nx, ny) in visited or not is_open(nx, ny):
                continue
            frontier[(nx, ny)] = min(frontier.get((nx, ny), dist + 1), dist + 1)
    return len(visited)
if __name__ == "__main__":
    # Guarded so importing this module does not read input.txt and solve
    # both parts as a side effect.
    print(f"Answer to part 1: {part1()}")
    print(f"Answer to part 2: {part2()}")
| 41.180328
| 157
| 0.591959
| 296
| 2,512
| 5.023649
| 0.212838
| 0.086079
| 0.0269
| 0.02959
| 0.839274
| 0.770679
| 0.770679
| 0.738399
| 0.738399
| 0.738399
| 0
| 0.027824
| 0.284634
| 2,512
| 61
| 158
| 41.180328
| 0.799666
| 0
| 0
| 0.716981
| 0
| 0
| 0.033028
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056604
| false
| 0
| 0.018868
| 0
| 0.132075
| 0.037736
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
54b5bdbac3de796c5c8406022f0f5315a0a047ea
| 99,800
|
py
|
Python
|
tests/test_grid.py
|
obeezzy/lpminimk3
|
395264d30cb1813beb49aad107db0c3ab1210ae0
|
[
"MIT"
] | 3
|
2021-10-12T17:06:57.000Z
|
2022-02-25T21:58:47.000Z
|
tests/test_grid.py
|
obeezzy/lpminimk3
|
395264d30cb1813beb49aad107db0c3ab1210ae0
|
[
"MIT"
] | 1
|
2021-10-12T21:05:15.000Z
|
2021-10-12T21:05:15.000Z
|
tests/test_grid.py
|
obeezzy/lpminimk3
|
395264d30cb1813beb49aad107db0c3ab1210ae0
|
[
"MIT"
] | null | null | null |
import unittest
from lpminimk3.__init__ import Grid, ButtonEvent
from lpminimk3.colors import ColorPalette,\
ColorShadeStore
from tests._vlpminimk3 import VirtualMidiEvent,\
create_virtual_launchpad
class TestGrid(unittest.TestCase):
    """Checks the geometry and identity of a virtual Launchpad's grid."""

    def setUp(self):
        # A fresh virtual device for every test case.
        self.lp = create_virtual_launchpad()

    def tearDown(self):
        self.lp.close()

    def test_launchpad(self):
        self.lp.open()
        grid = self.lp.grid
        self.assertEqual(grid.launchpad, self.lp, 'Launchpad mismatch.')

    def test_max_x(self):
        self.lp.open()
        self.assertEqual(self.lp.grid.max_x, 7, 'Max X mismatch.')

    def test_max_y(self):
        self.lp.open()
        self.assertEqual(self.lp.grid.max_y, 7, 'Max Y mismatch.')

    def test_width(self):
        self.lp.open()
        self.assertEqual(self.lp.grid.width, 8, 'Width mismatch.')

    def test_height(self):
        self.lp.open()
        self.assertEqual(self.lp.grid.height, 8, 'Height mismatch.')

    def test_eq(self):
        self.lp.open()
        other = create_virtual_launchpad(client_id=99)
        other.open()
        # Same device compares equal to itself; distinct devices do not.
        self.assertTrue(self.lp.grid == self.lp.grid, 'Grid mismatch.')
        self.assertTrue(self.lp.grid != other.grid, 'Grid mismatch.')
class TestLed(unittest.TestCase):
def setUp(self):
self.lp = create_virtual_launchpad()
def tearDown(self):
self.lp.close()
def test_grid_led(self):
self.lp.open()
another_lp = create_virtual_launchpad(client_id=99)
another_lp.open()
self.assertTrue(self.lp.grid.led(0, 0) == self.lp.grid.led(0, 0),
'LED mismatch.')
self.assertTrue(self.lp.grid.led(0, 0) != another_lp.grid.led(0, 0),
'LED mismatch.')
self.assertTrue(self.lp.grid.led(0, 0) == self.lp.grid.led(0, 0, layout=Grid.CUSTOM), # noqa
'LED mismatch.')
self.assertTrue(self.lp.grid.led(0, 0) != another_lp.grid.led(0, 0, layout=Grid.CUSTOM), # noqa
'LED mismatch.')
def test_grid_panel_led(self):
self.lp.open()
another_lp = create_virtual_launchpad(client_id=99)
another_lp.open()
self.assertTrue(self.lp.grid.led(0, 0) == self.lp.panel.led(0, 1),
'LED mismatch.')
self.assertTrue(self.lp.grid.led(0, 0) != another_lp.panel.led(0, 1),
'LED mismatch.')
self.assertTrue(self.lp.grid.led(0, 0) == self.lp.panel.led(0, 1, layout=Grid.CUSTOM), # noqa
'LED mismatch.')
self.assertTrue(self.lp.grid.led(0, 0) != another_lp.panel.led(0, 1, layout=Grid.CUSTOM), # noqa
'LED mismatch.')
def test_set_by_index(self):
self.lp.open()
for color_index in range(128):
self.lp.grid.led('0x0').color = color_index
with self.assertRaises(ValueError):
self.lp.grid.led('0x0').color = -1
with self.assertRaises(ValueError):
self.lp.grid.led('0x0').color = 128
def test_set_by_led_range(self):
self.lp.open()
for led in self.lp.grid.led_range():
for color_index in range(128):
led.color = color_index
def test_set_by_shade(self):
    """Color shades are settable via palette objects and every accepted
    string spelling; malformed spellings raise.
    """
    self.lp.open()
    self.assertEqual(len(ColorShadeStore.COLOR_GROUPS),
                     len(ColorShadeStore.COLOR_GROUP_SYMBOLS),
                     'Color group to color group symbol mismatch.')
    # Every accepted color value, in the same order the assertions
    # originally ran.  Note 'white1' is intentionally absent.
    accepted_colors = (
        ColorPalette.Red.SHADE_1,
        ColorPalette.Orange.SHADE_1,
        ColorPalette.Yellow.SHADE_1,
        ColorPalette.Green.SHADE_1,
        ColorPalette.Blue.SHADE_1,
        ColorPalette.Violet.SHADE_1,
        ColorPalette.White.SHADE_1,
        'r', 'o', 'y', 'g', 'b', 'v', 'w',
        'r1', 'o1', 'y1', 'g1', 'b1', 'v1', 'w1',
        'red', 'orange', 'yellow', 'green', 'blue', 'violet', 'white',
        'red1', 'orange1', 'yellow1', 'green1', 'blue1', 'violet1',
        'red0', 'orange0', 'yellow0', 'green0', 'blue0', 'violet0',
        'white0',
    )
    for accepted in accepted_colors:
        self.lp.grid.led('0x0').color = accepted
    # Malformed string spellings must raise ValueError.
    for malformed in ('1r', 're', 'gree3'):
        with self.assertRaises(ValueError):
            self.lp.grid.led('0x0').color = malformed
    # A tuple is the wrong type entirely.
    with self.assertRaises(TypeError):
        self.lp.grid.led('0x0').color = (0, 0)
    # Negative shade suffixes are also rejected.
    for malformed in ('blue-0', 'yellow-1'):
        with self.assertRaises(ValueError):
            self.lp.grid.led('0x0').color = malformed
def test_reset(self):
    """An LED with a color set can be reset without error."""
    self.lp.open()
    target_led = self.lp.grid.led('0x0')
    target_led.color = 1
    target_led.reset()
def test_led_range(self):
    """Sweep the full palette across every LED from led_range().

    NOTE(review): duplicates test_set_by_led_range — consider removing one.
    """
    self.lp.open()
    for single_led in self.lp.grid.led_range():
        for palette_index in range(128):
            single_led.color = palette_index
def test_id_by_xy(self):
    """LED ids run row-major: 1 at (0, 0) through 64 at (7, 7)."""
    self.lp.open()
    for y in range(8):
        for x in range(8):
            expected_id = y * 8 + x + 1
            self.assertEqual(self.lp.grid.led(x, y).id,
                             expected_id,
                             'ID mismatch.')
def test_x_by_xy(self):
    """Each LED's x property echoes the x coordinate it was looked up by."""
    self.lp.open()
    for y in range(8):
        for x in range(8):
            self.assertEqual(self.lp.grid.led(x, y).x,
                             x,
                             'X mismatch.')
def test_y_by_xy(self):
    """Each LED's y property echoes the y coordinate it was looked up by."""
    self.lp.open()
    for y in range(8):
        for x in range(8):
            self.assertEqual(self.lp.grid.led(x, y).y,
                             y,
                             'Y mismatch.')
def test_name_by_name(self):
    """LED names follow the '<x>x<y>' pattern for every grid coordinate."""
    self.lp.open()
    for y in range(8):
        for x in range(8):
            expected_name = '{}x{}'.format(x, y)
            self.assertEqual(self.lp.grid.led(x, y).name,
                             expected_name,
                             'Name mismatch.')
def test_color_by_xy(self):
    """Every LED starts with no color set (None) after opening."""
    self.lp.open()
    for y in range(8):
        for x in range(8):
            # assertEqual against None kept to mirror the original
            # equality-based check.
            self.assertEqual(self.lp.grid.led(x, y).color,
                             None,
                             'Color mismatch.')
def test_name_by_id(self):
    """Looking an LED up by its '<x>x<y>' name yields the row-major id;
    empty or malformed names raise ValueError.
    """
    self.lp.open()
    for y in range(8):
        for x in range(8):
            led_name = '{}x{}'.format(x, y)
            self.assertEqual(self.lp.grid.led(led_name).id,
                             y * 8 + x + 1,
                             'ID mismatch.')
    with self.assertRaises(ValueError):
        self.lp.grid.led('')
    with self.assertRaises(ValueError):
        self.lp.grid.led('s')
def test_id_by_name(self):
self.lp.open()
self.assertEqual(self.lp.grid.led(0).name, '0x0', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(1).name, '1x0', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(2).name, '2x0', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(3).name, '3x0', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(4).name, '4x0', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(5).name, '5x0', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(6).name, '6x0', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(7).name, '7x0', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(8).name, '0x1', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(9).name, '1x1', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(10).name, '2x1', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(11).name, '3x1', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(12).name, '4x1', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(13).name, '5x1', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(14).name, '6x1', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(15).name, '7x1', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(16).name, '0x2', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(17).name, '1x2', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(18).name, '2x2', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(19).name, '3x2', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(20).name, '4x2', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(21).name, '5x2', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(22).name, '6x2', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(23).name, '7x2', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(24).name, '0x3', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(25).name, '1x3', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(26).name, '2x3', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(27).name, '3x3', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(28).name, '4x3', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(29).name, '5x3', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(30).name, '6x3', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(31).name, '7x3', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(32).name, '0x4', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(33).name, '1x4', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(34).name, '2x4', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(35).name, '3x4', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(36).name, '4x4', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(37).name, '5x4', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(38).name, '6x4', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(39).name, '7x4', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(40).name, '0x5', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(41).name, '1x5', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(42).name, '2x5', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(43).name, '3x5', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(44).name, '4x5', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(45).name, '5x5', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(46).name, '6x5', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(47).name, '7x5', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(48).name, '0x6', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(49).name, '1x6', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(50).name, '2x6', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(51).name, '3x6', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(52).name, '4x6', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(53).name, '5x6', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(54).name, '6x6', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(55).name, '7x6', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(56).name, '0x7', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(57).name, '1x7', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(58).name, '2x7', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(59).name, '3x7', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(60).name, '4x7', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(61).name, '5x7', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(62).name, '6x7', 'Name mismatch.') # noqa
self.assertEqual(self.lp.grid.led(63).name, '7x7', 'Name mismatch.') # noqa
def test_midi_value_prog_layout(self):
self.lp.open()
self.assertEqual(self.lp.grid.led(0, 0).midi_value, 0x51, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(1, 0).midi_value, 0x52, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(2, 0).midi_value, 0x53, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(3, 0).midi_value, 0x54, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(4, 0).midi_value, 0x55, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(5, 0).midi_value, 0x56, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(6, 0).midi_value, 0x57, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(7, 0).midi_value, 0x58, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(0, 1).midi_value, 0x47, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(1, 1).midi_value, 0x48, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(2, 1).midi_value, 0x49, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(3, 1).midi_value, 0x4a, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(4, 1).midi_value, 0x4b, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(5, 1).midi_value, 0x4c, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(6, 1).midi_value, 0x4d, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(7, 1).midi_value, 0x4e, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(0, 2).midi_value, 0x3d, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(1, 2).midi_value, 0x3e, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(2, 2).midi_value, 0x3f, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(3, 2).midi_value, 0x40, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(4, 2).midi_value, 0x41, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(5, 2).midi_value, 0x42, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(6, 2).midi_value, 0x43, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(7, 2).midi_value, 0x44, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(0, 3).midi_value, 0x33, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(1, 3).midi_value, 0x34, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(2, 3).midi_value, 0x35, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(3, 3).midi_value, 0x36, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(4, 3).midi_value, 0x37, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(5, 3).midi_value, 0x38, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(6, 3).midi_value, 0x39, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(7, 3).midi_value, 0x3a, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(0, 4).midi_value, 0x29, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(1, 4).midi_value, 0x2a, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(2, 4).midi_value, 0x2b, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(3, 4).midi_value, 0x2c, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(4, 4).midi_value, 0x2d, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(5, 4).midi_value, 0x2e, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(6, 4).midi_value, 0x2f, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(7, 4).midi_value, 0x30, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(0, 5).midi_value, 0x1f, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(1, 5).midi_value, 0x20, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(2, 5).midi_value, 0x21, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(3, 5).midi_value, 0x22, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(4, 5).midi_value, 0x23, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(5, 5).midi_value, 0x24, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(6, 5).midi_value, 0x25, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(7, 5).midi_value, 0x26, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(0, 6).midi_value, 0x15, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(1, 6).midi_value, 0x16, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(2, 6).midi_value, 0x17, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(3, 6).midi_value, 0x18, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(4, 6).midi_value, 0x19, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(5, 6).midi_value, 0x1a, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(6, 6).midi_value, 0x1b, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(7, 6).midi_value, 0x1c, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(0, 7).midi_value, 0x0b, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(1, 7).midi_value, 0x0c, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(2, 7).midi_value, 0x0d, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(3, 7).midi_value, 0x0e, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(4, 7).midi_value, 0x0f, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(5, 7).midi_value, 0x10, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(6, 7).midi_value, 0x11, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(7, 7).midi_value, 0x12, 'MIDI value mismatch.') # noqa
def test_midi_value_custom_layout(self):
self.lp.open()
self.assertEqual(self.lp.grid.led(0, 0, layout=Grid.CUSTOM).midi_value, 0x40, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(1, 0, layout=Grid.CUSTOM).midi_value, 0x41, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(2, 0, layout=Grid.CUSTOM).midi_value, 0x42, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(3, 0, layout=Grid.CUSTOM).midi_value, 0x43, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(4, 0, layout=Grid.CUSTOM).midi_value, 0x60, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(5, 0, layout=Grid.CUSTOM).midi_value, 0x61, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(6, 0, layout=Grid.CUSTOM).midi_value, 0x62, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(7, 0, layout=Grid.CUSTOM).midi_value, 0x63, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(0, 1, layout=Grid.CUSTOM).midi_value, 0x3c, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(1, 1, layout=Grid.CUSTOM).midi_value, 0x3d, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(2, 1, layout=Grid.CUSTOM).midi_value, 0x3e, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(3, 1, layout=Grid.CUSTOM).midi_value, 0x3f, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(4, 1, layout=Grid.CUSTOM).midi_value, 0x5c, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(5, 1, layout=Grid.CUSTOM).midi_value, 0x5d, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(6, 1, layout=Grid.CUSTOM).midi_value, 0x5e, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(7, 1, layout=Grid.CUSTOM).midi_value, 0x5f, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(0, 2, layout=Grid.CUSTOM).midi_value, 0x38, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(1, 2, layout=Grid.CUSTOM).midi_value, 0x39, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(2, 2, layout=Grid.CUSTOM).midi_value, 0x3a, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(3, 2, layout=Grid.CUSTOM).midi_value, 0x3b, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(4, 2, layout=Grid.CUSTOM).midi_value, 0x58, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(5, 2, layout=Grid.CUSTOM).midi_value, 0x59, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(6, 2, layout=Grid.CUSTOM).midi_value, 0x5a, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(7, 2, layout=Grid.CUSTOM).midi_value, 0x5b, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(0, 3, layout=Grid.CUSTOM).midi_value, 0x34, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(1, 3, layout=Grid.CUSTOM).midi_value, 0x35, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(2, 3, layout=Grid.CUSTOM).midi_value, 0x36, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(3, 3, layout=Grid.CUSTOM).midi_value, 0x37, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(4, 3, layout=Grid.CUSTOM).midi_value, 0x54, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(5, 3, layout=Grid.CUSTOM).midi_value, 0x55, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(6, 3, layout=Grid.CUSTOM).midi_value, 0x56, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(7, 3, layout=Grid.CUSTOM).midi_value, 0x57, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(0, 4, layout=Grid.CUSTOM).midi_value, 0x30, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(1, 4, layout=Grid.CUSTOM).midi_value, 0x31, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(2, 4, layout=Grid.CUSTOM).midi_value, 0x32, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(3, 4, layout=Grid.CUSTOM).midi_value, 0x33, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(4, 4, layout=Grid.CUSTOM).midi_value, 0x50, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(5, 4, layout=Grid.CUSTOM).midi_value, 0x51, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(6, 4, layout=Grid.CUSTOM).midi_value, 0x52, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(7, 4, layout=Grid.CUSTOM).midi_value, 0x53, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(0, 5, layout=Grid.CUSTOM).midi_value, 0x2c, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(1, 5, layout=Grid.CUSTOM).midi_value, 0x2d, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(2, 5, layout=Grid.CUSTOM).midi_value, 0x2e, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(3, 5, layout=Grid.CUSTOM).midi_value, 0x2f, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(4, 5, layout=Grid.CUSTOM).midi_value, 0x4c, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(5, 5, layout=Grid.CUSTOM).midi_value, 0x4d, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(6, 5, layout=Grid.CUSTOM).midi_value, 0x4e, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(7, 5, layout=Grid.CUSTOM).midi_value, 0x4f, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(0, 6, layout=Grid.CUSTOM).midi_value, 0x28, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(1, 6, layout=Grid.CUSTOM).midi_value, 0x29, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(2, 6, layout=Grid.CUSTOM).midi_value, 0x2a, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(3, 6, layout=Grid.CUSTOM).midi_value, 0x2b, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(4, 6, layout=Grid.CUSTOM).midi_value, 0x48, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(5, 6, layout=Grid.CUSTOM).midi_value, 0x49, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(6, 6, layout=Grid.CUSTOM).midi_value, 0x4a, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(7, 6, layout=Grid.CUSTOM).midi_value, 0x4b, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(0, 7, layout=Grid.CUSTOM).midi_value, 0x24, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(1, 7, layout=Grid.CUSTOM).midi_value, 0x25, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(2, 7, layout=Grid.CUSTOM).midi_value, 0x26, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(3, 7, layout=Grid.CUSTOM).midi_value, 0x27, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(4, 7, layout=Grid.CUSTOM).midi_value, 0x44, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(5, 7, layout=Grid.CUSTOM).midi_value, 0x45, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(6, 7, layout=Grid.CUSTOM).midi_value, 0x46, 'MIDI value mismatch.') # noqa
self.assertEqual(self.lp.grid.led(7, 7, layout=Grid.CUSTOM).midi_value, 0x47, 'MIDI value mismatch.') # noqa
class TestButtonGroup(unittest.TestCase):
    def setUp(self):
        # Fresh virtual (no hardware required) launchpad per test.
        self.lp = create_virtual_launchpad()
    def tearDown(self):
        # Close the launchpad created in setUp so each test is isolated.
        self.lp.close()
def test_names_by_name(self):
self.lp.open()
self.assertCountEqual(['0x0'],
self.lp.grid.buttons('0x0').names,
'Button name mismatch.')
self.assertCountEqual(['1x0'],
self.lp.grid.buttons('1x0').names,
'Button name mismatch.')
self.assertCountEqual(['2x0'],
self.lp.grid.buttons('2x0').names,
'Button name mismatch.')
self.assertCountEqual(['3x0'],
self.lp.grid.buttons('3x0').names,
'Button name mismatch.')
self.assertCountEqual(['4x0'],
self.lp.grid.buttons('4x0').names,
'Button name mismatch.')
self.assertCountEqual(['5x0'],
self.lp.grid.buttons('5x0').names,
'Button name mismatch.')
self.assertCountEqual(['6x0'],
self.lp.grid.buttons('6x0').names,
'Button name mismatch.')
self.assertCountEqual(['7x0'],
self.lp.grid.buttons('7x0').names,
'Button name mismatch.')
self.assertCountEqual(['0x1'],
self.lp.grid.buttons('0x1').names,
'Button name mismatch.')
self.assertCountEqual(['1x1'],
self.lp.grid.buttons('1x1').names,
'Button name mismatch.')
self.assertCountEqual(['2x1'],
self.lp.grid.buttons('2x1').names,
'Button name mismatch.')
self.assertCountEqual(['3x1'],
self.lp.grid.buttons('3x1').names,
'Button name mismatch.')
self.assertCountEqual(['4x1'],
self.lp.grid.buttons('4x1').names,
'Button name mismatch.')
self.assertCountEqual(['5x1'],
self.lp.grid.buttons('5x1').names,
'Button name mismatch.')
self.assertCountEqual(['6x1'],
self.lp.grid.buttons('6x1').names,
'Button name mismatch.')
self.assertCountEqual(['7x1'],
self.lp.grid.buttons('7x1').names,
'Button name mismatch.')
self.assertCountEqual(['0x2'],
self.lp.grid.buttons('0x2').names,
'Button name mismatch.')
self.assertCountEqual(['1x2'],
self.lp.grid.buttons('1x2').names,
'Button name mismatch.')
self.assertCountEqual(['2x2'],
self.lp.grid.buttons('2x2').names,
'Button name mismatch.')
self.assertCountEqual(['3x2'],
self.lp.grid.buttons('3x2').names,
'Button name mismatch.')
self.assertCountEqual(['4x2'],
self.lp.grid.buttons('4x2').names,
'Button name mismatch.')
self.assertCountEqual(['5x2'],
self.lp.grid.buttons('5x2').names,
'Button name mismatch.')
self.assertCountEqual(['6x2'],
self.lp.grid.buttons('6x2').names,
'Button name mismatch.')
self.assertCountEqual(['7x2'],
self.lp.grid.buttons('7x2').names,
'Button name mismatch.')
self.assertCountEqual(['0x3'],
self.lp.grid.buttons('0x3').names,
'Button name mismatch.')
self.assertCountEqual(['1x3'],
self.lp.grid.buttons('1x3').names,
'Button name mismatch.')
self.assertCountEqual(['2x3'],
self.lp.grid.buttons('2x3').names,
'Button name mismatch.')
self.assertCountEqual(['3x3'],
self.lp.grid.buttons('3x3').names,
'Button name mismatch.')
self.assertCountEqual(['4x3'],
self.lp.grid.buttons('4x3').names,
'Button name mismatch.')
self.assertCountEqual(['5x3'],
self.lp.grid.buttons('5x3').names,
'Button name mismatch.')
self.assertCountEqual(['6x3'],
self.lp.grid.buttons('6x3').names,
'Button name mismatch.')
self.assertCountEqual(['7x3'],
self.lp.grid.buttons('7x3').names,
'Button name mismatch.')
self.assertCountEqual(['0x4'],
self.lp.grid.buttons('0x4').names,
'Button name mismatch.')
self.assertCountEqual(['1x4'],
self.lp.grid.buttons('1x4').names,
'Button name mismatch.')
self.assertCountEqual(['2x4'],
self.lp.grid.buttons('2x4').names,
'Button name mismatch.')
self.assertCountEqual(['3x4'],
self.lp.grid.buttons('3x4').names,
'Button name mismatch.')
self.assertCountEqual(['4x4'],
self.lp.grid.buttons('4x4').names,
'Button name mismatch.')
self.assertCountEqual(['5x4'],
self.lp.grid.buttons('5x4').names,
'Button name mismatch.')
self.assertCountEqual(['6x4'],
self.lp.grid.buttons('6x4').names,
'Button name mismatch.')
self.assertCountEqual(['7x4'],
self.lp.grid.buttons('7x4').names,
'Button name mismatch.')
self.assertCountEqual(['0x5'],
self.lp.grid.buttons('0x5').names,
'Button name mismatch.')
self.assertCountEqual(['1x5'],
self.lp.grid.buttons('1x5').names,
'Button name mismatch.')
self.assertCountEqual(['2x5'],
self.lp.grid.buttons('2x5').names,
'Button name mismatch.')
self.assertCountEqual(['3x5'],
self.lp.grid.buttons('3x5').names,
'Button name mismatch.')
self.assertCountEqual(['4x5'],
self.lp.grid.buttons('4x5').names,
'Button name mismatch.')
self.assertCountEqual(['5x5'],
self.lp.grid.buttons('5x5').names,
'Button name mismatch.')
self.assertCountEqual(['6x5'],
self.lp.grid.buttons('6x5').names,
'Button name mismatch.')
self.assertCountEqual(['7x5'],
self.lp.grid.buttons('7x5').names,
'Button name mismatch.')
self.assertCountEqual(['0x6'],
self.lp.grid.buttons('0x6').names,
'Button name mismatch.')
self.assertCountEqual(['1x6'],
self.lp.grid.buttons('1x6').names,
'Button name mismatch.')
self.assertCountEqual(['2x6'],
self.lp.grid.buttons('2x6').names,
'Button name mismatch.')
self.assertCountEqual(['3x6'],
self.lp.grid.buttons('3x6').names,
'Button name mismatch.')
self.assertCountEqual(['4x6'],
self.lp.grid.buttons('4x6').names,
'Button name mismatch.')
self.assertCountEqual(['5x6'],
self.lp.grid.buttons('5x6').names,
'Button name mismatch.')
self.assertCountEqual(['6x6'],
self.lp.grid.buttons('6x6').names,
'Button name mismatch.')
self.assertCountEqual(['7x6'],
self.lp.grid.buttons('7x6').names,
'Button name mismatch.')
self.assertCountEqual(['0x7'],
self.lp.grid.buttons('0x7').names,
'Button name mismatch.')
self.assertCountEqual(['1x7'],
self.lp.grid.buttons('1x7').names,
'Button name mismatch.')
self.assertCountEqual(['2x7'],
self.lp.grid.buttons('2x7').names,
'Button name mismatch.')
self.assertCountEqual(['3x7'],
self.lp.grid.buttons('3x7').names,
'Button name mismatch.')
self.assertCountEqual(['4x7'],
self.lp.grid.buttons('4x7').names,
'Button name mismatch.')
self.assertCountEqual(['5x7'],
self.lp.grid.buttons('5x7').names,
'Button name mismatch.')
self.assertCountEqual(['6x7'],
self.lp.grid.buttons('6x7').names,
'Button name mismatch.')
self.assertCountEqual(['7x7'],
self.lp.grid.buttons('7x7').names,
'Button name mismatch.')
self.assertCountEqual(['0x0', '5x5', '7x7'],
self.lp.grid.buttons('0x0', '5x5', '7x7').names, # noqa
'Button name mismatch.')
self.assertCountEqual(['0x0'],
self.lp.grid.buttons('0x0', '0x0', '0x0').names, # noqa
'Button name mismatch.')
self.assertCountEqual(['0x0', '1x0', '2x0', '3x0', '4x0', '5x0', '6x0', '7x0', # noqa
'0x1', '1x1', '2x1', '3x1', '4x1', '5x1', '6x1', '7x1', # noqa
'0x2', '1x2', '2x2', '3x2', '4x2', '5x2', '6x2', '7x2', # noqa
'0x3', '1x3', '2x3', '3x3', '4x3', '5x3', '6x3', '7x3', # noqa
'0x4', '1x4', '2x4', '3x4', '4x4', '5x4', '6x4', '7x4', # noqa
'0x5', '1x5', '2x5', '3x5', '4x5', '5x5', '6x5', '7x5', # noqa
'0x6', '1x6', '2x6', '3x6', '4x6', '5x6', '6x6', '7x6', # noqa
'0x7', '1x7', '2x7', '3x7', '4x7', '5x7', '6x7', '7x7'], # noqa
self.lp.grid.buttons().names,
'Button name mismatch.')
with self.assertRaises(ValueError):
self.lp.grid.buttons(None).names
with self.assertRaises(ValueError):
self.lp.grid.buttons('').names
def test_names_by_id(self):
self.lp.open()
self.assertCountEqual(['0x0'],
self.lp.grid.buttons(0).names,
'Button name mismatch.')
self.assertCountEqual(['1x0'],
self.lp.grid.buttons(1).names,
'Button name mismatch.')
self.assertCountEqual(['2x0'],
self.lp.grid.buttons(2).names,
'Button name mismatch.')
self.assertCountEqual(['3x0'],
self.lp.grid.buttons(3).names,
'Button name mismatch.')
self.assertCountEqual(['4x0'],
self.lp.grid.buttons(4).names,
'Button name mismatch.')
self.assertCountEqual(['5x0'],
self.lp.grid.buttons(5).names,
'Button name mismatch.')
self.assertCountEqual(['6x0'],
self.lp.grid.buttons(6).names,
'Button name mismatch.')
self.assertCountEqual(['7x0'],
self.lp.grid.buttons(7).names,
'Button name mismatch.')
self.assertCountEqual(['0x1'],
self.lp.grid.buttons(8).names,
'Button name mismatch.')
self.assertCountEqual(['1x1'],
self.lp.grid.buttons(9).names,
'Button name mismatch.')
self.assertCountEqual(['2x1'],
self.lp.grid.buttons(10).names,
'Button name mismatch.')
self.assertCountEqual(['3x1'],
self.lp.grid.buttons(11).names,
'Button name mismatch.')
self.assertCountEqual(['4x1'],
self.lp.grid.buttons(12).names,
'Button name mismatch.')
self.assertCountEqual(['5x1'],
self.lp.grid.buttons(13).names,
'Button name mismatch.')
self.assertCountEqual(['6x1'],
self.lp.grid.buttons(14).names,
'Button name mismatch.')
self.assertCountEqual(['7x1'],
self.lp.grid.buttons(15).names,
'Button name mismatch.')
self.assertCountEqual(['0x2'],
self.lp.grid.buttons(16).names,
'Button name mismatch.')
self.assertCountEqual(['1x2'],
self.lp.grid.buttons(17).names,
'Button name mismatch.')
self.assertCountEqual(['2x2'],
self.lp.grid.buttons(18).names,
'Button name mismatch.')
self.assertCountEqual(['3x2'],
self.lp.grid.buttons(19).names,
'Button name mismatch.')
self.assertCountEqual(['4x2'],
self.lp.grid.buttons(20).names,
'Button name mismatch.')
self.assertCountEqual(['5x2'],
self.lp.grid.buttons(21).names,
'Button name mismatch.')
self.assertCountEqual(['6x2'],
self.lp.grid.buttons(22).names,
'Button name mismatch.')
self.assertCountEqual(['7x2'],
self.lp.grid.buttons(23).names,
'Button name mismatch.')
self.assertCountEqual(['0x3'],
self.lp.grid.buttons(24).names,
'Button name mismatch.')
self.assertCountEqual(['1x3'],
self.lp.grid.buttons(25).names,
'Button name mismatch.')
self.assertCountEqual(['2x3'],
self.lp.grid.buttons(26).names,
'Button name mismatch.')
self.assertCountEqual(['3x3'],
self.lp.grid.buttons(27).names,
'Button name mismatch.')
self.assertCountEqual(['4x3'],
self.lp.grid.buttons(28).names,
'Button name mismatch.')
self.assertCountEqual(['5x3'],
self.lp.grid.buttons(29).names,
'Button name mismatch.')
self.assertCountEqual(['6x3'],
self.lp.grid.buttons(30).names,
'Button name mismatch.')
self.assertCountEqual(['7x3'],
self.lp.grid.buttons(31).names,
'Button name mismatch.')
self.assertCountEqual(['0x4'],
self.lp.grid.buttons(32).names,
'Button name mismatch.')
self.assertCountEqual(['1x4'],
self.lp.grid.buttons(33).names,
'Button name mismatch.')
self.assertCountEqual(['2x4'],
self.lp.grid.buttons(34).names,
'Button name mismatch.')
self.assertCountEqual(['3x4'],
self.lp.grid.buttons(35).names,
'Button name mismatch.')
self.assertCountEqual(['4x4'],
self.lp.grid.buttons(36).names,
'Button name mismatch.')
self.assertCountEqual(['5x4'],
self.lp.grid.buttons(37).names,
'Button name mismatch.')
self.assertCountEqual(['6x4'],
self.lp.grid.buttons(38).names,
'Button name mismatch.')
self.assertCountEqual(['7x4'],
self.lp.grid.buttons(39).names,
'Button name mismatch.')
self.assertCountEqual(['0x5'],
self.lp.grid.buttons(40).names,
'Button name mismatch.')
self.assertCountEqual(['1x5'],
self.lp.grid.buttons(41).names,
'Button name mismatch.')
self.assertCountEqual(['2x5'],
self.lp.grid.buttons(42).names,
'Button name mismatch.')
self.assertCountEqual(['3x5'],
self.lp.grid.buttons(43).names,
'Button name mismatch.')
self.assertCountEqual(['4x5'],
self.lp.grid.buttons(44).names,
'Button name mismatch.')
self.assertCountEqual(['5x5'],
self.lp.grid.buttons(45).names,
'Button name mismatch.')
self.assertCountEqual(['6x5'],
self.lp.grid.buttons(46).names,
'Button name mismatch.')
self.assertCountEqual(['7x5'],
self.lp.grid.buttons(47).names,
'Button name mismatch.')
self.assertCountEqual(['0x6'],
self.lp.grid.buttons(48).names,
'Button name mismatch.')
self.assertCountEqual(['1x6'],
self.lp.grid.buttons(49).names,
'Button name mismatch.')
self.assertCountEqual(['2x6'],
self.lp.grid.buttons(50).names,
'Button name mismatch.')
self.assertCountEqual(['3x6'],
self.lp.grid.buttons(51).names,
'Button name mismatch.')
self.assertCountEqual(['4x6'],
self.lp.grid.buttons(52).names,
'Button name mismatch.')
self.assertCountEqual(['5x6'],
self.lp.grid.buttons(53).names,
'Button name mismatch.')
self.assertCountEqual(['6x6'],
self.lp.grid.buttons(54).names,
'Button name mismatch.')
self.assertCountEqual(['7x6'],
self.lp.grid.buttons(55).names,
'Button name mismatch.')
self.assertCountEqual(['0x7'],
self.lp.grid.buttons(56).names,
'Button name mismatch.')
self.assertCountEqual(['1x7'],
self.lp.grid.buttons(57).names,
'Button name mismatch.')
self.assertCountEqual(['2x7'],
self.lp.grid.buttons(58).names,
'Button name mismatch.')
self.assertCountEqual(['3x7'],
self.lp.grid.buttons(59).names,
'Button name mismatch.')
self.assertCountEqual(['4x7'],
self.lp.grid.buttons(60).names,
'Button name mismatch.')
self.assertCountEqual(['5x7'],
self.lp.grid.buttons(61).names,
'Button name mismatch.')
self.assertCountEqual(['6x7'],
self.lp.grid.buttons(62).names,
'Button name mismatch.')
self.assertCountEqual(['7x7'],
self.lp.grid.buttons(63).names,
'Button name mismatch.')
self.assertCountEqual(['0x0'],
self.lp.grid.buttons(0, 0, 0).names,
'Button name mismatch.')
self.assertCountEqual(['0x0'],
self.lp.grid.buttons((0, 0)).names,
'Button name mismatch.')
self.assertCountEqual(['1x0'],
self.lp.grid.buttons((1, 0)).names,
'Button name mismatch.')
self.assertCountEqual(['2x0'],
self.lp.grid.buttons((2, 0)).names,
'Button name mismatch.')
self.assertCountEqual(['3x0'],
self.lp.grid.buttons((3, 0)).names,
'Button name mismatch.')
self.assertCountEqual(['4x0'],
self.lp.grid.buttons((4, 0)).names,
'Button name mismatch.')
self.assertCountEqual(['5x0'],
self.lp.grid.buttons((5, 0)).names,
'Button name mismatch.')
self.assertCountEqual(['6x0'],
self.lp.grid.buttons((6, 0)).names,
'Button name mismatch.')
self.assertCountEqual(['7x0'],
self.lp.grid.buttons((7, 0)).names,
'Button name mismatch.')
self.assertCountEqual(['0x1'],
self.lp.grid.buttons((0, 1)).names,
'Button name mismatch.')
self.assertCountEqual(['1x1'],
self.lp.grid.buttons((1, 1)).names,
'Button name mismatch.')
self.assertCountEqual(['2x1'],
self.lp.grid.buttons((2, 1)).names,
'Button name mismatch.')
self.assertCountEqual(['3x1'],
self.lp.grid.buttons((3, 1)).names,
'Button name mismatch.')
self.assertCountEqual(['4x1'],
self.lp.grid.buttons((4, 1)).names,
'Button name mismatch.')
self.assertCountEqual(['5x1'],
self.lp.grid.buttons((5, 1)).names,
'Button name mismatch.')
self.assertCountEqual(['6x1'],
self.lp.grid.buttons((6, 1)).names,
'Button name mismatch.')
self.assertCountEqual(['7x1'],
self.lp.grid.buttons((7, 1)).names,
'Button name mismatch.')
self.assertCountEqual(['0x2'],
self.lp.grid.buttons((0, 2)).names,
'Button name mismatch.')
self.assertCountEqual(['1x2'],
self.lp.grid.buttons((1, 2)).names,
'Button name mismatch.')
self.assertCountEqual(['2x2'],
self.lp.grid.buttons((2, 2)).names,
'Button name mismatch.')
self.assertCountEqual(['3x2'],
self.lp.grid.buttons((3, 2)).names,
'Button name mismatch.')
self.assertCountEqual(['4x2'],
self.lp.grid.buttons((4, 2)).names,
'Button name mismatch.')
self.assertCountEqual(['5x2'],
self.lp.grid.buttons((5, 2)).names,
'Button name mismatch.')
self.assertCountEqual(['6x2'],
self.lp.grid.buttons((6, 2)).names,
'Button name mismatch.')
self.assertCountEqual(['7x2'],
self.lp.grid.buttons((7, 2)).names,
'Button name mismatch.')
self.assertCountEqual(['0x3'],
self.lp.grid.buttons((0, 3)).names,
'Button name mismatch.')
self.assertCountEqual(['1x3'],
self.lp.grid.buttons((1, 3)).names,
'Button name mismatch.')
self.assertCountEqual(['2x3'],
self.lp.grid.buttons((2, 3)).names,
'Button name mismatch.')
self.assertCountEqual(['3x3'],
self.lp.grid.buttons((3, 3)).names,
'Button name mismatch.')
self.assertCountEqual(['4x3'],
self.lp.grid.buttons((4, 3)).names,
'Button name mismatch.')
self.assertCountEqual(['5x3'],
self.lp.grid.buttons((5, 3)).names,
'Button name mismatch.')
self.assertCountEqual(['6x3'],
self.lp.grid.buttons((6, 3)).names,
'Button name mismatch.')
self.assertCountEqual(['7x3'],
self.lp.grid.buttons((7, 3)).names,
'Button name mismatch.')
self.assertCountEqual(['0x4'],
self.lp.grid.buttons((0, 4)).names,
'Button name mismatch.')
self.assertCountEqual(['1x4'],
self.lp.grid.buttons((1, 4)).names,
'Button name mismatch.')
self.assertCountEqual(['2x4'],
self.lp.grid.buttons((2, 4)).names,
'Button name mismatch.')
self.assertCountEqual(['3x4'],
self.lp.grid.buttons((3, 4)).names,
'Button name mismatch.')
self.assertCountEqual(['4x4'],
self.lp.grid.buttons((4, 4)).names,
'Button name mismatch.')
self.assertCountEqual(['5x4'],
self.lp.grid.buttons((5, 4)).names,
'Button name mismatch.')
self.assertCountEqual(['6x4'],
self.lp.grid.buttons((6, 4)).names,
'Button name mismatch.')
self.assertCountEqual(['7x4'],
self.lp.grid.buttons((7, 4)).names,
'Button name mismatch.')
self.assertCountEqual(['0x5'],
self.lp.grid.buttons((0, 5)).names,
'Button name mismatch.')
self.assertCountEqual(['1x5'],
self.lp.grid.buttons((1, 5)).names,
'Button name mismatch.')
self.assertCountEqual(['2x5'],
self.lp.grid.buttons((2, 5)).names,
'Button name mismatch.')
self.assertCountEqual(['3x5'],
self.lp.grid.buttons((3, 5)).names,
'Button name mismatch.')
self.assertCountEqual(['4x5'],
self.lp.grid.buttons((4, 5)).names,
'Button name mismatch.')
self.assertCountEqual(['5x5'],
self.lp.grid.buttons((5, 5)).names,
'Button name mismatch.')
self.assertCountEqual(['6x5'],
self.lp.grid.buttons((6, 5)).names,
'Button name mismatch.')
self.assertCountEqual(['7x5'],
self.lp.grid.buttons((7, 5)).names,
'Button name mismatch.')
self.assertCountEqual(['0x6'],
self.lp.grid.buttons((0, 6)).names,
'Button name mismatch.')
self.assertCountEqual(['1x6'],
self.lp.grid.buttons((1, 6)).names,
'Button name mismatch.')
self.assertCountEqual(['2x6'],
self.lp.grid.buttons((2, 6)).names,
'Button name mismatch.')
self.assertCountEqual(['3x6'],
self.lp.grid.buttons((3, 6)).names,
'Button name mismatch.')
self.assertCountEqual(['4x6'],
self.lp.grid.buttons((4, 6)).names,
'Button name mismatch.')
self.assertCountEqual(['5x6'],
self.lp.grid.buttons((5, 6)).names,
'Button name mismatch.')
self.assertCountEqual(['6x6'],
self.lp.grid.buttons((6, 6)).names,
'Button name mismatch.')
self.assertCountEqual(['7x6'],
self.lp.grid.buttons((7, 6)).names,
'Button name mismatch.')
self.assertCountEqual(['0x7'],
self.lp.grid.buttons((0, 7)).names,
'Button name mismatch.')
self.assertCountEqual(['1x7'],
self.lp.grid.buttons((1, 7)).names,
'Button name mismatch.')
self.assertCountEqual(['2x7'],
self.lp.grid.buttons((2, 7)).names,
'Button name mismatch.')
self.assertCountEqual(['3x7'],
self.lp.grid.buttons((3, 7)).names,
'Button name mismatch.')
self.assertCountEqual(['4x7'],
self.lp.grid.buttons((4, 7)).names,
'Button name mismatch.')
self.assertCountEqual(['5x7'],
self.lp.grid.buttons((5, 7)).names,
'Button name mismatch.')
self.assertCountEqual(['6x7'],
self.lp.grid.buttons((6, 7)).names,
'Button name mismatch.')
self.assertCountEqual(['7x7'],
self.lp.grid.buttons((7, 7)).names,
'Button name mismatch.')
self.assertCountEqual(['0x0', '5x5', '7x7'],
self.lp.grid.buttons((0, 0), (5, 5), (7, 7)).names, # noqa
'Button name mismatch.')
self.assertCountEqual(['0x0'],
self.lp.grid.buttons((0, 0), (0, 0), (0, 0)).names, # noqa
'Button name mismatch.')
def test_prog_layout_poll_event(self):
self.lp.open()
self.lp.will_return(midi_event=VirtualMidiEvent([0x90, 0x51, 0x0])) # noqa
self.assertEqual(self.lp.grid.buttons().poll_for_event().message,
VirtualMidiEvent([0x90, 0x51, 0x0]).message,
'MIDI message mismatch.')
    def test_prog_layout_poll_event_with_input_string(self):
        """Verify poll_for_event() accepts the string forms of the event-type filter.

        Exercises 'press', 'release', 'press_release', 'press|release' and
        their upper-case variants, and checks that abbreviations ('pr',
        'rel') raise ValueError.  will_return() primes the virtual device,
        which replays the same MIDI event on every subsequent poll, so
        repeated polls of the same note are expected to succeed.
        """
        self.lp.open()
        # Note-on with velocity 0x7f: should be reported as a PRESS on button 0x0.
        self.lp.will_return(midi_event=VirtualMidiEvent([0x90, 0x51, 0x7f]))  # noqa
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='press').message,  # noqa
                         VirtualMidiEvent([0x90, 0x51, 0x7f]).message,
                         'MIDI message mismatch.')
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='press').button.name,  # noqa
                         '0x0',
                         'Button name mismatch.')
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='press').type,  # noqa
                         ButtonEvent.PRESS,
                         'Event type mismatch.')
        # Note-on with velocity 0x0: should be reported as a RELEASE.
        self.lp.will_return(midi_event=VirtualMidiEvent([0x90, 0x51, 0x0]))  # noqa
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='release').message,  # noqa
                         VirtualMidiEvent([0x90, 0x51, 0x0]).message,
                         'MIDI message mismatch.')
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='release').button.name,  # noqa
                         '0x0',
                         'Button name mismatch.')
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='release').type,  # noqa
                         ButtonEvent.RELEASE,
                         'Event type mismatch.')
        # The combined filter (and its case/spelling variants) must match a press.
        self.lp.will_return(midi_event=VirtualMidiEvent([0x90, 0x51, 0x7f]))  # noqa
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='press_release').message,  # noqa
                         VirtualMidiEvent([0x90, 0x51, 0x7f]).message,
                         'MIDI message mismatch.')
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='press_release').button.name,  # noqa
                         '0x0',
                         'Button name mismatch.')
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='press_release').type,  # noqa
                         ButtonEvent.PRESS,
                         'Event type mismatch.')
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='PRESS').type,  # noqa
                         ButtonEvent.PRESS,
                         'Event type mismatch.')
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='press|release').type,  # noqa
                         ButtonEvent.PRESS,
                         'Event type mismatch.')
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='PRESS_RELEASE').type,  # noqa
                         ButtonEvent.PRESS,
                         'Event type mismatch.')
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='PRESS|RELEASE').type,  # noqa
                         ButtonEvent.PRESS,
                         'Event type mismatch.')
        # The combined filter (and variants) must also match a release.
        self.lp.will_return(midi_event=VirtualMidiEvent([0x90, 0x51, 0x0]))  # noqa
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='press_release').message,  # noqa
                         VirtualMidiEvent([0x90, 0x51, 0x0]).message,
                         'MIDI message mismatch.')
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='press_release').button.name,  # noqa
                         '0x0',
                         'Button name mismatch.')
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='press_release').type,  # noqa
                         ButtonEvent.RELEASE,
                         'Event type mismatch.')
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='RELEASE').type,  # noqa
                         ButtonEvent.RELEASE,
                         'Event type mismatch.')
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='press|release').type,  # noqa
                         ButtonEvent.RELEASE,
                         'Event type mismatch.')
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='PRESS_RELEASE').type,  # noqa
                         ButtonEvent.RELEASE,
                         'Event type mismatch.')
        self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='PRESS|RELEASE').type,  # noqa
                         ButtonEvent.RELEASE,
                         'Event type mismatch.')
        # Abbreviated type names are rejected regardless of the pending event.
        self.lp.will_return(midi_event=VirtualMidiEvent([0x90, 0x51, 0x7f]))  # noqa
        with self.assertRaises(ValueError):
            self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='pr').message,  # noqa
                             VirtualMidiEvent([0x90, 0x51, 0x7f]).message,
                             'MIDI message mismatch.')
        with self.assertRaises(ValueError):
            self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='rel').message,  # noqa
                             VirtualMidiEvent([0x90, 0x51, 0x7f]).message,
                             'MIDI message mismatch.')
        self.lp.will_return(midi_event=VirtualMidiEvent([0x90, 0x51, 0x0]))  # noqa
        with self.assertRaises(ValueError):
            self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='pr').message,  # noqa
                             VirtualMidiEvent([0x90, 0x51, 0x0]).message,
                             'MIDI message mismatch.')
        with self.assertRaises(ValueError):
            self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type='rel').message,  # noqa
                             VirtualMidiEvent([0x90, 0x51, 0x0]).message,
                             'MIDI message mismatch.')
def test_prog_layout_poll_event_with_button_event_constants(self):
self.lp.open()
self.lp.will_return(midi_event=VirtualMidiEvent([0x90, 0x51, 0x7f])) # noqa
self.assertEqual(self.lp.grid.buttons('up').poll_for_event(type=ButtonEvent.PRESS).message, # noqa
VirtualMidiEvent([0x90, 0x51, 0x7f]).message,
'MIDI message mismatch.')
self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type=ButtonEvent.PRESS).button.name, # noqa
'0x0',
'MIDI message mismatch.')
self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type=ButtonEvent.PRESS).type, # noqa
ButtonEvent.PRESS,
'Event type mismatch.')
self.lp.will_return(midi_event=VirtualMidiEvent([0x90, 0x51, 0x0])) # noqa
self.assertEqual(self.lp.grid.buttons('up').poll_for_event(type=ButtonEvent.RELEASE).message, # noqa
VirtualMidiEvent([0x90, 0x51, 0x0]).message,
'MIDI message mismatch.')
self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type=ButtonEvent.RELEASE).button.name, # noqa
'0x0',
'Button name mismatch.')
self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type=ButtonEvent.RELEASE).type, # noqa
ButtonEvent.RELEASE,
'Event type mismatch.')
self.lp.will_return(midi_event=VirtualMidiEvent([0x90, 0x51, 0x0])) # noqa
self.assertEqual(self.lp.grid.buttons('up').poll_for_event(type=ButtonEvent.PRESS_RELEASE).message, # noqa
VirtualMidiEvent([0x90, 0x51, 0x0]).message,
'MIDI message mismatch.')
self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type=ButtonEvent.PRESS_RELEASE).button.name, # noqa
'0x0',
'Button name mismatch.')
self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type=ButtonEvent.PRESS_RELEASE).type, # noqa
ButtonEvent.RELEASE,
'Event type mismatch.')
self.lp.will_return(midi_event=VirtualMidiEvent([0x90, 0x51, 0x7f])) # noqa
self.assertEqual(self.lp.grid.buttons('up').poll_for_event(type=ButtonEvent.PRESS_RELEASE).message, # noqa
VirtualMidiEvent([0x90, 0x51, 0x7f]).message,
'MIDI message mismatch.')
self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type=ButtonEvent.PRESS_RELEASE).button.name, # noqa
'0x0',
'Button name mismatch.')
self.assertEqual(self.lp.grid.buttons('0x0').poll_for_event(type=ButtonEvent.PRESS_RELEASE).type, # noqa
ButtonEvent.PRESS,
'Event type mismatch.')
if __name__ == '__main__':
    # Run the full test suite when this file is executed directly.
    unittest.main()
| 63.892446
| 120
| 0.545561
| 12,333
| 99,800
| 4.38401
| 0.028055
| 0.103647
| 0.164053
| 0.154361
| 0.960254
| 0.956758
| 0.917585
| 0.911371
| 0.906248
| 0.899053
| 0
| 0.04957
| 0.292715
| 99,800
| 1,561
| 121
| 63.933376
| 0.716407
| 0.032004
| 0
| 0.388926
| 0
| 0
| 0.163485
| 0
| 0
| 0
| 0.013359
| 0
| 0.562375
| 1
| 0.022015
| false
| 0
| 0.002668
| 0
| 0.026684
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3fa495192a436ede7d4182fec50dae31ca33f143
| 26,078
|
py
|
Python
|
eval.py
|
iwonasob/DCASE_rare
|
3f9f55a1958602ac61e2e5ab02866d7215a5d131
|
[
"MIT"
] | 2
|
2019-05-23T08:24:13.000Z
|
2019-08-19T08:53:31.000Z
|
eval.py
|
iwonasob/DCASE_rare
|
3f9f55a1958602ac61e2e5ab02866d7215a5d131
|
[
"MIT"
] | null | null | null |
eval.py
|
iwonasob/DCASE_rare
|
3f9f55a1958602ac61e2e5ab02866d7215a5d131
|
[
"MIT"
] | null | null | null |
import sed_eval
from IPython.core.debugger import Tracer
# Rare-event class under evaluation; the ground-truth and result file
# paths below are built by splicing this label into their file names.
cl="gunshot"
file_list = [
# {
# 'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE_task2/mixed_audio/testing/list_"+cl+"_gt.txt",
# 'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE_task2/mixed_audio/results/W_mel_01_kls_10p_50n_4sh_1000lam_"+cl+".txt"
# },
# {
# 'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
# 'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_10p_10n_4sh_500lam_gunshot.txt"
# },
# {
# 'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
# 'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_10p_10n_4sh_500lam_gunshot.txt"
# },
# {
# 'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
# 'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_10p_10n_4sh_5000lam_gunshot.txt"
# },
# {
# 'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
# 'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_10p_10n_4sh_5000lam_gunshot.txt"
# },
# {
# 'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
# 'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_10p_100n_4sh_500lam_gunshot.txt"
# },
# {
# 'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
# 'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_10p_100n_4sh_500lam_gunshot.txt"
# },
# {
# 'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
# 'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_10p_50n_4sh_500lam_gunshot.txt"
# },
# {
# 'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
# 'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_10p_50n_4sh_500lam_gunshot.txt"
# },
# {
# 'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
# 'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_10p_50n_4sh_1000lam_gunshot.txt"
# },
# {
# 'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
# 'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_10p_50n_4sh_1000lam_gunshot.txt"
# },
# {
# 'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
# 'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_10p_50n_4sh_1000lam_gunshot.txt"
# },
# {
# 'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
# 'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_10p_50n_4sh_1000lam_gunshot.txt"
# },
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_20p_10n_4sh_0lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_20p_10n_4sh_500lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_10p_10n_4sh_0lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_10p_10n_4sh_500lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_10p_10n_4sh_0lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_10p_10n_4sh_500lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_10p_50n_4sh_0lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_eucl_10p_50n_4sh_100lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_10p_50n_4sh_0lam_gunshotold.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_eucl_orth_10p_50n_4sh_0lam_gunshotold.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_10p_50n_4sh_0lam_gunshotnorm.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_10p_50n_4sh_1000lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_10p_50n_4sh_500lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_10p_50n_4sh_0lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_10p_50n_4sh_500lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_10p_50n_4sh_500lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_10p_50n_4sh_1000lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_10p_50n_4sh_5000lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_20p_100n_4sh_0lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_20p_100n_4sh_500lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_20p_100n_4sh_500lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_20p_100n_4sh_1000lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_20p_100n_4sh_5000lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_10p_20n_4sh_0lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_10p_20n_4sh_500lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_10p_20n_4sh_1000lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_10p_20n_4sh_5000lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_10p_20n_4sh_0lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_10p_20n_4sh_500lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_10p_20n_4sh_1000lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_10p_20n_4sh_5000lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_5p_20n_4sh_0lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_5p_20n_4sh_500lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_5p_20n_4sh_1000lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kl_orth_5p_20n_4sh_5000lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_5p_20n_4sh_0lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_5p_20n_4sh_500lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_5p_20n_4sh_1000lam_gunshot.txt"
},
{
'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_eucl_orth_5p_20n_4sh_5000lam_gunshot.txt"
},
# {
# 'reference_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/mixtures_devtest_0367e094f3f5c81ef017d128ebff4a3c/list_"+cl+"_gt.csv",
# 'estimated_file': "/vol/vssp/AcousticEventsDetection/DCASE2017-baseline-system/applications/data/TUT-rare-sound-events-2017-development/generated_data/results/W_mel_01_orth_kls_10p_50n_4sh_10000lam_"+cl+".txt"
# },
]
# Evaluate sound-event-detection outputs with sed_eval: pair each estimated
# result file with its ground-truth reference list, then compute event-based
# metrics (onset-only matching, 0.5 s collar) and print the overall results.
data = []
# Get used event labels
all_data = sed_eval.util.event_list.EventList()
# NOTE(review): unique_event_labels is read from the still-empty EventList, so
# event_labels is empty here and is never used below — confirm intent.
event_labels = all_data.unique_event_labels
for file_pair in file_list:
    print(file_pair['estimated_file'])
    reference_event_list = sed_eval.io.load_event_list(file_pair['reference_file'])
    # Tracer()()
    estimated_event_list = sed_eval.io.load_event_list(file_pair['estimated_file'])
    data.append({'reference_event_list': reference_event_list,
                 'estimated_event_list': estimated_event_list})
    all_data += reference_event_list
# Start evaluating
# Create metrics classes, define parameters
# segment_based_metrics = sed_eval.sound_event.SegmentBasedMetrics(event_label_list=event_labels,
#                                                                  time_resolution=1)
# NOTE(review): event_label_list=[0, 1] looks like a placeholder — loaded
# event lists normally carry string labels; verify against sed_eval's API.
event_based_metrics = sed_eval.sound_event.EventBasedMetrics(event_label_list=[0,1],t_collar=0.5,percentage_of_length=0.5,evaluate_onset=True, evaluate_offset=False)
# Go through files
# NOTE(review): this loop runs after the file_pair loop has finished, so it
# scores only the LAST pair's reference/estimated lists — confirm whether it
# was meant to sit inside that loop (or iterate over `data`).
for file in reference_event_list.unique_files:
    # Get reference event list for file by filtering reference_event_list
    reference_event_list_for_current_file = reference_event_list.filter(file=file)
    # Get estimated event list for file by filtering estimated_event_list
    estimated_event_list_for_current_file = estimated_event_list.filter(file=file)
    event_based_metrics.evaluate(
        reference_event_list=reference_event_list_for_current_file,
        estimated_event_list=estimated_event_list_for_current_file
    )
print(event_based_metrics.results_overall_metrics())
| 98.407547
| 228
| 0.815093
| 3,168
| 26,078
| 6.379735
| 0.035985
| 0.036713
| 0.057691
| 0.178319
| 0.9621
| 0.959379
| 0.950275
| 0.950275
| 0.94404
| 0.934145
| 0
| 0.096936
| 0.068794
| 26,078
| 264
| 229
| 98.780303
| 0.73534
| 0.244766
| 0
| 0.238889
| 0
| 0.433333
| 0.827669
| 0.75358
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.011111
| 0
| 0.011111
| 0.011111
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
3fbc888270cb984e730abe729e04c57beadf51a4
| 97
|
py
|
Python
|
src/utils/__init__.py
|
menDDang/CycleGan-tf_2_1
|
073c5ebce92ba2cfa76804e43fdf6b4b5d4b279b
|
[
"Apache-2.0"
] | null | null | null |
src/utils/__init__.py
|
menDDang/CycleGan-tf_2_1
|
073c5ebce92ba2cfa76804e43fdf6b4b5d4b279b
|
[
"Apache-2.0"
] | null | null | null |
src/utils/__init__.py
|
menDDang/CycleGan-tf_2_1
|
073c5ebce92ba2cfa76804e43fdf6b4b5d4b279b
|
[
"Apache-2.0"
] | null | null | null |
from .image import read_image
from .image import write_image
from .data_loader import DataLoader
| 24.25
| 35
| 0.845361
| 15
| 97
| 5.266667
| 0.533333
| 0.227848
| 0.379747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123711
| 97
| 4
| 35
| 24.25
| 0.929412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3fbfc0bc971e38cc200520b955d032a584207a95
| 4,475
|
py
|
Python
|
miso/models/base_cyclic.py
|
Thubaralei/particle-classification
|
01d174e48aae1bb18a411008bf7ae92756e32892
|
[
"MIT"
] | 1
|
2021-11-16T16:46:35.000Z
|
2021-11-16T16:46:35.000Z
|
miso/models/base_cyclic.py
|
Thubaralei/particle-classification
|
01d174e48aae1bb18a411008bf7ae92756e32892
|
[
"MIT"
] | null | null | null |
miso/models/base_cyclic.py
|
Thubaralei/particle-classification
|
01d174e48aae1bb18a411008bf7ae92756e32892
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, Input, Activation, \
GlobalMaxPooling2D, GlobalAveragePooling2D, Lambda, DepthwiseConv2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.models import Model
import numpy as np
from miso.layers import cyclic
def base_cyclic(input_shape,
                nb_classes,
                filters=4,
                blocks=None,
                dropout=0.5,
                dense=512,
                conv_padding='same',
                conv_activation='relu',
                use_batch_norm=True,
                global_pooling=None,
                use_depthwise_conv=True):
    """Build a cyclic-symmetric CNN classifier.

    The input is expanded into its four rotations (CyclicSlice4), run
    through ``blocks`` conv/pool stages with feature maps rolled between
    rotations after each stage (CyclicRoll4), pooled across rotations in
    the dense head (CyclicDensePool4), and finished with a softmax layer.

    Parameters:
        input_shape: network input shape, e.g. (H, W, C).
        nb_classes: number of softmax outputs.
        filters: filter count of the first stage; doubled every stage.
        blocks: number of conv/pool stages; derived from the input height
            when None.
        dropout: dropout rate before the dense head.
        dense: width of the fully-connected layer.
        conv_padding / conv_activation: Conv2D padding and nonlinearity.
        use_batch_norm: insert BatchNormalization after each convolution.
        global_pooling: None, 'avg' or 'max' pooling before flattening.
        use_depthwise_conv: accepted for API compatibility; not used here.

    Returns:
        An uncompiled Keras Model named 'base_cyclic'.
    """
    if blocks is None:
        # Enough stages to shrink the spatial dimensions to roughly 4x4.
        blocks = int(np.log2(input_shape[0]) - 2)
    inputs = Input(shape=input_shape)
    net = cyclic.CyclicSlice4()(inputs)
    # Convolution stages: two conv layers each, then pool and roll.
    for stage in range(blocks):
        stage_filters = filters * 2 ** stage
        for _ in range(2):
            net = Conv2D(stage_filters,
                         (3, 3),
                         padding=conv_padding,
                         activation=None,
                         kernel_initializer='he_normal')(net)
            if use_batch_norm is True:
                net = BatchNormalization()(net)
            net = Activation(conv_activation)(net)
        net = MaxPooling2D()(net)
        net = cyclic.CyclicRoll4()(net)
    if global_pooling == 'avg':
        net = GlobalAveragePooling2D()(net)
    elif global_pooling == 'max':
        net = GlobalMaxPooling2D()(net)
    # Dense head: pool the four rotations back into one prediction.
    net = Flatten()(net)
    net = cyclic.CyclicDensePool4(pool_op=tf.reduce_mean)(net)
    net = Dropout(dropout)(net)
    net = Dense(dense, activation='relu')(net)
    net = Dense(nb_classes, activation='softmax')(net)
    model = Model(inputs, net, name='base_cyclic')
    return model
def mirror_cyclic(input_shape,
                  nb_classes,
                  filters=4,
                  blocks=4,
                  dropout=0.5,
                  dense=512,
                  conv_padding='same',
                  conv_activation='relu',
                  use_batch_norm=True,
                  global_pooling=None):
    """Build a cyclic-symmetric CNN with mirrored activations.

    Identical in structure to ``base_cyclic`` except that after each
    convolution the nonlinearity is applied to both ``x`` and ``-x`` and
    the element-wise maximum of the two responses is kept (a "mirror"
    activation), making the response symmetric to sign inversion.

    Parameters:
        input_shape: network input shape, e.g. (H, W, C).
        nb_classes: number of softmax outputs.
        filters: filter count of the first block; doubled every block.
        blocks: number of conv/pool blocks.
        dropout: dropout rate before the dense head.
        dense: width of the fully-connected layer.
        conv_padding / conv_activation: Conv2D padding and nonlinearity.
        use_batch_norm: insert BatchNormalization after each convolution.
        global_pooling: None, 'avg' or 'max' pooling before flattening.

    Returns:
        An uncompiled Keras Model.
    """
    inputs = Input(shape=input_shape)
    x = cyclic.CyclicSlice4()(inputs)
    for i in range(blocks):
        conv_filters = filters * 2 ** i
        # Two conv + mirror-activation layers per block. (The original
        # wrote this pair out twice verbatim; folded into a loop — the
        # layer sequence is unchanged. An unused `default_bn_params`
        # dict was also removed.)
        for _ in range(2):
            x = Conv2D(conv_filters, (3, 3), padding=conv_padding, activation=None, kernel_initializer='he_normal')(x)
            if use_batch_norm is True:
                x = BatchNormalization()(x)
            # Mirror activation: max(act(x), act(-x)), element-wise.
            xa = Activation(conv_activation)(x)
            xb = Activation(conv_activation)(-x)
            x = tf.stack([xa, xb], 4)
            x = tf.reduce_max(x, axis=-1)
        # Pool
        x = MaxPooling2D()(x)
        # Roll feature maps between the four rotations.
        x = cyclic.CyclicRoll4()(x)
    if global_pooling == 'avg':
        x = GlobalAveragePooling2D()(x)
    elif global_pooling == 'max':
        x = GlobalMaxPooling2D()(x)
    # Dense layers
    x = Flatten()(x)
    x = cyclic.CyclicDensePool4(pool_op=tf.reduce_mean)(x)
    x = Dropout(dropout)(x)
    x = Dense(dense, activation='relu')(x)
    x = Dense(nb_classes, activation='softmax')(x)
    # NOTE(review): the model name 'base_cyclic' matches base_cyclic();
    # kept as-is for compatibility with anything that looks models up by
    # name, but 'mirror_cyclic' was probably intended.
    model = Model(inputs, x, name='base_cyclic')
    return model
| 36.680328
| 118
| 0.581899
| 526
| 4,475
| 4.80038
| 0.212928
| 0.019802
| 0.05703
| 0.059406
| 0.805941
| 0.78099
| 0.760396
| 0.760396
| 0.704158
| 0.630495
| 0
| 0.018389
| 0.295196
| 4,475
| 121
| 119
| 36.983471
| 0.782181
| 0.164693
| 0
| 0.722222
| 0
| 0
| 0.034724
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0
| 0.066667
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b204d2fb23cad0ecea1baef66f7cf6edcbd9fcd2
| 37
|
py
|
Python
|
tests/t2.py
|
raffaelfoidl/noworkflow
|
aa4ca189df24fec6c7abd32bcca6a097b21fdf31
|
[
"MIT"
] | 108
|
2015-02-04T14:16:51.000Z
|
2022-03-06T13:52:45.000Z
|
tests/t2.py
|
raffaelfoidl/noworkflow
|
aa4ca189df24fec6c7abd32bcca6a097b21fdf31
|
[
"MIT"
] | 92
|
2015-01-19T14:58:06.000Z
|
2021-04-19T17:28:50.000Z
|
tests/t2.py
|
raffaelfoidl/noworkflow
|
aa4ca189df24fec6c7abd32bcca6a097b21fdf31
|
[
"MIT"
] | 31
|
2015-03-03T23:53:59.000Z
|
2021-11-11T04:23:44.000Z
|
import t3
def run():
    # Delegate to the t3 module; t3.add's return value is discarded.
    t3.add(8)
| 6.166667
| 13
| 0.540541
| 7
| 37
| 2.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 0.297297
| 37
| 5
| 14
| 7.4
| 0.653846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b20e67b75a399bd3ecc8aba12ac42b517202513c
| 6,275
|
py
|
Python
|
tests/generators/test_cmake.py
|
mropert/crom
|
b871b756c348952de2a044b22b36c9fbb0e76132
|
[
"MIT"
] | null | null | null |
tests/generators/test_cmake.py
|
mropert/crom
|
b871b756c348952de2a044b22b36c9fbb0e76132
|
[
"MIT"
] | null | null | null |
tests/generators/test_cmake.py
|
mropert/crom
|
b871b756c348952de2a044b22b36c9fbb0e76132
|
[
"MIT"
] | null | null | null |
from crom.generators import cmake
from crom.project import Project
def test_generate_lib():
    """generate_lib for a plain library project emits exactly one file,
    CMakeLists.txt, containing the conan boilerplate, the library target
    and PUBLIC/PRIVATE include-directory visibility."""
    project = Project('hello', Project.LIBRARY, sources={'src/foo.cpp': None},
                      headers={'include/foo/foo.hpp': None})
    files = cmake.generate_lib(project, 'src', 'include')
    assert len(files) == 1
    assert 'CMakeLists.txt' in files
    assert files['CMakeLists.txt'] == ('cmake_minimum_required(VERSION 3.2)\n'
                                       'project(hello)\n'
                                       '\n'
                                       'include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\n'
                                       'conan_basic_setup()\n'
                                       '\n'
                                       'add_library(hello src/foo.cpp include/foo/foo.hpp)\n'
                                       'target_include_directories(hello PUBLIC include)\n'
                                       'target_include_directories(hello PRIVATE src)\n')
def test_generate_lib_with_test():
    """A library project that also declares tests gains the
    enable_testing / add_executable / add_test trailer in the generated
    CMakeLists.txt."""
    project = Project('hello', Project.LIBRARY, sources={'src/foo.cpp': None},
                      headers={'include/foo/foo.hpp': None}, tests={'test/test.cpp': None})
    files = cmake.generate_lib(project, 'src', 'include')
    assert len(files) == 1
    assert 'CMakeLists.txt' in files
    assert files['CMakeLists.txt'] == ('cmake_minimum_required(VERSION 3.2)\n'
                                       'project(hello)\n'
                                       '\n'
                                       'include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\n'
                                       'conan_basic_setup()\n'
                                       '\n'
                                       'add_library(hello src/foo.cpp include/foo/foo.hpp)\n'
                                       'target_include_directories(hello PUBLIC include)\n'
                                       'target_include_directories(hello PRIVATE src)\n'
                                       '\n'
                                       'enable_testing()\n'
                                       'add_executable(hello_test test/test.cpp)\n'
                                       'target_link_libraries(hello_test PRIVATE hello)\n'
                                       'add_test(NAME hello_test COMMAND hello_test)\n')
def test_generate_lib_with_test_and_prefix():
    """The prefix argument is prepended to every source, header, include
    and test path in the generated CMakeLists.txt."""
    project = Project('hello', Project.LIBRARY, sources={'src/foo.cpp': None},
                      headers={'include/foo/foo.hpp': None}, tests={'test/test.cpp': None})
    files = cmake.generate_lib(project, 'src', 'include', prefix="..")
    assert len(files) == 1
    assert 'CMakeLists.txt' in files
    assert files['CMakeLists.txt'] == ('cmake_minimum_required(VERSION 3.2)\n'
                                       'project(hello)\n'
                                       '\n'
                                       'include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\n'
                                       'conan_basic_setup()\n'
                                       '\n'
                                       'add_library(hello ../src/foo.cpp ../include/foo/foo.hpp)\n'
                                       'target_include_directories(hello PUBLIC ../include)\n'
                                       'target_include_directories(hello PRIVATE ../src)\n'
                                       '\n'
                                       'enable_testing()\n'
                                       'add_executable(hello_test ../test/test.cpp)\n'
                                       'target_link_libraries(hello_test PRIVATE hello)\n'
                                       'add_test(NAME hello_test COMMAND hello_test)\n')
def test_generate_lib_multiple_files():
    """Multiple sources/headers all appear on the add_library line, in
    sorted order (bar before foo) regardless of dict insertion order."""
    project = Project('hello', Project.LIBRARY, sources={'src/foo.cpp': None, 'src/bar.cpp': None},
                      headers={'include/foo/foo.hpp': None, 'include/foo/bar.hpp': None})
    files = cmake.generate_lib(project, 'src', 'include')
    assert len(files) == 1
    assert 'CMakeLists.txt' in files
    assert files['CMakeLists.txt'] == ('cmake_minimum_required(VERSION 3.2)\n'
                                       'project(hello)\n'
                                       '\n'
                                       'include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\n'
                                       'conan_basic_setup()\n'
                                       '\n'
                                       'add_library(hello src/bar.cpp src/foo.cpp'
                                       ' include/foo/bar.hpp include/foo/foo.hpp)\n'
                                       'target_include_directories(hello PUBLIC include)\n'
                                       'target_include_directories(hello PRIVATE src)\n')
def test_generate_exe():
    """An executable project yields a single CMakeLists.txt holding the
    conan boilerplate and one add_executable line."""
    project = Project('hello', Project.EXECUTABLE, sources={'foo.cpp': None})
    generated = cmake.generate_exe(project, None)
    expected = ('cmake_minimum_required(VERSION 3.2)\n'
                'project(hello)\n'
                '\n'
                'include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\n'
                'conan_basic_setup()\n'
                '\n'
                'add_executable(hello foo.cpp)\n')
    assert len(generated) == 1
    assert 'CMakeLists.txt' in generated
    assert generated['CMakeLists.txt'] == expected
def test_generate_exe_multiple_files():
    """Several executable sources are all listed, sorted, on the single
    add_executable line of the generated CMakeLists.txt."""
    project = Project('hello', Project.EXECUTABLE,
                      sources={'foo.cpp': None, 'bar.cpp': None, 'bazz.cpp': None})
    generated = cmake.generate_exe(project, None)
    expected = ('cmake_minimum_required(VERSION 3.2)\n'
                'project(hello)\n'
                '\n'
                'include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\n'
                'conan_basic_setup()\n'
                '\n'
                'add_executable(hello bar.cpp bazz.cpp foo.cpp)\n')
    assert len(generated) == 1
    assert 'CMakeLists.txt' in generated
    assert generated['CMakeLists.txt'] == expected
| 56.026786
| 99
| 0.46247
| 589
| 6,275
| 4.750424
| 0.10017
| 0.010007
| 0.025733
| 0.045747
| 0.942459
| 0.927806
| 0.918513
| 0.911723
| 0.899571
| 0.864189
| 0
| 0.00499
| 0.425179
| 6,275
| 111
| 100
| 56.531532
| 0.770724
| 0
| 0
| 0.744898
| 1
| 0
| 0.359363
| 0.170996
| 0
| 0
| 0
| 0
| 0.183673
| 1
| 0.061224
| false
| 0
| 0.020408
| 0
| 0.081633
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b77dfc2da1150c893a94f5e1e142eab3ea014fd0
| 21,763
|
py
|
Python
|
tests/export/html/test_heading.py
|
botzill/pydocx
|
98c6aa626d875278240eabea8f86a914840499b3
|
[
"Apache-2.0"
] | 127
|
2015-01-12T22:35:34.000Z
|
2022-01-20T06:24:18.000Z
|
tests/export/html/test_heading.py
|
turbo-q/pydocx
|
98c6aa626d875278240eabea8f86a914840499b3
|
[
"Apache-2.0"
] | 156
|
2015-01-05T19:55:56.000Z
|
2020-10-14T07:01:42.000Z
|
tests/export/html/test_heading.py
|
turbo-q/pydocx
|
98c6aa626d875278240eabea8f86a914840499b3
|
[
"Apache-2.0"
] | 45
|
2015-02-22T18:52:08.000Z
|
2021-06-14T08:05:47.000Z
|
# coding: utf-8
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
from pydocx.openxml.packaging import (
MainDocumentPart,
NumberingDefinitionsPart,
StyleDefinitionsPart,
)
from pydocx.test import DocumentGeneratorTestCase
from pydocx.test.utils import WordprocessingDocumentFactory
class HeadingStylesTestCase(DocumentGeneratorTestCase):
    """How run properties attached to a heading style affect the HTML.

    Shared fixture: one paragraph styled as "heading1" containing 'aaa'.
    """
    document_xml = '''
<p>
<pPr>
<pStyle val="heading1"/>
</pPr>
<r>
<t>aaa</t>
</r>
</p>
'''

    def test_ignored_styles(self):
        """Bold, caps, small-caps and strike properties on the heading
        style produce no inline markup — output is a bare <h1>."""
        style_xml = '''
<style styleId="heading1" type="paragraph">
<name val="Heading 1"/>
<rPr>
<b val="on"/>
<caps val="on"/>
<smallCaps val="on"/>
<strike val="on"/>
<dstrike val="on"/>
</rPr>
</style>
'''
        document = WordprocessingDocumentFactory()
        document.add(StyleDefinitionsPart, style_xml)
        document.add(MainDocumentPart, self.document_xml)
        expected_html = '''
<h1>aaa</h1>
'''
        self.assert_document_generates_html(document, expected_html)

    def test_italic_preserved(self):
        """Italic on the heading style is kept as <em> inside the <h1>."""
        style_xml = '''
<style styleId="heading1" type="paragraph">
<name val="Heading 1"/>
<rPr>
<i val="on"/>
</rPr>
</style>
'''
        document = WordprocessingDocumentFactory()
        document.add(StyleDefinitionsPart, style_xml)
        document.add(MainDocumentPart, self.document_xml)
        expected_html = '''
<h1><em>aaa</em></h1>
'''
        self.assert_document_generates_html(document, expected_html)

    def test_vanished_is_preserved(self):
        """The vanish property wraps the heading text in the
        pydocx-hidden span."""
        style_xml = '''
<style styleId="heading1" type="paragraph">
<name val="Heading 1"/>
<rPr>
<vanish val="on"/>
</rPr>
</style>
'''
        document = WordprocessingDocumentFactory()
        document.add(StyleDefinitionsPart, style_xml)
        document.add(MainDocumentPart, self.document_xml)
        expected_html = '''
<h1>
<span class="pydocx-hidden">aaa</span>
</h1>
'''
        self.assert_document_generates_html(document, expected_html)

    def test_hidden_is_preserved(self):
        """webHidden behaves like vanish: text is wrapped in the
        pydocx-hidden span."""
        style_xml = '''
<style styleId="heading1" type="paragraph">
<name val="Heading 1"/>
<rPr>
<webHidden val="on"/>
</rPr>
</style>
'''
        document = WordprocessingDocumentFactory()
        document.add(StyleDefinitionsPart, style_xml)
        document.add(MainDocumentPart, self.document_xml)
        expected_html = '''
<h1>
<span class="pydocx-hidden">aaa</span>
</h1>
'''
        self.assert_document_generates_html(document, expected_html)
class HeadingTestCase(DocumentGeneratorTestCase):
    """Heading conversion, including interaction with lists, tables and
    bookmarks."""

    def test_each_heading_level(self):
        """Heading styles 1-10 map to h1-h6; levels above six clamp to
        <h6>."""
        style_template = '''
<style styleId="heading%s" type="paragraph">
<name val="Heading %s"/>
</style>
'''
        style_xml = ''.join(
            style_template % (i, i)
            for i in range(1, 11)
        )
        paragraph_template = '''
<p>
<pPr>
<pStyle val="%s"/>
</pPr>
<r>
<t>%s</t>
</r>
</p>
'''
        style_to_text = [
            ('heading1', 'aaa'),
            ('heading2', 'bbb'),
            ('heading3', 'ccc'),
            ('heading4', 'ddd'),
            ('heading5', 'eee'),
            ('heading6', 'fff'),
            ('heading7', 'ggg'),
            ('heading8', 'hhh'),
            ('heading9', 'iii'),
            ('heading10', 'jjj'),
        ]
        document_xml = ''.join(
            paragraph_template % entry
            for entry in style_to_text
        )
        document = WordprocessingDocumentFactory()
        document.add(StyleDefinitionsPart, style_xml)
        document.add(MainDocumentPart, document_xml)
        expected_html = '''
<h1>aaa</h1>
<h2>bbb</h2>
<h3>ccc</h3>
<h4>ddd</h4>
<h5>eee</h5>
<h6>fff</h6>
<h6>ggg</h6>
<h6>hhh</h6>
<h6>iii</h6>
<h6>jjj</h6>
'''
        self.assert_document_generates_html(document, expected_html)

    def test_single_list_lvl_with_heading_is_converted_to_list_strong(self):
        """A heading that is also a single-level list item renders as a
        list item with its text wrapped in <strong>, not as <h1>."""
        style_xml = '''
<style styleId="heading1" type="paragraph">
<name val="Heading 1"/>
</style>
'''
        numbering_xml = '''
<num numId="1">
<abstractNumId val="1"/>
</num>
<abstractNum abstractNumId="1">
<lvl ilvl="0">
<numFmt val="decimal"/>
</lvl>
</abstractNum>
'''
        document_xml = '''
<p>
<pPr>
<pStyle val="heading1"/>
<numPr>
<ilvl val="0" />
<numId val="1" />
</numPr>
</pPr>
<r>
<t>foo</t>
</r>
</p>
'''
        document = WordprocessingDocumentFactory()
        document.add(StyleDefinitionsPart, style_xml)
        document.add(NumberingDefinitionsPart, numbering_xml)
        document.add(MainDocumentPart, document_xml)
        expected_html = '''
<ol class="pydocx-list-style-type-decimal">
<li>
<strong>foo</strong>
</li>
</ol>
'''
        self.assert_document_generates_html(document, expected_html)

    def test_heading_in_a_nested_list_numbering_is_preserved_with_strong(self):
        """A heading on the nested level of a list stays inside the
        nested <ol>, bolded."""
        style_xml = '''
<style styleId="heading1" type="paragraph">
<name val="Heading 1"/>
</style>
'''
        numbering_xml = '''
<num numId="1">
<abstractNumId val="1"/>
</num>
<abstractNum abstractNumId="1">
<lvl ilvl="0">
<numFmt val="decimal"/>
</lvl>
<lvl ilvl="1">
<numFmt val="lowerLetter"/>
</lvl>
</abstractNum>
'''
        document_xml = '''
<p>
<pPr>
<numPr>
<ilvl val="0" />
<numId val="1" />
</numPr>
</pPr>
<r>
<t>foo</t>
</r>
</p>
<p>
<pPr>
<pStyle val="heading1"/>
<numPr>
<ilvl val="1" />
<numId val="1" />
</numPr>
</pPr>
<r>
<t>bar</t>
</r>
</p>
'''
        document = WordprocessingDocumentFactory()
        document.add(StyleDefinitionsPart, style_xml)
        document.add(NumberingDefinitionsPart, numbering_xml)
        document.add(MainDocumentPart, document_xml)
        expected_html = '''
<ol class="pydocx-list-style-type-decimal">
<li>
foo
<ol class="pydocx-list-style-type-lowerLetter">
<li>
<strong>bar</strong>
</li>
</ol>
</li>
</ol>
'''
        self.assert_document_generates_html(document, expected_html)

    def test_heading_in_nested_sub_list(self):
        """A heading at ilvl 2, deeper than the numbering defines (only
        levels 0-1), breaks out of the list and renders as a real <h1>."""
        style_xml = '''
<style styleId="heading1" type="paragraph">
<name val="Heading 1"/>
</style>
'''
        numbering_xml = '''
<num numId="1">
<abstractNumId val="1"/>
</num>
<abstractNum abstractNumId="1">
<lvl ilvl="0">
<numFmt val="decimal"/>
</lvl>
<lvl ilvl="1">
<numFmt val="lowerLetter"/>
</lvl>
</abstractNum>
'''
        document_xml = '''
<p>
<pPr>
<numPr>
<ilvl val="0" />
<numId val="1" />
</numPr>
</pPr>
<r>
<t>foo</t>
</r>
</p>
<p>
<pPr>
<numPr>
<ilvl val="1" />
<numId val="1" />
</numPr>
</pPr>
<r>
<t>bar</t>
</r>
</p>
<p>
<pPr>
<pStyle val="heading1"/>
<numPr>
<ilvl val="2" />
<numId val="1" />
</numPr>
</pPr>
<r>
<t>baz</t>
</r>
</p>
'''
        document = WordprocessingDocumentFactory()
        document.add(StyleDefinitionsPart, style_xml)
        document.add(NumberingDefinitionsPart, numbering_xml)
        document.add(MainDocumentPart, document_xml)
        expected_html = '''
<ol class="pydocx-list-style-type-decimal">
<li>
foo
<ol class="pydocx-list-style-type-lowerLetter">
<li>bar</li>
</ol>
</li>
</ol>
<h1>baz</h1>
'''
        self.assert_document_generates_html(document, expected_html)

    def test_headings_in_list_surrounding_paragraph_stay_in_list_with_strong(self):
        """Two heading list items separated by a bare paragraph remain
        items of the same list; the paragraph merges into the first item
        after a <br />."""
        style_xml = '''
<style styleId="heading1" type="paragraph">
<name val="Heading 1"/>
</style>
'''
        numbering_xml = '''
<num numId="1">
<abstractNumId val="1"/>
</num>
<abstractNum abstractNumId="1">
<lvl ilvl="0">
<numFmt val="decimal"/>
</lvl>
</abstractNum>
'''
        document_xml = '''
<p>
<pPr>
<pStyle val="heading1"/>
<numPr>
<ilvl val="0" />
<numId val="1" />
</numPr>
</pPr>
<r>
<t>foo</t>
</r>
</p>
<p><r><t>bare paragraph</t></r></p>
<p>
<pPr>
<pStyle val="heading1"/>
<numPr>
<ilvl val="0" />
<numId val="1" />
</numPr>
</pPr>
<r>
<t>bar</t>
</r>
</p>
'''
        document = WordprocessingDocumentFactory()
        document.add(StyleDefinitionsPart, style_xml)
        document.add(NumberingDefinitionsPart, numbering_xml)
        document.add(MainDocumentPart, document_xml)
        expected_html = '''
<ol class="pydocx-list-style-type-decimal">
<li>
<strong>foo</strong>
<br />
bare paragraph
</li>
<li>
<strong>bar</strong>
</li>
</ol>
'''
        self.assert_document_generates_html(document, expected_html)

    def test_heading_in_table_cell(self):
        """A heading inside a table cell renders as <h1> within the <td>
        (the numPr is ignored here: no numbering part is added)."""
        style_xml = '''
<style styleId="heading1" type="paragraph">
<name val="Heading 1"/>
</style>
'''
        document_xml = '''
<tbl>
<tr>
<tc>
<p>
<pPr>
<pStyle val="heading1"/>
<numPr>
<ilvl val="0" />
<numId val="1" />
</numPr>
</pPr>
<r>
<t>foo</t>
</r>
</p>
</tc>
</tr>
</tbl>
'''
        document = WordprocessingDocumentFactory()
        document.add(StyleDefinitionsPart, style_xml)
        document.add(MainDocumentPart, document_xml)
        expected_html = '''
<table border="1">
<tr>
<td><h1>foo</h1></td>
</tr>
</table>
'''
        self.assert_document_generates_html(document, expected_html)

    def test_heading_as_new_list_following_bare_paragraph_plus_list(self):
        """A heading item using a different numId (2) after a list plus a
        bare paragraph starts a new <ol> rather than continuing the old
        one."""
        style_xml = '''
<style styleId="heading1" type="paragraph">
<name val="Heading 1"/>
</style>
'''
        numbering_xml = '''
<num numId="1">
<abstractNumId val="1"/>
</num>
<abstractNum abstractNumId="1">
<lvl ilvl="0">
<numFmt val="decimal"/>
</lvl>
</abstractNum>
<num numId="2">
<abstractNumId val="2"/>
</num>
<abstractNum abstractNumId="2">
<lvl ilvl="0">
<numFmt val="decimal"/>
</lvl>
</abstractNum>
'''
        document_xml = '''
<p>
<pPr>
<numPr>
<ilvl val="0" />
<numId val="1" />
</numPr>
</pPr>
<r>
<t>foo</t>
</r>
</p>
<p><r><t>bare paragraph</t></r></p>
<p>
<pPr>
<pStyle val="heading1"/>
<numPr>
<ilvl val="0" />
<numId val="2" />
</numPr>
</pPr>
<r>
<t>bar</t>
</r>
</p>
'''
        document = WordprocessingDocumentFactory()
        document.add(StyleDefinitionsPart, style_xml)
        document.add(NumberingDefinitionsPart, numbering_xml)
        document.add(MainDocumentPart, document_xml)
        expected_html = '''
<ol class="pydocx-list-style-type-decimal">
<li>foo</li>
</ol>
<p>bare paragraph</p>
<ol class="pydocx-list-style-type-decimal">
<li><strong>bar</strong></li>
</ol>
'''
        self.assert_document_generates_html(document, expected_html)

    def test_heading_as_list_following_bare_paragraph_plus_list(self):
        """With the same numId the heading continues the earlier list;
        the bare paragraph merges into the first item after a <br />."""
        style_xml = '''
<style styleId="heading1" type="paragraph">
<name val="Heading 1"/>
</style>
'''
        numbering_xml = '''
<num numId="1">
<abstractNumId val="1"/>
</num>
<abstractNum abstractNumId="1">
<lvl ilvl="0">
<numFmt val="decimal"/>
</lvl>
</abstractNum>
'''
        document_xml = '''
<p>
<pPr>
<numPr>
<ilvl val="0" />
<numId val="1" />
</numPr>
</pPr>
<r>
<t>foo</t>
</r>
</p>
<p><r><t>bare paragraph</t></r></p>
<p>
<pPr>
<pStyle val="heading1"/>
<numPr>
<ilvl val="0" />
<numId val="1" />
</numPr>
</pPr>
<r>
<t>bar</t>
</r>
</p>
'''
        document = WordprocessingDocumentFactory()
        document.add(StyleDefinitionsPart, style_xml)
        document.add(NumberingDefinitionsPart, numbering_xml)
        document.add(MainDocumentPart, document_xml)
        expected_html = '''
<ol class="pydocx-list-style-type-decimal">
<li>foo<br />bare paragraph</li>
<li><strong>bar</strong></li>
</ol>
'''
        self.assert_document_generates_html(document, expected_html)

    def test_list_heading_table_paragraph(self):
        """A sequence of list, real heading, paragraph, table and
        paragraph each render as separate top-level elements."""
        style_xml = '''
<style styleId="heading1" type="paragraph">
<name val="Heading 1"/>
</style>
'''
        numbering_xml = '''
<num numId="1">
<abstractNumId val="1"/>
</num>
<abstractNum abstractNumId="1">
<lvl ilvl="0">
<numFmt val="decimal"/>
</lvl>
</abstractNum>
'''
        document_xml = '''
<p>
<pPr>
<numPr>
<ilvl val="0"/>
<numId val="1"/>
</numPr>
</pPr>
<r>
<t>single list item</t>
</r>
</p>
<p>
<pPr>
<pStyle val="heading1"/>
</pPr>
<r>
<t>actual heading</t>
</r>
</p>
<p>
<r>
<t>before table</t>
</r>
</p>
<tbl>
<tr>
<tc>
<p>
<r>
<t>foo</t>
</r>
</p>
</tc>
</tr>
</tbl>
<p>
<r>
<t>after table</t>
</r>
</p>
'''
        document = WordprocessingDocumentFactory()
        document.add(StyleDefinitionsPart, style_xml)
        document.add(NumberingDefinitionsPart, numbering_xml)
        document.add(MainDocumentPart, document_xml)
        expected_html = '''
<ol class="pydocx-list-style-type-decimal">
<li>single list item</li>
</ol>
<h1>actual heading</h1>
<p>before table</p>
<table border="1">
<tr>
<td>foo</td>
</tr>
</table>
<p>after table</p>
'''
        self.assert_document_generates_html(document, expected_html)

    def test_single_lvl_list_has_precedence_over_headings(self):
        """When heading and non-heading paragraphs share one single-level
        list, the list markup wins and heading items are bolded."""
        style_xml = '''
<style styleId="heading1" type="paragraph">
<name val="Heading 1"/>
</style>
'''
        numbering_xml = '''
<num numId="1">
<abstractNumId val="1"/>
</num>
<abstractNum abstractNumId="1">
<lvl ilvl="0">
<numFmt val="decimal"/>
</lvl>
</abstractNum>
'''
        document_xml = '''
<p>
<pPr>
<pStyle val="heading1"/>
<numPr>
<ilvl val="0" />
<numId val="1" />
</numPr>
</pPr>
<r>
<t>foo</t>
</r>
</p>
<p>
<pPr>
<numPr>
<ilvl val="0" />
<numId val="1" />
</numPr>
</pPr>
<r>
<t>non-heading list item</t>
</r>
</p>
<p>
<pPr>
<pStyle val="heading1"/>
<numPr>
<ilvl val="0" />
<numId val="1" />
</numPr>
</pPr>
<r>
<t>bar</t>
</r>
</p>
'''
        document = WordprocessingDocumentFactory()
        document.add(StyleDefinitionsPart, style_xml)
        document.add(NumberingDefinitionsPart, numbering_xml)
        document.add(MainDocumentPart, document_xml)
        expected_html = '''
<ol class="pydocx-list-style-type-decimal">
<li><strong>foo</strong></li>
<li>non-heading list item</li>
<li><strong>bar</strong></li>
</ol>
'''
        self.assert_document_generates_html(document, expected_html)

    def test_heading_with_bookmark(self):
        """A bookmarkStart inside a heading becomes the heading's HTML id
        attribute."""
        document_xml = '''
<p>
<pPr>
<pStyle val="heading1"/>
</pPr>
<bookmarkStart name="testing"/>
<bookmarkEnd/>
<r>
<t>aaa</t>
</r>
</p>
'''
        style_xml = '''
<style styleId="heading1" type="paragraph">
<name val="Heading 1"/>
</style>
'''
        document = WordprocessingDocumentFactory()
        document.add(StyleDefinitionsPart, style_xml)
        document.add(MainDocumentPart, document_xml)
        expected_html = '<h1 id="testing">aaa</h1>'
        self.assert_document_generates_html(document, expected_html)
| 28.117571
| 83
| 0.402978
| 1,710
| 21,763
| 4.984795
| 0.092398
| 0.049038
| 0.009503
| 0.019944
| 0.838221
| 0.8313
| 0.826725
| 0.823909
| 0.814876
| 0.808423
| 0
| 0.014345
| 0.468272
| 21,763
| 773
| 84
| 28.153946
| 0.722261
| 0.000597
| 0
| 0.854286
| 0
| 0
| 0.668475
| 0.034164
| 0
| 0
| 0
| 0
| 0.021429
| 1
| 0.021429
| false
| 0
| 0.007143
| 0
| 0.032857
| 0.001429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b77ebe7ff1f711e2b54605539bb4d3901a4ec038
| 6,398
|
py
|
Python
|
grammars/gen/CouncilOfStateListener.py
|
OpenLawsGR/judgments2AKN
|
0c6217349cde36058d5599800e289fdf0d3eaf23
|
[
"MIT"
] | 5
|
2019-11-28T17:02:59.000Z
|
2021-02-05T17:39:49.000Z
|
grammars/gen/CouncilOfStateListener.py
|
OpenLawsGR/judgments2AKN
|
0c6217349cde36058d5599800e289fdf0d3eaf23
|
[
"MIT"
] | null | null | null |
grammars/gen/CouncilOfStateListener.py
|
OpenLawsGR/judgments2AKN
|
0c6217349cde36058d5599800e289fdf0d3eaf23
|
[
"MIT"
] | null | null | null |
# Generated from /home/plessas/EDBM34/grammars/CouncilOfState.g4 by ANTLR 4.7.2
from antlr4 import *
# This class defines a complete listener for a parse tree produced by CouncilOfStateParser.
class CouncilOfStateListener(ParseTreeListener):
    """No-op base listener for parse trees produced by CouncilOfStateParser.

    Auto-generated by ANTLR (see the header comment at the top of this file);
    every enter*/exit* callback is an empty hook. Subclass and override only
    the callbacks you need. Do not hand-edit this file: changes are lost when
    the listener is regenerated from the grammar.

    NOTE(review): 'CaseNmuber' misspells 'CaseNumber'; the typo originates in
    the grammar rule name, so it must be fixed in CouncilOfState.g4 and this
    file regenerated — renaming the methods here would break the parser link.
    """

    # Enter a parse tree produced by CouncilOfStateParser#akomaNtoso.
    def enterAkomaNtoso(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#akomaNtoso.
    def exitAkomaNtoso(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#text.
    def enterText(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#text.
    def exitText(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#judgment.
    def enterJudgment(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#judgment.
    def exitJudgment(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#header.
    def enterHeader(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#header.
    def exitHeader(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#caseNmuber.
    def enterCaseNmuber(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#caseNmuber.
    def exitCaseNmuber(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#docNumber.
    def enterDocNumber(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#docNumber.
    def exitDocNumber(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#docProponent.
    def enterDocProponent(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#docProponent.
    def exitDocProponent(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#headerPar.
    def enterHeaderPar(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#headerPar.
    def exitHeaderPar(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#judgmentBody.
    def enterJudgmentBody(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#judgmentBody.
    def exitJudgmentBody(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#introduction.
    def enterIntroduction(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#introduction.
    def exitIntroduction(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#introductionIntro.
    def enterIntroductionIntro(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#introductionIntro.
    def exitIntroductionIntro(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#intro_Par.
    def enterIntro_Par(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#intro_Par.
    def exitIntro_Par(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#motivation.
    def enterMotivation(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#motivation.
    def exitMotivation(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#motivPar.
    def enterMotivPar(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#motivPar.
    def exitMotivPar(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#blockList.
    def enterBlockList(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#blockList.
    def exitBlockList(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#ste_item.
    def enterSte_item(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#ste_item.
    def exitSte_item(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#num.
    def enterNum(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#num.
    def exitNum(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#itemPar.
    def enterItemPar(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#itemPar.
    def exitItemPar(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#decision.
    def enterDecision(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#decision.
    def exitDecision(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#decisionIntro.
    def enterDecisionIntro(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#decisionIntro.
    def exitDecisionIntro(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#outcomePar.
    def enterOutcomePar(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#outcomePar.
    def exitOutcomePar(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#decisionPar.
    def enterDecisionPar(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#decisionPar.
    def exitDecisionPar(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#outcome.
    def enterOutcome(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#outcome.
    def exitOutcome(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#conclusions.
    def enterConclusions(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#conclusions.
    def exitConclusions(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#conclusionIntro.
    def enterConclusionIntro(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#conclusionIntro.
    def exitConclusionIntro(self, ctx):
        pass

    # Enter a parse tree produced by CouncilOfStateParser#concPar.
    def enterConcPar(self, ctx):
        pass

    # Exit a parse tree produced by CouncilOfStateParser#concPar.
    def exitConcPar(self, ctx):
        pass
| 26.547718
| 91
| 0.696468
| 716
| 6,398
| 6.212291
| 0.159218
| 0.071493
| 0.119155
| 0.214478
| 0.799685
| 0.799685
| 0.790692
| 0.789568
| 0.645683
| 0.636241
| 0
| 0.001448
| 0.244295
| 6,398
| 240
| 92
| 26.658333
| 0.918511
| 0.529384
| 0
| 0.490566
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.490566
| false
| 0.490566
| 0.009434
| 0
| 0.509434
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 9
|
b79c9539caa2a13e2d6ef318bf54374661b68f74
| 28,237
|
py
|
Python
|
mozillians/groups/tests/test_tasks.py
|
divyamoncy/mozillians
|
d53d1d05d1f05b74f8533541e37083dcb89b29a8
|
[
"BSD-3-Clause"
] | 202
|
2015-01-14T10:19:55.000Z
|
2021-12-11T06:04:16.000Z
|
mozillians/groups/tests/test_tasks.py
|
divyamoncy/mozillians
|
d53d1d05d1f05b74f8533541e37083dcb89b29a8
|
[
"BSD-3-Clause"
] | 2,924
|
2015-01-07T11:27:32.000Z
|
2021-01-19T14:05:17.000Z
|
mozillians/groups/tests/test_tasks.py
|
divyamoncy/mozillians
|
d53d1d05d1f05b74f8533541e37083dcb89b29a8
|
[
"BSD-3-Clause"
] | 270
|
2015-01-02T18:31:01.000Z
|
2021-02-17T20:57:44.000Z
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.conf import settings
from django.template.loader import get_template
from django.test import override_settings
from django.utils.timezone import now
from mock import patch, ANY
from nose.tools import eq_, ok_
from mozillians.common.tests import TestCase
from mozillians.groups import tasks
from mozillians.groups.models import Group, GroupMembership, Skill
from mozillians.groups.tasks import (invalidate_group_membership, email_membership_change,
notify_membership_renewal)
from mozillians.groups.tests import GroupFactory, InviteFactory, SkillFactory
from mozillians.users.tests import UserFactory
class SendPendingMembershipEmailsTests(TestCase):
    """Tests for tasks.send_pending_membership_emails (curator reminder
    emails about pending join requests) and tasks.remove_empty_groups.

    NOTE(review): test_remove_empty_groups exercises remove_empty_groups,
    not the pending-membership emails this class is named after — consider
    moving it to its own TestCase.
    """

    def test_remove_empty_groups(self):
        # Groups/skills with at least one member survive; empty ones are
        # deleted by the task.
        user = UserFactory.create()
        group_1 = GroupFactory.create()
        GroupFactory.create()  # empty group — expected to be removed
        skill_1 = SkillFactory.create()
        SkillFactory.create()  # empty skill — expected to be removed
        group_1.add_member(user.userprofile)
        skill_1.members.add(user.userprofile)
        tasks.remove_empty_groups()
        eq_(Group.objects.all().count(), 1)
        ok_(Group.objects.filter(id=group_1.id).exists())
        eq_(Skill.objects.all().count(), 1)
        ok_(Skill.objects.filter(id=skill_1.id).exists())

    def test_sending_pending_email(self):
        # If a curated group has a pending membership, added since the reminder email
        # was last sent, send the curator an email. It should contain the count of
        # all pending memberships.
        curator = UserFactory.create()
        group = GroupFactory.create()
        group.curators.add(curator.userprofile)
        # Add a couple of pending memberships
        group.add_member(UserFactory.create().userprofile, GroupMembership.PENDING)
        group.add_member(UserFactory.create().userprofile, GroupMembership.PENDING)
        with patch('mozillians.groups.tasks.send_mail', autospec=True) as mock_send_mail:
            tasks.send_pending_membership_emails()
        ok_(mock_send_mail.called)
        # Should only have been called once
        eq_(1, len(mock_send_mail.call_args_list))
        # The message body should mention that there are 2 pending memberships
        subject, body, from_addr, to_list = mock_send_mail.call_args[0]
        eq_('2 outstanding requests to join Mozillians group "%s"' % group.name, subject)
        ok_('There are 2 outstanding requests' in body)
        # Full path to group page is in the message
        ok_(group.get_absolute_url() in body)
        ok_(curator.email in to_list)
        # Add another pending membership
        group.add_member(UserFactory.create().userprofile, GroupMembership.PENDING)
        # Should send email again
        with patch('mozillians.groups.tasks.send_mail', autospec=True) as mock_send_mail:
            tasks.send_pending_membership_emails()
        ok_(mock_send_mail.called)

    def test_sending_pending_email_singular(self):
        # If a curated group has exactly one pending membership, added since the reminder email
        # was last sent, send the curator an email. It should contain the count of
        # all pending memberships, which should be one, and should use the singular text.
        curator = UserFactory.create()
        group = GroupFactory.create()
        group.curators.add(curator.userprofile)
        # Add one pending membership
        group.add_member(UserFactory.create().userprofile, GroupMembership.PENDING)
        with patch('mozillians.groups.tasks.send_mail', autospec=True) as mock_send_mail:
            tasks.send_pending_membership_emails()
        ok_(mock_send_mail.called)
        # The message body should mention that there is 1 pending memberships
        subject, body, from_addr, to_list = mock_send_mail.call_args[0]
        eq_('1 outstanding request to join Mozillians group "%s"' % group.name, subject)
        ok_('There is 1 outstanding request' in body)
        # Full path to group page is in the message
        ok_(group.get_absolute_url() in body)
        ok_(curator.email in to_list)

    def test_sending_pending_email_already_sent(self):
        # If a curated group has a pending membership, but it was added before the
        # last time a reminder email was sent, do not send the curator an email.
        # curated group:
        group = GroupFactory.create()
        group.curators.add(UserFactory.create().userprofile)
        # Pending membership
        user1 = UserFactory.create()
        group.add_member(user1.userprofile, GroupMembership.PENDING)
        membership = GroupMembership.objects.get(userprofile=user1.userprofile, group=group)
        membership.save()
        # Send email. This should update the field remembering the max pending request pk.
        tasks.send_pending_membership_emails()
        # Non-pending membership
        user2 = UserFactory.create()
        group.add_member(user2.userprofile, GroupMembership.MEMBER)
        # None of this should trigger an email send
        with patch('mozillians.groups.tasks.send_mail', autospec=True) as mock_send_mail:
            tasks.send_pending_membership_emails()
        ok_(not mock_send_mail.called)

    def test_sending_pending_email_non_curated(self):
        # If a non-curated group has a pending membership, do not send anyone an email
        group = GroupFactory.create(accepting_new_members=Group.REVIEWED)
        user = UserFactory.create()
        group.add_member(user.userprofile, GroupMembership.PENDING)
        with patch('mozillians.groups.tasks.send_mail', autospec=True) as mock_send_mail:
            tasks.send_pending_membership_emails()
        ok_(not mock_send_mail.called)
class EmailMembershipChangeTests(TestCase):
    """Tests for the email_membership_change task.

    Each test triggers a membership-status transition and verifies that
    exactly the expected template, subject, sender, recipient and body
    snippet were used. The four scenarios share one checker so the
    per-transition expectations stand out instead of the mock plumbing.
    """

    def setUp(self):
        self.group = GroupFactory.create()
        self.group.curators.add(UserFactory.create().userprofile)
        self.user = UserFactory.create()

    def _assert_membership_email(self, template_name, old_status, new_status,
                                 expected_subject, expected_snippet):
        """Run email_membership_change(old_status -> new_status) with
        template loading and send_mail mocked, then verify the template
        name, sender, recipient list, subject and a body snippet."""
        template = get_template(template_name)
        with patch('mozillians.groups.tasks.get_template', autospec=True) as mock_get_template:
            mock_get_template.return_value = template
            with patch('mozillians.groups.tasks.send_mail', autospec=True) as mock_send_mail:
                email_membership_change(self.group.pk, self.user.pk,
                                        old_status, new_status)
        ok_(mock_send_mail.called)
        ok_(mock_get_template.called)
        eq_(template_name, mock_get_template.call_args[0][0])
        subject, body, from_addr, to_list = mock_send_mail.call_args[0]
        eq_(settings.FROM_NOREPLY, from_addr)
        eq_([self.user.email], to_list)
        eq_(expected_subject, subject)
        ok_(expected_snippet in body)

    def test_member_accepted(self):
        # PENDING -> MEMBER sends the "accepted" notification.
        self._assert_membership_email(
            'groups/email/accepted.txt',
            GroupMembership.PENDING, GroupMembership.MEMBER,
            'Accepted to Mozillians group "%s"' % self.group.name,
            'You have been accepted')

    def test_member_rejected(self):
        # PENDING -> removed (None) sends the "rejected" notification.
        self._assert_membership_email(
            'groups/email/rejected.txt',
            GroupMembership.PENDING, None,
            'Not accepted to Mozillians group "%s"' % self.group.name,
            'You have not been accepted')

    def test_membership_changed(self):
        # MEMBER -> PENDING sends the generic status-changed notification.
        self._assert_membership_email(
            'groups/email/membership_status_changed.txt',
            GroupMembership.MEMBER, GroupMembership.PENDING,
            'Status changed for Mozillians group "%s"' % self.group.name,
            'Your membership status has changed')

    def test_member_removed(self):
        # MEMBER -> removed (None) sends the "removed" notification.
        self._assert_membership_email(
            'groups/email/member_removed.txt',
            GroupMembership.MEMBER, None,
            'Removed from Mozillians group "%s"' % self.group.name,
            'You have been removed')
class MembershipInvalidationTests(TestCase):
    """Test membership invalidation.

    invalidate_group_membership() expires memberships older than a group's
    invalidation_days: OPEN groups drop the member outright, while other
    group types demote the member back to PENDING. Curators are never
    invalidated.
    """

    @patch('mozillians.groups.models.email_membership_change')
    def test_invalidate_open_group(self, mail_task):
        member = UserFactory.create(vouched=True)
        curator = UserFactory.create(vouched=True)
        # Group of type Group.OPEN
        group = GroupFactory.create(name='Foo', terms='Example terms.', invalidation_days=5,
                                    accepting_new_members=Group.OPEN)
        group.curators.add(curator.userprofile)
        group.add_member(member.userprofile)
        group.add_member(curator.userprofile)
        membership = group.groupmembership_set.filter(userprofile=member.userprofile)
        curator_membership = group.groupmembership_set.filter(userprofile=curator.userprofile)
        # Age both memberships past the 5-day invalidation window.
        membership.update(updated_on=datetime.now() - timedelta(days=10))
        curator_membership.update(updated_on=datetime.now() - timedelta(days=10))
        eq_(membership[0].status, GroupMembership.MEMBER)
        eq_(curator_membership[0].status, GroupMembership.MEMBER)
        invalidate_group_membership()
        # Open group: stale member is removed entirely; curator is kept.
        ok_(not group.groupmembership_set.filter(userprofile=member.userprofile).exists())
        ok_(group.groupmembership_set.filter(userprofile=curator.userprofile).exists())
        mail_task.delay.assert_called_once_with(group.id, member.id, GroupMembership.MEMBER, None)

    @patch('mozillians.groups.models.email_membership_change')
    def test_invalidate_group_by_request(self, mail_task):
        member = UserFactory.create(vouched=True)
        curator = UserFactory.create(vouched=True)
        group = GroupFactory.create(name='Foo', invalidation_days=5,
                                    accepting_new_members=Group.REVIEWED)
        group.curators.add(curator.userprofile)
        group.add_member(curator.userprofile)
        group.add_member(member.userprofile)
        membership = group.groupmembership_set.filter(userprofile=member.userprofile)
        curator_membership = group.groupmembership_set.filter(userprofile=curator.userprofile)
        membership.update(updated_on=datetime.now() - timedelta(days=10))
        curator_membership.update(updated_on=datetime.now() - timedelta(days=10))
        eq_(membership[0].status, GroupMembership.MEMBER)
        eq_(curator_membership[0].status, GroupMembership.MEMBER)
        invalidate_group_membership()
        # Reviewed group: stale member is demoted to PENDING; curator is kept.
        ok_(group.groupmembership_set.filter(userprofile=member.userprofile,
                                             status=GroupMembership.PENDING).exists())
        ok_(group.groupmembership_set.filter(userprofile=curator.userprofile).exists())
        mail_task.delay.assert_called_once_with(group.id, member.id, GroupMembership.MEMBER,
                                                GroupMembership.PENDING)

    # Renamed from 'invalidate_closed_group': without the 'test_' prefix the
    # test runner never collected this method, so it was effectively dead.
    # NOTE(review): newly enabled — confirm it passes in CI.
    @patch('mozillians.groups.models.email_membership_change')
    def test_invalidate_closed_group(self, mail_task):
        member = UserFactory.create(vouched=True)
        curator = UserFactory.create(vouched=True)
        group = GroupFactory.create(name='Foo', invalidation_days=5,
                                    accepting_new_members=Group.CLOSED)
        group.curators.add(curator.userprofile)
        group.add_member(curator.userprofile)
        group.add_member(member.userprofile)
        membership = group.groupmembership_set.filter(userprofile=member.userprofile)
        curator_membership = group.groupmembership_set.filter(userprofile=curator.userprofile)
        membership.update(updated_on=datetime.now() - timedelta(days=10))
        curator_membership.update(updated_on=datetime.now() - timedelta(days=10))
        eq_(membership[0].status, GroupMembership.MEMBER)
        eq_(curator_membership[0].status, GroupMembership.MEMBER)
        invalidate_group_membership()
        # Closed group: stale member is demoted to PENDING; curator is kept.
        ok_(group.groupmembership_set.filter(userprofile=member.userprofile,
                                             status=GroupMembership.PENDING).exists())
        ok_(group.groupmembership_set.filter(userprofile=curator.userprofile).exists())
        mail_task.delay.assert_called_once_with(group.id, member.id, GroupMembership.MEMBER,
                                                GroupMembership.PENDING)

    @patch('mozillians.groups.models.email_membership_change')
    def test_invalidate_group_pending_membership(self, mail_task):
        """Invalidate a group where a user has not yet been accepted by a curator.

        Type is indifferent for this test.
        """
        member = UserFactory.create(vouched=True)
        curator = UserFactory.create(vouched=True)
        group = GroupFactory.create(name='Foo', invalidation_days=5)
        group.curators.add(curator.userprofile)
        group.add_member(curator.userprofile)
        GroupMembership.objects.create(userprofile=member.userprofile, group=group,
                                       status=GroupMembership.PENDING,
                                       updated_on=datetime.now() - timedelta(days=10))
        curator_membership = group.groupmembership_set.filter(userprofile=curator.userprofile)
        curator_membership.update(updated_on=datetime.now() - timedelta(days=10))
        eq_(curator_membership[0].status, GroupMembership.MEMBER)
        invalidate_group_membership()
        # A membership that never left PENDING stays PENDING; no mail goes out.
        ok_(group.groupmembership_set.filter(userprofile=member.userprofile,
                                             status=GroupMembership.PENDING).exists())
        ok_(group.groupmembership_set.filter(userprofile=curator.userprofile).exists())
        ok_(not mail_task.called)

    # Renamed from 'invalidate_group_pending_terms': without the 'test_'
    # prefix the test runner never collected this method, so it was
    # effectively dead. NOTE(review): newly enabled — confirm it passes in CI.
    @patch('mozillians.groups.models.email_membership_change')
    def test_invalidate_group_pending_terms(self, mail_task):
        """Invalidate a group where a user has not yet accepted the terms.

        Type is indifferent for this test.
        """
        member = UserFactory.create(vouched=True)
        curator = UserFactory.create(vouched=True)
        group = GroupFactory.create(name='Foo', invalidation_days=5)
        group.curators.add(curator.userprofile)
        group.add_member(curator.userprofile)
        GroupMembership.objects.create(userprofile=member.userprofile, group=group,
                                       status=GroupMembership.PENDING_TERMS,
                                       updated_on=datetime.now() - timedelta(days=10))
        curator_membership = group.groupmembership_set.filter(userprofile=curator.userprofile)
        curator_membership.update(updated_on=datetime.now() - timedelta(days=10))
        eq_(curator_membership[0].status, GroupMembership.MEMBER)
        invalidate_group_membership()
        # A membership awaiting terms acceptance is untouched; no mail goes out.
        ok_(group.groupmembership_set.filter(userprofile=member.userprofile,
                                             status=GroupMembership.PENDING_TERMS).exists())
        ok_(group.groupmembership_set.filter(userprofile=curator.userprofile).exists())
        ok_(not mail_task.called)
class InvitationEmailTests(TestCase):
    """Tests for the invitation notification tasks: invite sent to the
    redeemer, invite accepted/rejected (notifies the inviter), and invite
    no longer valid."""

    @patch('mozillians.groups.tasks.send_mail')
    @override_settings(FROM_NOREPLY='noreply@example.com')
    def test_send_invitation_email(self, mock_send_email):
        inviter, redeemer = UserFactory.create_batch(2)
        group = GroupFactory.create(name='Foo')
        template_name = 'groups/email/invite_email.txt'
        invite = InviteFactory.create(inviter=inviter.userprofile,
                                      redeemer=redeemer.userprofile,
                                      group=group)
        with patch('mozillians.groups.tasks.get_template', autospec=True) as mock_get_template:
            tasks.notify_redeemer_invitation(invite.pk)
        # Expected positional args of send_mail; the rendered body is opaque,
        # so it is matched with mock.ANY.
        args = [
            '[Mozillians] You have been invited to join group "foo"',
            ANY,
            'noreply@example.com',
            [redeemer.userprofile.email]
        ]
        ok_(mock_get_template.called)
        eq_(template_name, mock_get_template.call_args[0][0])
        mock_send_email.assert_called_once_with(*args)

    @patch('mozillians.groups.tasks.send_mail')
    @override_settings(FROM_NOREPLY='noreply@example.com')
    def test_send_invitation_accepted_email(self, mock_send_email):
        inviter = UserFactory.create()
        # Non-ASCII full name exercises unicode handling in the subject.
        redeemer = UserFactory.create(userprofile={'full_name': u'fôô bar'})
        group = GroupFactory.create(name='Foo')
        template_name = 'groups/email/invite_accepted_email.txt'
        invite = InviteFactory.create(inviter=inviter.userprofile,
                                      redeemer=redeemer.userprofile,
                                      group=group)
        with patch('mozillians.groups.tasks.get_template', autospec=True) as mock_get_template:
            tasks.notify_curators_invitation_accepted(invite.pk)
        args = [u'[Mozillians] fôô bar has accepted your invitation to join group "foo"',
                ANY,
                'noreply@example.com',
                [inviter.userprofile.email]]
        ok_(mock_get_template.called)
        eq_(template_name, mock_get_template.call_args[0][0])
        mock_send_email.assert_called_once_with(*args)

    @patch('mozillians.groups.tasks.send_mail')
    @override_settings(FROM_NOREPLY='noreply@example.com')
    def test_send_invitation_rejected_email(self, mock_send_email):
        inviter = UserFactory.create()
        redeemer = UserFactory.create(userprofile={'full_name': u'fôô bar'})
        group = GroupFactory.create(name='Foo')
        template_name = 'groups/email/invite_rejected_email.txt'
        InviteFactory.create(inviter=inviter.userprofile, redeemer=redeemer.userprofile,
                             group=group)
        with patch('mozillians.groups.tasks.get_template', autospec=True) as mock_get_template:
            # NOTE: 'args' is reused below for the expected send_mail args.
            args = [redeemer.userprofile.pk, inviter.userprofile.pk, group.pk]
            tasks.notify_curators_invitation_rejected(*args)
        args = [u'[Mozillians] fôô bar has rejected your invitation to join group "foo"',
                ANY,
                'noreply@example.com',
                [inviter.userprofile.email]]
        ok_(mock_get_template.called)
        eq_(template_name, mock_get_template.call_args[0][0])
        mock_send_email.assert_called_once_with(*args)

    @patch('mozillians.groups.tasks.send_mail')
    @override_settings(FROM_NOREPLY='noreply@example.com')
    def test_send_invitation_invalid_email(self, mock_send_email):
        inviter, redeemer = UserFactory.create_batch(2)
        group = GroupFactory.create(name='Foo')
        template_name = 'groups/email/invite_invalid_email.txt'
        InviteFactory.create(inviter=inviter.userprofile,
                             redeemer=redeemer.userprofile,
                             group=group)
        with patch('mozillians.groups.tasks.get_template', autospec=True) as mock_get_template:
            tasks.notify_redeemer_invitation_invalid(redeemer.userprofile.pk, group.pk)
        args = [
            '[Mozillians] Invitation to group "foo" is no longer valid',
            ANY,
            'noreply@example.com',
            [redeemer.userprofile.email]
        ]
        ok_(mock_get_template.called)
        eq_(template_name, mock_get_template.call_args[0][0])
        mock_send_email.assert_called_once_with(*args)
class MembershipRenewalNotificationTests(TestCase):
    """Tests for tasks.notify_membership_renewal: shortly before a
    membership expires, the member is notified, along with the group's
    curators (or, for invited members, the inviter if they are a curator).
    """

    @patch('mozillians.groups.tasks.send_mail')
    @patch('mozillians.groups.tasks.now')
    def test_send_renewal_notification_email(self, mock_now, mock_send_mail):
        """Test renewal notification functionality"""
        curator = UserFactory.create()
        member = UserFactory.create()
        group = GroupFactory.create(name='foobar', invalidation_days=365,
                                    accepting_new_members=Group.REVIEWED)
        group.curators.add(curator.userprofile)
        group.add_member(member.userprofile)
        # 351 = 365 - 14: presumably just inside a two-week notice window
        # (cf. test_invalidation_days_less_than_2_weeks below) — confirm
        # against notify_membership_renewal.
        datetime_now = now() + timedelta(days=351)
        mock_now.return_value = datetime_now
        notify_membership_renewal()
        ok_(mock_send_mail.called)
        eq_(2, len(mock_send_mail.call_args_list))
        # First message goes to the expiring member.
        name, args, kwargs = mock_send_mail.mock_calls[0]
        subject, body, from_addr, to_list = args
        eq_(subject, '[Mozillians] Your membership to Mozilla group "foobar" is about to expire')
        eq_(from_addr, settings.FROM_NOREPLY)
        eq_(to_list, [member.userprofile.email])

    @patch('mozillians.groups.tasks.send_mail')
    @patch('mozillians.groups.tasks.now')
    def test_send_renewal_notification_curators_email(self, mock_now, mock_send_mail):
        """Test renewal notification functionality for curators"""
        curator1 = UserFactory.create(email='foo@example.com')
        curator2 = UserFactory.create(email='foobar@example.com')
        member = UserFactory.create(userprofile={'full_name': 'Example Name'})
        group = GroupFactory.create(name='foobar', invalidation_days=365,
                                    accepting_new_members=Group.REVIEWED)
        group.curators.add(curator1.userprofile)
        group.curators.add(curator2.userprofile)
        group.add_member(member.userprofile)
        datetime_now = now() + timedelta(days=351)
        mock_now.return_value = datetime_now
        notify_membership_renewal()
        ok_(mock_send_mail.called)
        # One mail to the member (call 0) plus one per curator (calls 1-2).
        eq_(3, len(mock_send_mail.mock_calls))
        # Check email for curator1
        name, args, kwargs = mock_send_mail.mock_calls[1]
        subject, body, from_addr, to_list = args
        eq_(subject, '[Mozillians][foobar] Membership of "Example Name" is about to expire')
        eq_(from_addr, settings.FROM_NOREPLY)
        eq_(list(to_list), [u'foo@example.com'])
        # Check email for curator2
        name, args, kwargs = mock_send_mail.mock_calls[2]
        subject, body, from_addr, to_list = args
        eq_(subject, '[Mozillians][foobar] Membership of "Example Name" is about to expire')
        eq_(from_addr, settings.FROM_NOREPLY)
        eq_(list(to_list), [u'foobar@example.com'])

    @patch('mozillians.groups.tasks.send_mail')
    @patch('mozillians.groups.tasks.now')
    def test_send_renewal_notification_inviters_email(self, mock_now, mock_send_mail):
        """Test renewal notification for an invited member: only the
        inviter (a curator here), not every curator, is notified."""
        curator1 = UserFactory.create(email='foo@example.com')
        curator2 = UserFactory.create(email='foobar@example.com')
        curator3 = UserFactory.create(email='bar@example.com')
        member = UserFactory.create(userprofile={'full_name': 'Example Name'})
        group = GroupFactory.create(name='foobar', invalidation_days=365,
                                    accepting_new_members=Group.CLOSED)
        group.curators.add(curator1.userprofile)
        group.curators.add(curator2.userprofile)
        group.curators.add(curator3.userprofile)
        group.add_member(member.userprofile)
        # curator3 invited the member.
        InviteFactory.create(inviter=curator3.userprofile, redeemer=member.userprofile,
                             group=group)
        datetime_now = now() + timedelta(days=351)
        mock_now.return_value = datetime_now
        notify_membership_renewal()
        ok_(mock_send_mail.called)
        # One mail to the member (call 0) plus one to the inviter (call 1).
        eq_(2, len(mock_send_mail.mock_calls))
        # Check email for inviter
        name, args, kwargs = mock_send_mail.mock_calls[1]
        subject, body, from_addr, to_list = args
        eq_(subject, '[Mozillians][foobar] Membership of "Example Name" is about to expire')
        eq_(from_addr, settings.FROM_NOREPLY)
        eq_(list(to_list), [u'bar@example.com'])

    @patch('mozillians.groups.tasks.send_mail')
    @patch('mozillians.groups.tasks.now')
    def test_send_renewal_notification_inviter_not_curator(self, mock_now, mock_send_mail):
        """Test renewal notification when the inviter is not a curator:
        the curators, not the inviter, are notified."""
        curator1 = UserFactory.create(email='foo@example.com')
        curator2 = UserFactory.create(email='foobar@example.com')
        inviter = UserFactory.create(email='bar@example.com')
        member = UserFactory.create(userprofile={'full_name': 'Example Name'})
        group = GroupFactory.create(name='foobar', invalidation_days=365,
                                    accepting_new_members=Group.CLOSED)
        group.curators.add(curator1.userprofile)
        group.curators.add(curator2.userprofile)
        group.add_member(member.userprofile)
        InviteFactory.create(inviter=inviter.userprofile, redeemer=member.userprofile,
                             group=group)
        datetime_now = now() + timedelta(days=351)
        mock_now.return_value = datetime_now
        notify_membership_renewal()
        ok_(mock_send_mail.called)
        eq_(3, len(mock_send_mail.mock_calls))
        # Check email to mozillians
        name, args, kwargs = mock_send_mail.mock_calls[0]
        subject, body, from_addr, to_list = args
        eq_(subject, '[Mozillians] Your membership to Mozilla group "foobar" is about to expire')
        eq_(from_addr, settings.FROM_NOREPLY)
        eq_(to_list, [member.userprofile.email])
        # Check email for curator1
        name, args, kwargs = mock_send_mail.mock_calls[1]
        subject, body, from_addr, to_list = args
        eq_(subject, '[Mozillians][foobar] Membership of "Example Name" is about to expire')
        eq_(from_addr, settings.FROM_NOREPLY)
        eq_(list(to_list), [u'foo@example.com'])
        # Check email for curator2
        name, args, kwargs = mock_send_mail.mock_calls[2]
        subject, body, from_addr, to_list = args
        eq_(subject, '[Mozillians][foobar] Membership of "Example Name" is about to expire')
        eq_(from_addr, settings.FROM_NOREPLY)
        eq_(list(to_list), [u'foobar@example.com'])

    @patch('mozillians.groups.tasks.now')
    def test_invalidation_days_less_than_2_weeks(self, mock_now):
        """Test renewal notification for groups with invalidation_days less than 2 weeks"""
        curator = UserFactory.create()
        member = UserFactory.create()
        group = GroupFactory.create(name='foobar', invalidation_days=10,
                                    accepting_new_members=Group.REVIEWED)
        group.curators.add(curator.userprofile)
        group.add_member(member.userprofile)
        datetime_now = now() + timedelta(days=10)
        mock_now.return_value = datetime_now
        with patch('mozillians.groups.tasks.send_mail', autospec=True) as mock_send_mail:
            notify_membership_renewal()
        ok_(not mock_send_mail.called)
| 47.457143
| 98
| 0.680455
| 3,287
| 28,237
| 5.613934
| 0.069973
| 0.027746
| 0.029914
| 0.043679
| 0.853845
| 0.830597
| 0.80892
| 0.796185
| 0.789574
| 0.773208
| 0
| 0.006306
| 0.224988
| 28,237
| 594
| 99
| 47.537037
| 0.836913
| 0.067358
| 0
| 0.726862
| 0
| 0
| 0.121888
| 0.058027
| 0
| 0
| 0
| 0
| 0.015801
| 1
| 0.054176
| false
| 0
| 0.029345
| 0
| 0.094808
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4d087e6522388f96c33c6fa172ac5137b8196171
| 534
|
py
|
Python
|
iam_starter/aws_util_exceptions.py
|
billtrust/iam-starter
|
765aaded6e46be5382e69726aaaf363a98b288e0
|
[
"MIT"
] | 2
|
2019-08-25T11:01:07.000Z
|
2021-03-22T10:25:49.000Z
|
iam_starter/aws_util_exceptions.py
|
billtrust/iam-starter
|
765aaded6e46be5382e69726aaaf363a98b288e0
|
[
"MIT"
] | null | null | null |
iam_starter/aws_util_exceptions.py
|
billtrust/iam-starter
|
765aaded6e46be5382e69726aaaf363a98b288e0
|
[
"MIT"
] | null | null | null |
class ProfileParsingError(Exception):
    """Raised when an AWS profile configuration cannot be parsed."""
class RoleNotFoundError(Exception):
    """Raised when the requested IAM role cannot be found.

    Attributes:
        credential_method: a string describing the IAM context in which
            the role lookup was attempted.
    """

    def __init__(self, credential_method, *args, **kwargs):
        # Cooperative super() call instead of the legacy direct
        # Exception.__init__(self, ...) invocation; same runtime behavior.
        super(RoleNotFoundError, self).__init__(*args, **kwargs)
        # a string describing the IAM context
        self.credential_method = credential_method
class AssumeRoleError(Exception):
    """Raised when assuming an IAM role fails.

    Attributes:
        credential_method: a string describing the IAM context in which
            the assume-role call was attempted.
    """

    def __init__(self, credential_method, *args, **kwargs):
        # Cooperative super() call instead of the legacy direct
        # Exception.__init__(self, ...) invocation; same runtime behavior.
        super(AssumeRoleError, self).__init__(*args, **kwargs)
        # a string describing the IAM context
        self.credential_method = credential_method
| 33.375
| 59
| 0.709738
| 56
| 534
| 6.375
| 0.339286
| 0.268908
| 0.22409
| 0.112045
| 0.778711
| 0.778711
| 0.778711
| 0.778711
| 0.778711
| 0.778711
| 0
| 0
| 0.198502
| 534
| 15
| 60
| 35.6
| 0.834112
| 0.132959
| 0
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0.1
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
4d37425b57a0f469decd0e7d41a67cfdca83ab27
| 22,430
|
py
|
Python
|
benchmarks/import_cost/functions_100_with_5_contracts.py
|
kklein/icontract
|
718ef1733cc2cce6d3c8f59a5a37de96f8be6664
|
[
"MIT"
] | 244
|
2018-08-15T22:58:58.000Z
|
2022-03-12T16:10:39.000Z
|
benchmarks/import_cost/functions_100_with_5_contracts.py
|
kklein/icontract
|
718ef1733cc2cce6d3c8f59a5a37de96f8be6664
|
[
"MIT"
] | 157
|
2018-08-29T21:36:47.000Z
|
2022-02-14T19:30:24.000Z
|
benchmarks/import_cost/functions_100_with_5_contracts.py
|
kklein/icontract
|
718ef1733cc2cce6d3c8f59a5a37de96f8be6664
|
[
"MIT"
] | 23
|
2019-04-24T11:09:10.000Z
|
2022-02-14T15:56:26.000Z
|
#!/usr/bin/env python3
import icontract
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func0(x: int) -> None:
    # Import-cost benchmark fixture: no-op function carrying 5 stacked
    # preconditions; only decoration overhead is measured. Do not simplify.
    pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func1(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func2(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func3(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func4(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func5(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func6(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func7(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func8(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func9(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func10(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func11(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func12(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func13(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func14(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func15(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func16(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func17(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func18(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func19(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func20(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func21(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func22(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func23(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func24(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func25(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func26(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func27(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func28(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func29(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func30(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func31(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func32(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func33(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func34(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func35(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func36(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func37(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func38(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func39(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func40(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func41(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func42(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func43(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func44(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func45(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func46(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func47(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func48(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func49(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func50(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func51(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func52(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func53(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func54(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func55(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func56(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func57(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func58(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func59(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func60(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func61(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func62(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func63(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func64(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func65(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func66(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func67(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func68(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func69(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func70(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func71(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func72(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func73(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func74(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func75(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func76(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func77(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func78(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func79(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func80(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func81(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func82(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func83(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func84(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func85(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func86(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func87(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func88(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func89(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func90(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func91(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func92(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func93(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func94(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func95(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func96(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func97(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func98(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
@icontract.require(lambda x: x > 1)
@icontract.require(lambda x: x > 2)
@icontract.require(lambda x: x > 3)
@icontract.require(lambda x: x > 4)
def some_func99(x: int) -> None:
pass
| 24.839424
| 35
| 0.674142
| 3,706
| 22,430
| 4.053157
| 0.03211
| 0.532588
| 0.732308
| 0.765595
| 0.957859
| 0.957859
| 0.957859
| 0.957859
| 0.957859
| 0.957859
| 0
| 0.036901
| 0.165136
| 22,430
| 902
| 36
| 24.866962
| 0.765246
| 0.000936
| 0
| 0.85592
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142653
| false
| 0.142653
| 0.001427
| 0
| 0.14408
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 11
|
4d8206b16ca710e21344f5d525360f72ba31d261
| 51,514
|
py
|
Python
|
src/mlpack/bindings/python/tests/test_python_binding.py
|
tomjpsun/mlpack
|
39b9a852c58b648ddb9b87a3d87aa3db2bacbf0a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2020-02-29T17:39:51.000Z
|
2020-05-16T23:36:01.000Z
|
src/mlpack/bindings/python/tests/test_python_binding.py
|
tomjpsun/mlpack
|
39b9a852c58b648ddb9b87a3d87aa3db2bacbf0a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2020-04-10T17:39:50.000Z
|
2020-04-11T14:56:25.000Z
|
src/mlpack/bindings/python/tests/test_python_binding.py
|
tomjpsun/mlpack
|
39b9a852c58b648ddb9b87a3d87aa3db2bacbf0a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2020-06-05T13:27:26.000Z
|
2020-06-23T09:44:31.000Z
|
#!/usr/bin/env python
"""
test_python_binding.py
Test that passing types to Python bindings works successfully.
mlpack is free software; you may redistribute it and/or modify it under the
terms of the 3-clause BSD license. You should have received a copy of the
3-clause BSD license along with mlpack. If not, see
http://www.opensource.org/licenses/BSD-3-Clause for more information.
"""
import unittest
import pandas as pd
import numpy as np
import copy
from mlpack.test_python_binding import test_python_binding
class TestPythonBinding(unittest.TestCase):
"""
This class tests the basic functionality of the Python bindings.
"""
def testRunBindingCorrectly(self):
    """
    Running the binding with all correct inputs must produce the
    expected transformed outputs.
    """
    result = test_python_binding(string_in='hello',
                                 int_in=12,
                                 double_in=4.0,
                                 mat_req_in=[[1.0]],
                                 col_req_in=[1.0],
                                 flag1=True)
    self.assertEqual(result['string_out'], 'hello2')
    self.assertEqual(result['int_out'], 13)
    self.assertEqual(result['double_out'], 5.0)
def testRunBindingNoFlag(self):
    """
    Omitting the mandatory flag1 must yield outputs that differ from
    the expected transformed values.
    """
    result = test_python_binding(string_in='hello',
                                 int_in=12,
                                 double_in=4.0,
                                 mat_req_in=[[1.0]],
                                 col_req_in=[1.0])
    self.assertNotEqual(result['string_out'], 'hello2')
    self.assertNotEqual(result['int_out'], 13)
    self.assertNotEqual(result['double_out'], 5.0)
def testRunBindingWrongString(self):
    """
    A string input other than 'hello' must not produce 'hello2'.
    """
    result = test_python_binding(string_in='goodbye',
                                 int_in=12,
                                 double_in=4.0,
                                 mat_req_in=[[1.0]],
                                 col_req_in=[1.0],
                                 flag1=True)
    self.assertNotEqual(result['string_out'], 'hello2')
def testRunBindingWrongInt(self):
    """
    An int input other than 12 must not produce the expected 13.
    """
    result = test_python_binding(string_in='hello',
                                 int_in=15,
                                 double_in=4.0,
                                 mat_req_in=[[1.0]],
                                 col_req_in=[1.0],
                                 flag1=True)
    self.assertNotEqual(result['int_out'], 13)
def testRunBindingWrongDouble(self):
    """
    A double input other than 4.0 must not produce the expected 5.0.
    """
    result = test_python_binding(string_in='hello',
                                 int_in=12,
                                 double_in=2.0,
                                 mat_req_in=[[1.0]],
                                 col_req_in=[1.0],
                                 flag1=True)
    self.assertNotEqual(result['double_out'], 5.0)
def testRunBadFlag(self):
    """
    Passing the second flag as well must make the binding fail, so
    none of the expected outputs should appear.
    """
    result = test_python_binding(string_in='hello',
                                 int_in=12,
                                 double_in=4.0,
                                 mat_req_in=[[1.0]],
                                 col_req_in=[1.0],
                                 flag1=True,
                                 flag2=True)
    self.assertNotEqual(result['string_out'], 'hello2')
    self.assertNotEqual(result['int_out'], 13)
    self.assertNotEqual(result['double_out'], 5.0)
def testNumpyMatrix(self):
    """
    A numpy matrix should come back with its third column doubled and
    its fifth column dropped.
    """
    original = np.random.rand(100, 5)
    passed = copy.deepcopy(original)
    result = test_python_binding(string_in='hello',
                                 int_in=12,
                                 double_in=4.0,
                                 mat_req_in=[[1.0]],
                                 col_req_in=[1.0],
                                 matrix_in=passed)
    mat_out = result['matrix_out']
    self.assertEqual(mat_out.shape[0], 100)
    self.assertEqual(mat_out.shape[1], 4)
    self.assertEqual(mat_out.dtype, np.double)
    # Columns 0, 1, 3 are passed through unchanged.
    for col in [0, 1, 3]:
        for row in range(100):
            self.assertEqual(original[row, col], mat_out[row, col])
    # Column 2 is doubled.
    for row in range(100):
        self.assertEqual(2 * original[row, 2], mat_out[row, 2])
def testNumpyMatrixForceCopy(self):
    """
    With copy_all_inputs=True the original numpy matrix is preserved;
    the output still has the third column doubled and the fifth dropped.
    """
    original = np.random.rand(100, 5)
    result = test_python_binding(string_in='hello',
                                 int_in=12,
                                 double_in=4.0,
                                 mat_req_in=[[1.0]],
                                 col_req_in=[1.0],
                                 matrix_in=original,
                                 copy_all_inputs=True)
    mat_out = result['matrix_out']
    self.assertEqual(mat_out.shape[0], 100)
    self.assertEqual(mat_out.shape[1], 4)
    self.assertEqual(mat_out.dtype, np.double)
    for col in [0, 1, 3]:
        for row in range(100):
            self.assertEqual(original[row, col], mat_out[row, col])
    for row in range(100):
        self.assertEqual(2 * original[row, 2], mat_out[row, 2])
def testNumpyFContiguousMatrix(self):
    """
    An F_CONTIGUOUS numpy matrix should come back with its third column
    doubled and its fifth column dropped.
    """
    original = np.array(np.random.rand(100, 5), order='F')
    passed = copy.deepcopy(original)
    result = test_python_binding(string_in='hello',
                                 int_in=12,
                                 double_in=4.0,
                                 mat_req_in=[[1.0]],
                                 col_req_in=[1.0],
                                 matrix_in=passed)
    mat_out = result['matrix_out']
    self.assertEqual(mat_out.shape[0], 100)
    self.assertEqual(mat_out.shape[1], 4)
    self.assertEqual(mat_out.dtype, np.double)
    for col in [0, 1, 3]:
        for row in range(100):
            self.assertEqual(original[row, col], mat_out[row, col])
    for row in range(100):
        self.assertEqual(2 * original[row, 2], mat_out[row, 2])
def testNumpyFContiguousMatrixForceCopy(self):
    """
    With copy_all_inputs=True an F_CONTIGUOUS numpy matrix is
    preserved; the output has the third column doubled and the fifth
    dropped.
    """
    original = np.array(np.random.rand(100, 5), order='F')
    result = test_python_binding(string_in='hello',
                                 int_in=12,
                                 double_in=4.0,
                                 mat_req_in=[[1.0]],
                                 col_req_in=[1.0],
                                 matrix_in=original,
                                 copy_all_inputs=True)
    mat_out = result['matrix_out']
    self.assertEqual(mat_out.shape[0], 100)
    self.assertEqual(mat_out.shape[1], 4)
    self.assertEqual(mat_out.dtype, np.double)
    for col in [0, 1, 3]:
        for row in range(100):
            self.assertEqual(original[row, col], mat_out[row, col])
    for row in range(100):
        self.assertEqual(2 * original[row, 2], mat_out[row, 2])
def testPandasSeriesMatrix(self):
    """
    A pandas.Series can be passed as the single-column matrix input
    parameter; each element comes back doubled.
    """
    original = pd.Series(np.random.rand(100))
    passed = original.copy(deep=True)
    result = test_python_binding(string_in='hello',
                                 int_in=12,
                                 double_in=4.0,
                                 mat_req_in=[[1.0]],
                                 col_req_in=[1.0],
                                 smatrix_in=passed)
    self.assertEqual(result['smatrix_out'].shape[0], 100)
    self.assertEqual(result['smatrix_out'].dtype, np.double)
    for row in range(100):
        self.assertEqual(result['smatrix_out'][row, 0], original.iloc[row] * 2)
def testPandasSeriesMatrixForceCopy(self):
    """
    A pandas.Series passed with copy_all_inputs=True is preserved;
    each element comes back doubled.
    """
    original = pd.Series(np.random.rand(100))
    result = test_python_binding(string_in='hello',
                                 int_in=12,
                                 double_in=4.0,
                                 mat_req_in=[[1.0]],
                                 col_req_in=[1.0],
                                 smatrix_in=original,
                                 copy_all_inputs=True)
    self.assertEqual(result['smatrix_out'].shape[0], 100)
    self.assertEqual(result['smatrix_out'].dtype, np.double)
    for row in range(100):
        self.assertEqual(result['smatrix_out'][row, 0], original.iloc[row] * 2)
def testPandasSeriesUMatrix(self):
    """
    An integer pandas.Series can be passed as the unsigned matrix
    input parameter; each element comes back doubled.
    """
    original = pd.Series(np.random.randint(0, high=500, size=100))
    passed = original.copy(deep=True)
    result = test_python_binding(string_in='hello',
                                 int_in=12,
                                 double_in=4.0,
                                 mat_req_in=[[1.0]],
                                 col_req_in=[1.0],
                                 s_umatrix_in=passed)
    self.assertEqual(result['s_umatrix_out'].shape[0], 100)
    self.assertEqual(result['s_umatrix_out'].dtype, np.dtype('intp'))
    for row in range(100):
        self.assertEqual(result['s_umatrix_out'][row, 0], original.iloc[row] * 2)
def testPandasSeriesUMatrixForceCopy(self):
    """
    An integer pandas.Series passed with copy_all_inputs=True is
    preserved; each element comes back doubled.
    """
    original = pd.Series(np.random.randint(0, high=500, size=100))
    result = test_python_binding(string_in='hello',
                                 int_in=12,
                                 double_in=4.0,
                                 mat_req_in=[[1.0]],
                                 col_req_in=[1.0],
                                 s_umatrix_in=original,
                                 copy_all_inputs=True)
    self.assertEqual(result['s_umatrix_out'].shape[0], 100)
    self.assertEqual(result['s_umatrix_out'].dtype, np.dtype('intp'))
    for row in range(100):
        self.assertEqual(result['s_umatrix_out'][row, 0], original.iloc[row] * 2)
def testPandasSeries(self):
    """
    A pandas.Series can be passed as the column input parameter;
    each element comes back doubled.
    """
    original = pd.Series(np.random.rand(100))
    passed = copy.deepcopy(original)
    result = test_python_binding(string_in='hello',
                                 int_in=12,
                                 double_in=4.0,
                                 mat_req_in=[[1.0]],
                                 col_req_in=[1.0],
                                 col_in=passed)
    self.assertEqual(result['col_out'].shape[0], 100)
    self.assertEqual(result['col_out'].dtype, np.double)
    # NOTE(review): like the original, this compares against the
    # Series that was passed in, not the untouched copy — confirm
    # the binding leaves its input intact here.
    for row in range(100):
        self.assertEqual(result['col_out'][row], passed[row] * 2)
def testPandasSeriesForceCopy(self):
    """
    A pandas.Series passed as the column input with
    copy_all_inputs=True is preserved; each element comes back doubled.
    """
    original = pd.Series(np.random.rand(100))
    result = test_python_binding(string_in='hello',
                                 int_in=12,
                                 double_in=4.0,
                                 mat_req_in=[[1.0]],
                                 col_req_in=[1.0],
                                 col_in=original,
                                 copy_all_inputs=True)
    self.assertEqual(result['col_out'].shape[0], 100)
    self.assertEqual(result['col_out'].dtype, np.double)
    for row in range(100):
        self.assertEqual(result['col_out'][row], original[row] * 2)
def testPandasDataFrameMatrix(self):
    """
    A pandas.DataFrame should come back with its third column doubled
    and its fifth column dropped.
    """
    original = pd.DataFrame(np.random.rand(100, 5))
    passed = original.copy(deep=True)
    result = test_python_binding(string_in='hello',
                                 int_in=12,
                                 double_in=4.0,
                                 mat_req_in=[[1.0]],
                                 col_req_in=[1.0],
                                 matrix_in=passed)
    mat_out = result['matrix_out']
    self.assertEqual(mat_out.shape[0], 100)
    self.assertEqual(mat_out.shape[1], 4)
    self.assertEqual(mat_out.dtype, np.double)
    for col in [0, 1, 3]:
        for row in range(100):
            self.assertEqual(original.iloc[row, col], mat_out[row, col])
    for row in range(100):
        self.assertEqual(2 * original.iloc[row, 2], mat_out[row, 2])
def testPandasDataFrameMatrixForceCopy(self):
    """
    Same as testPandasDataFrameMatrix(), but with copy_all_inputs=True so
    the caller's DataFrame is preserved.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    frame = pd.DataFrame(np.random.rand(100, 5))
    result = test_python_binding(matrix_in=frame, copy_all_inputs=True,
                                 **base)
    mat = result['matrix_out']
    self.assertEqual(mat.shape[0], 100)
    self.assertEqual(mat.shape[1], 4)
    self.assertEqual(mat.dtype, np.double)
    for j in range(100):
        for i in (0, 1, 3):
            self.assertEqual(frame.iloc[j, i], mat[j, i])
        self.assertEqual(2 * frame.iloc[j, 2], mat[j, 2])
def testArraylikeMatrix(self):
    """
    A nested-list matrix should be accepted; the third column comes back
    doubled and the fifth column dropped.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    rows = [[1, 2, 3, 4, 5],
            [6, 7, 8, 9, 10],
            [11, 12, 13, 14, 15]]
    result = test_python_binding(matrix_in=rows, **base)
    mat = result['matrix_out']
    self.assertEqual(mat.shape[0], 3)
    self.assertEqual(mat.shape[1], 4)
    self.assertEqual(mat.dtype, np.double)
    expected = [[1, 2, 6, 4],
                [6, 7, 16, 9],
                [11, 12, 26, 14]]
    for r in range(3):
        for c in range(4):
            self.assertEqual(mat[r, c], expected[r][c])
def testArraylikeMatrixForceCopy(self):
    """
    Same as testArraylikeMatrix(), but with copy_all_inputs=True; the
    caller's nested list must keep its original dimensions.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    rows = [[1, 2, 3, 4, 5],
            [6, 7, 8, 9, 10],
            [11, 12, 13, 14, 15]]
    result = test_python_binding(matrix_in=rows, copy_all_inputs=True,
                                 **base)
    mat = result['matrix_out']
    self.assertEqual(mat.shape[0], 3)
    self.assertEqual(mat.shape[1], 4)
    # The input list must be untouched by the binding.
    self.assertEqual(len(rows), 3)
    self.assertEqual(len(rows[0]), 5)
    self.assertEqual(mat.dtype, np.double)
    expected = [[1, 2, 6, 4],
                [6, 7, 16, 9],
                [11, 12, 26, 14]]
    for r in range(3):
        for c in range(4):
            self.assertEqual(mat[r, c], expected[r][c])
def testNumpyUmatrix(self):
    """
    Same as testNumpyMatrix() but with an unsigned matrix.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.randint(0, high=500, size=[100, 5])
    result = test_python_binding(umatrix_in=copy.deepcopy(data), **base)
    mat = result['umatrix_out']
    self.assertEqual(mat.shape[0], 100)
    self.assertEqual(mat.shape[1], 4)
    self.assertEqual(mat.dtype, np.dtype('intp'))
    for j in range(100):
        for i in (0, 1, 3):
            self.assertEqual(data[j, i], mat[j, i])
        self.assertEqual(2 * data[j, 2], mat[j, 2])
def testNumpyUmatrixForceCopy(self):
    """
    Same as testNumpyUmatrix(), but with copy_all_inputs=True so the
    caller's array survives the call.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.randint(0, high=500, size=[100, 5])
    result = test_python_binding(umatrix_in=data, copy_all_inputs=True,
                                 **base)
    mat = result['umatrix_out']
    self.assertEqual(mat.shape[0], 100)
    self.assertEqual(mat.shape[1], 4)
    self.assertEqual(mat.dtype, np.dtype('intp'))
    for j in range(100):
        for i in (0, 1, 3):
            self.assertEqual(data[j, i], mat[j, i])
        self.assertEqual(2 * data[j, 2], mat[j, 2])
def testArraylikeUmatrix(self):
    """
    A nested-list unsigned matrix should be accepted; the third column
    comes back doubled and the fifth column dropped.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    rows = [[1, 2, 3, 4, 5],
            [6, 7, 8, 9, 10],
            [11, 12, 13, 14, 15]]
    result = test_python_binding(umatrix_in=rows, **base)
    mat = result['umatrix_out']
    self.assertEqual(mat.shape[0], 3)
    self.assertEqual(mat.shape[1], 4)
    self.assertEqual(mat.dtype, np.dtype('intp'))
    expected = [[1, 2, 6, 4],
                [6, 7, 16, 9],
                [11, 12, 26, 14]]
    for r in range(3):
        for c in range(4):
            self.assertEqual(mat[r, c], expected[r][c])
def testArraylikeUmatrixForceCopy(self):
    """
    Same as testArraylikeUmatrix(), but with copy_all_inputs=True; the
    caller's nested list must keep its original dimensions.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    rows = [[1, 2, 3, 4, 5],
            [6, 7, 8, 9, 10],
            [11, 12, 13, 14, 15]]
    result = test_python_binding(umatrix_in=rows, copy_all_inputs=True,
                                 **base)
    mat = result['umatrix_out']
    self.assertEqual(mat.shape[0], 3)
    self.assertEqual(mat.shape[1], 4)
    # The input list must be untouched by the binding.
    self.assertEqual(len(rows), 3)
    self.assertEqual(len(rows[0]), 5)
    self.assertEqual(mat.dtype, np.dtype('intp'))
    expected = [[1, 2, 6, 4],
                [6, 7, 16, 9],
                [11, 12, 26, 14]]
    for r in range(3):
        for c in range(4):
            self.assertEqual(mat[r, c], expected[r][c])
def testCol(self):
    """
    A column vector input should come back with every element doubled.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.rand(100)
    result = test_python_binding(col_in=copy.deepcopy(data), **base)
    col_out = result['col_out']
    self.assertEqual(col_out.shape[0], 100)
    self.assertEqual(col_out.dtype, np.double)
    for i in range(100):
        self.assertEqual(col_out[i], data[i] * 2)
def testColForceCopy(self):
    """
    A column vector passed with copy_all_inputs=True should come back
    doubled, leaving the caller's array intact.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.rand(100)
    result = test_python_binding(col_in=data, copy_all_inputs=True, **base)
    col_out = result['col_out']
    self.assertEqual(col_out.shape[0], 100)
    self.assertEqual(col_out.dtype, np.double)
    for i in range(100):
        self.assertEqual(col_out[i], data[i] * 2)
def testUcol(self):
    """
    An unsigned column vector input should come back doubled.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.randint(0, high=500, size=100)
    result = test_python_binding(ucol_in=copy.deepcopy(data), **base)
    ucol_out = result['ucol_out']
    self.assertEqual(ucol_out.shape[0], 100)
    self.assertEqual(ucol_out.dtype, np.dtype('intp'))
    for i in range(100):
        self.assertEqual(ucol_out[i], data[i] * 2)
def testUcolForceCopy(self):
    """
    An unsigned column vector passed with copy_all_inputs=True should
    come back doubled, leaving the caller's array intact.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.randint(0, high=500, size=100)
    result = test_python_binding(ucol_in=data, copy_all_inputs=True, **base)
    ucol_out = result['ucol_out']
    self.assertEqual(ucol_out.shape[0], 100)
    self.assertEqual(ucol_out.dtype, np.dtype('intp'))
    for i in range(100):
        self.assertEqual(ucol_out[i], data[i] * 2)
def testRow(self):
    """
    A row vector input should come back with every element doubled.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.rand(100)
    result = test_python_binding(row_in=copy.deepcopy(data), **base)
    row_out = result['row_out']
    self.assertEqual(row_out.shape[0], 100)
    self.assertEqual(row_out.dtype, np.double)
    for i in range(100):
        self.assertEqual(row_out[i], data[i] * 2)
def testRowForceCopy(self):
    """
    A row vector passed with copy_all_inputs=True should come back
    doubled, leaving the caller's array intact.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.rand(100)
    result = test_python_binding(row_in=data, copy_all_inputs=True, **base)
    row_out = result['row_out']
    self.assertEqual(row_out.shape[0], 100)
    self.assertEqual(row_out.dtype, np.double)
    for i in range(100):
        self.assertEqual(row_out[i], data[i] * 2)
def testUrow(self):
    """
    An unsigned row vector input should come back doubled.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.randint(0, high=500, size=100)
    result = test_python_binding(urow_in=copy.deepcopy(data), **base)
    urow_out = result['urow_out']
    self.assertEqual(urow_out.shape[0], 100)
    self.assertEqual(urow_out.dtype, np.dtype('intp'))
    for i in range(100):
        self.assertEqual(urow_out[i], data[i] * 2)
def testUrowForceCopy(self):
    """
    An unsigned row vector passed with copy_all_inputs=True should come
    back doubled, leaving the caller's array intact.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.randint(0, high=500, size=100)
    result = test_python_binding(urow_in=data, copy_all_inputs=True, **base)
    urow_out = result['urow_out']
    self.assertEqual(urow_out.shape[0], 100)
    self.assertEqual(urow_out.dtype, np.dtype('intp'))
    for i in range(100):
        self.assertEqual(urow_out[i], data[i] * 2)
def testMatrixAndInfoNumpy(self):
    """
    A numpy matrix with all numeric features should come back fully
    doubled when passed as the matrix-with-info input.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.rand(100, 10)
    result = test_python_binding(matrix_and_info_in=copy.deepcopy(data),
                                 **base)
    out = result['matrix_and_info_out']
    self.assertEqual(out.shape[0], 100)
    self.assertEqual(out.shape[1], 10)
    for i in range(10):
        for j in range(100):
            self.assertEqual(out[j, i], data[j, i] * 2.0)
def testMatrixAndInfoNumpyForceCopy(self):
    """
    Same as testMatrixAndInfoNumpy(), but with copy_all_inputs=True so
    the caller's matrix is preserved.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.rand(100, 10)
    result = test_python_binding(matrix_and_info_in=data,
                                 copy_all_inputs=True, **base)
    out = result['matrix_and_info_out']
    self.assertEqual(out.shape[0], 100)
    self.assertEqual(out.shape[1], 10)
    for i in range(10):
        for j in range(100):
            self.assertEqual(out[j, i], data[j, i] * 2.0)
def testMatrixAndInfoPandas(self):
    """
    A DataFrame with a categorical column: numeric columns come back
    doubled and the categorical column is passed through unchanged.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    frame = pd.DataFrame(np.random.rand(10, 4), columns=list('abcd'))
    frame['e'] = pd.Series(['a', 'b', 'c', 'd', 'a', 'b', 'e', 'c', 'a', 'b'],
                           dtype='category')
    passed = frame.copy(deep=True)
    result = test_python_binding(matrix_and_info_in=passed, **base)
    out = result['matrix_and_info_out']
    self.assertEqual(out.shape[0], 10)
    self.assertEqual(out.shape[1], 5)
    cols = list('abcde')
    for j in range(10):
        for i in range(4):
            self.assertEqual(out[j, i], passed[cols[i]][j] * 2)
        # Categorical column survives untouched.
        self.assertEqual(out[j, 4], passed[cols[4]][j])
def testMatrixAndInfoPandasForceCopy(self):
    """
    Same as testMatrixAndInfoPandas(), but with copy_all_inputs=True so
    the caller's DataFrame is preserved.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    frame = pd.DataFrame(np.random.rand(10, 4), columns=list('abcd'))
    frame['e'] = pd.Series(['a', 'b', 'c', 'd', 'a', 'b', 'e', 'c', 'a', 'b'],
                           dtype='category')
    result = test_python_binding(matrix_and_info_in=frame,
                                 copy_all_inputs=True, **base)
    out = result['matrix_and_info_out']
    self.assertEqual(out.shape[0], 10)
    self.assertEqual(out.shape[1], 5)
    cols = list('abcde')
    for j in range(10):
        for i in range(4):
            self.assertEqual(out[j, i], frame[cols[i]][j] * 2)
        # Categorical column survives untouched.
        self.assertEqual(out[j, 4], frame[cols[4]][j])
def testIntVector(self):
    """
    A vector of ints should come back with its last element removed.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    result = test_python_binding(vector_in=[1, 2, 3, 4, 5], **base)
    self.assertEqual(result['vector_out'], [1, 2, 3, 4])
def testStringVector(self):
    """
    A vector of strings should come back with its last element removed.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    words = ['one', 'two', 'three', 'four', 'five']
    result = test_python_binding(str_vector_in=words, **base)
    self.assertEqual(result['str_vector_out'], ['one', 'two', 'three', 'four'])
def testModel(self):
    """
    Build a GaussianKernel model, pass it back in, and check the reported
    bandwidth.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    built = test_python_binding(build_model=True, **base)
    echoed = test_python_binding(model_in=built['model_out'], **base)
    self.assertEqual(echoed['model_bw_out'], 20.0)
def testOneDimensionNumpyMatrix(self):
    """
    A one-dimensional array should be accepted for a matrix parameter and
    come back as a doubled single-column matrix.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.rand(100)
    result = test_python_binding(smatrix_in=copy.deepcopy(data), **base)
    mat = result['smatrix_out']
    self.assertEqual(mat.shape[0], 100)
    self.assertEqual(mat.dtype, np.double)
    for i in range(100):
        self.assertEqual(mat[i, 0], data[i] * 2)
def testOneDimensionNumpymatrixForceCopy(self):
    """
    Same as testOneDimensionNumpyMatrix(), but with copy_all_inputs=True
    so the caller's array is preserved.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.rand(100)
    result = test_python_binding(smatrix_in=data, copy_all_inputs=True,
                                 **base)
    mat = result['smatrix_out']
    self.assertEqual(mat.shape[0], 100)
    self.assertEqual(mat.dtype, np.double)
    for i in range(100):
        self.assertEqual(mat[i, 0], data[i] * 2)
def testOneDimensionNumpyUmatrix(self):
    """
    Same as testNumpyMatrix() but with an unsigned one-dimensional array.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.randint(0, high=500, size=100)
    result = test_python_binding(s_umatrix_in=copy.deepcopy(data), **base)
    mat = result['s_umatrix_out']
    self.assertEqual(mat.shape[0], 100)
    self.assertEqual(mat.dtype, np.dtype('intp'))
    for i in range(100):
        self.assertEqual(mat[i, 0], data[i] * 2)
def testOneDimensionNumpyUmatrixForceCopy(self):
    """
    Same as testOneDimensionNumpyUmatrix(), but with copy_all_inputs=True
    so the caller's array is preserved.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.randint(0, high=500, size=100)
    result = test_python_binding(s_umatrix_in=data, copy_all_inputs=True,
                                 **base)
    mat = result['s_umatrix_out']
    self.assertEqual(mat.shape[0], 100)
    self.assertEqual(mat.dtype, np.dtype('intp'))
    for i in range(100):
        self.assertEqual(mat[i, 0], data[i] * 2)
def testTwoDimensionCol(self):
    """
    A (100, 1) array should be accepted as a column vector and come back
    doubled.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.rand(100, 1)
    result = test_python_binding(col_in=copy.deepcopy(data), **base)
    col_out = result['col_out']
    self.assertEqual(col_out.shape[0], 100)
    self.assertEqual(col_out.dtype, np.double)
    for i in range(100):
        self.assertEqual(col_out[i], data[i] * 2)
def testTwoDimensionColForceCopy(self):
    """
    A (100, 1) array passed with copy_all_inputs=True should come back
    doubled, leaving the caller's array intact.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.rand(100, 1)
    result = test_python_binding(col_in=data, copy_all_inputs=True, **base)
    col_out = result['col_out']
    self.assertEqual(col_out.shape[0], 100)
    self.assertEqual(col_out.dtype, np.double)
    for i in range(100):
        self.assertEqual(col_out[i], data[i] * 2)
def testTwoDimensionUcol(self):
    """
    An unsigned (100, 1) array should be accepted as a column vector and
    come back doubled.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.randint(0, high=500, size=[100, 1])
    result = test_python_binding(ucol_in=copy.deepcopy(data), **base)
    ucol_out = result['ucol_out']
    self.assertEqual(ucol_out.shape[0], 100)
    self.assertEqual(ucol_out.dtype, np.dtype('intp'))
    for i in range(100):
        self.assertEqual(ucol_out[i], data[i] * 2)
def testTwoDimensionUcolForceCopy(self):
    """
    An unsigned (100, 1) array passed with copy_all_inputs=True should
    come back doubled, leaving the caller's array intact.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.randint(0, high=500, size=[100, 1])
    result = test_python_binding(ucol_in=data, copy_all_inputs=True, **base)
    ucol_out = result['ucol_out']
    self.assertEqual(ucol_out.shape[0], 100)
    self.assertEqual(ucol_out.dtype, np.dtype('intp'))
    for i in range(100):
        self.assertEqual(ucol_out[i], data[i] * 2)
def testTwoDimensionRow(self):
    """
    A (100, 1) array should be accepted as a row vector and come back
    doubled.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.rand(100, 1)
    # Snapshot first: the original array is handed to the binding without
    # copy_all_inputs, so we compare against the snapshot afterwards.
    snapshot = copy.deepcopy(data)
    result = test_python_binding(row_in=data, **base)
    row_out = result['row_out']
    self.assertEqual(row_out.shape[0], 100)
    self.assertEqual(row_out.dtype, np.double)
    for i in range(100):
        self.assertEqual(row_out[i], snapshot[i] * 2)
def testTwoDimensionRowForceCopy(self):
    """
    A (100, 1) array passed with copy_all_inputs=True should come back
    doubled, leaving the caller's array intact.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.rand(100, 1)
    result = test_python_binding(row_in=data, copy_all_inputs=True, **base)
    row_out = result['row_out']
    self.assertEqual(row_out.shape[0], 100)
    self.assertEqual(row_out.dtype, np.double)
    for i in range(100):
        self.assertEqual(row_out[i], data[i] * 2)
def testTwoDimensionUrow(self):
    """
    An unsigned (100, 1) array should be accepted as a row vector and
    come back doubled.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.randint(0, high=500, size=[100, 1])
    result = test_python_binding(urow_in=copy.deepcopy(data), **base)
    urow_out = result['urow_out']
    self.assertEqual(urow_out.shape[0], 100)
    self.assertEqual(urow_out.dtype, np.dtype('intp'))
    for i in range(100):
        self.assertEqual(urow_out[i], data[i] * 2)
def testTwoDimensionUrowForceCopy(self):
    """
    An unsigned (1, 101) array passed with copy_all_inputs=True should
    come back as a doubled length-101 row.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    data = np.random.randint(5, high=500, size=[1, 101])
    result = test_python_binding(urow_in=data, copy_all_inputs=True, **base)
    urow_out = result['urow_out']
    self.assertEqual(urow_out.shape[0], 101)
    self.assertEqual(urow_out.dtype, np.dtype('intp'))
    for i in range(101):
        self.assertEqual(urow_out[i], data[0][i] * 2)
def testOneDimensionMatrixAndInfoPandas(self):
    """
    A single pandas column should be usable as the matrix-with-info input
    and come back doubled.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    frame = pd.DataFrame(np.random.rand(10))
    passed = frame.copy(deep=True)
    result = test_python_binding(matrix_and_info_in=passed[0], **base)
    out = result['matrix_and_info_out']
    self.assertEqual(out.shape[0], 10)
    for i in range(10):
        self.assertEqual(out[i, 0], frame[0][i] * 2)
def testOneDimensionMatrixAndInfoPandasForceCopy(self):
    """
    Same as testOneDimensionMatrixAndInfoPandas(), but with
    copy_all_inputs=True so the caller's column is preserved.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    frame = pd.DataFrame(np.random.rand(10))
    result = test_python_binding(matrix_and_info_in=frame[0],
                                 copy_all_inputs=True, **base)
    out = result['matrix_and_info_out']
    self.assertEqual(out.shape[0], 10)
    for i in range(10):
        self.assertEqual(out[i, 0], frame[0][i] * 2)
def testThrownException(self):
    """
    Passing an argument of the wrong type must raise TypeError.  Each
    case below overrides or adds exactly one bad argument on top of an
    otherwise-valid call.
    """
    good = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0], flag1=True)
    bad_overrides = [
        {'string_in': 10},
        {'int_in': 10.0},
        {'double_in': 'bad'},
        {'flag2': 10},
        {'matrix_in': 10.0},
        {'matrix_in': 1},
        {'matrix_and_info_in': 10.0},
        {'copy_all_inputs': 10.0},
        {'col_in': 10},
        {'row_in': 10.0},
        {'str_vector_in': 'bad'},
        {'urow_in': 10.0},
        {'ucol_in': 10.0},
        {'umatrix_in': 10.0},
        {'verbose': 10},
        {'vector_in': 10.0},
        {'mat_req_in': False},
        {'col_req_in': False},
    ]
    for override in bad_overrides:
        args = dict(good, **override)
        with self.assertRaises(TypeError):
            test_python_binding(**args)
def testModelForceCopy(self):
    """
    Build a GaussianKernel model, then pass it back both with and without
    copy_all_inputs; both calls must report the same bandwidth.
    """
    base = dict(string_in='hello', int_in=12, double_in=4.0,
                mat_req_in=[[1.0]], col_req_in=[1.0])
    built = test_python_binding(build_model=True, **base)
    copied = test_python_binding(model_in=built['model_out'],
                                 copy_all_inputs=True, **base)
    moved = test_python_binding(model_in=built['model_out'], **base)
    self.assertEqual(copied['model_bw_out'], 20.0)
    self.assertEqual(moved['model_bw_out'], 20.0)
# Allow running this test file directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 38.414616
| 83
| 0.462282
| 5,828
| 51,514
| 3.899108
| 0.048044
| 0.1373
| 0.170965
| 0.045591
| 0.90125
| 0.896717
| 0.887212
| 0.885055
| 0.884439
| 0.879335
| 0
| 0.053454
| 0.420041
| 51,514
| 1,340
| 84
| 38.443284
| 0.707156
| 0.078309
| 0
| 0.872708
| 0
| 0
| 0.060804
| 0
| 0
| 0
| 0
| 0
| 0.253506
| 1
| 0.059331
| false
| 0
| 0.005394
| 0
| 0.065804
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4d8392d6d92f34e14af9041567ef5479cea24e50
| 10,059
|
py
|
Python
|
autoreplace.py
|
smithlabdurham/GEOL2301
|
147ac3c1b1e5212c8f352be3e59194c88a2a4f86
|
[
"CC-BY-3.0"
] | 1
|
2021-01-19T12:22:25.000Z
|
2021-01-19T12:22:25.000Z
|
autoreplace.py
|
smithlabdurham/GEOL2031
|
c096a4f2a7c156cc71112f38ba43ccf1583ad418
|
[
"CC-BY-3.0"
] | 5
|
2021-10-15T08:42:04.000Z
|
2022-03-17T16:10:20.000Z
|
autoreplace.py
|
smithlabdurham/frontiers
|
2cc12eefe48f7ca0bf897c58f0565c433a9a3ae3
|
[
"CC-BY-3.0"
] | null | null | null |
# Runs with PythonScript plugin
# Copy to %APPDATA%\Roaming\Notepad++\plugins\config\PythonScript\scripts
#
# Each trigger is a three-character abbreviation typed in the editor
# ('[x['); the CHARADDED callback below replaces it with the matching
# HTML snippet.  Fix applied: removed the stray trailing semicolons on
# two of the assignments (un-Pythonic, no effect on behavior).

# Trigger strings, one per snippet.
search_text_4 = '[4['
search_text_8 = '[8['
search_text_f = '[f['
search_text_h = '[h['
search_text_q = '[q['
search_text_i = '[i['
search_text_o = '[o['
search_text_R = '[R['
search_text_r = '[r['
search_text_S = '[S['
search_text_u = '[u['
search_text_v = '[v['

# HTML snippet inserted for each trigger.  Placeholders (XXXX..., TODO,
# SECTION_ID, IMAGE) are meant to be filled in by the author afterwards.
replacement_f = '<iframe title="SketchFab model" width="480" height="360"\n src="https://sketchfab.com/models/XXXXXXXXXXXXXXXXXXXXXXXXXXXXX/embed?ui_controls=0&ui_infos=0&ui_inspector=0&ui_watermark=1&ui_watermark_link=0" allow="autoplay; fullscreen; vr" mozallowfullscreen="true" webkitallowfullscreen="true"></iframe>'
replacement_h = '<div class="row">\n <div class="col-8 col-12-narrow">\n <h3>\n \n </h3>\n </div>\n </div>\n <div class="row">\n \n </div>'
replacement_i = '<li class="do">\n \n </li>\n <li class="how">\n \n </li>'
replacement_o = '<li class="option" onclick="Right|Wrong(this, \'TODO\');">\n \n </li>'
replacement_q = '<li class="question" onclick="Reveal(\'TODO\');">\n \n </li>\n <li class="hidden written answer" id="TODO">\n \n </li>'
replacement_r = '<div class="row">\n \n </div>'
replacement_R = '</div>\n </div>\n\n <div class="row">\n <div class="col-8 col-12-narrow">'
replacement_u = '<ul>\n <li class="question" onclick="Reveal(\'TODO\');">\n \n </li>\n <li class="hidden written answer" id="TODO">\n \n </li>\n </ul>'
replacement_v = '<div class="col-8 col-12-narrow">\n <iframe src="https://durham.cloud.panopto.eu/Panopto/Pages/Embed.aspx?id="\n height="360" width="640" allow="fullscreen" loading="lazy"></iframe>\n </div>'
replacement_4 = '<div class="col-4 col-12-narrow">\n <span class="image">\n <img src="images/" />\n </span>\n </div>'
replacement_8 = '<div class="col-8 col-12-narrow">\n <p>\n \n </p>\n </div>'
replacement_S = '\n </div>\n </div>\n </section>\n\n <section id="SECTION_ID" class="main">\n <header>\n <div class="container">\n <span class="image featured">\n <img src="images/IMAGE"\n title=""\n alt="Credit: " />\n </span>\n <h2>TODO_SECTION_HEADING</h2>\n </div>\n </header>\n <div class="content dark style3">\n <div class="container">\n <div class="row">\n <div class="col-8 col-12-narrow">\n <h3>TODO_SUBHEADING</h3>\n </div>\n </div>\n <div class="row">\n \n </div>'
def callback_sci_CHARADDED(args):
    """Scintilla CHARADDED handler: expand "[x[" triggers into HTML snippets.

    When the character just typed is '[', the three characters ending at the
    caret are compared against the module-level search_text_* triggers. On a
    match the trigger is replaced (as a single undo action) by the matching
    replacement_* snippet and the caret is moved to the end of the insertion.

    :param args: Scintilla notification payload; only args['ch'] (the code
                 point of the character just added) is read.
    """
    if chr(args['ch']) != '[':
        return
    # Trigger -> snippet lookup replaces twelve structurally identical
    # elif branches; built per call so it always sees the current
    # module-level constants (some triggers are defined above this section).
    replacements = {
        search_text_f: replacement_f,
        search_text_R: replacement_R,
        search_text_r: replacement_r,
        search_text_h: replacement_h,
        search_text_i: replacement_i,
        search_text_o: replacement_o,
        search_text_q: replacement_q,
        search_text_u: replacement_u,
        search_text_4: replacement_4,
        search_text_8: replacement_8,
        search_text_v: replacement_v,
        search_text_S: replacement_S,
    }
    cp = editor.getCurrentPos()
    search_text_length = 3  # every trigger has the form '[x['
    start_of_search_text_pos = cp - search_text_length
    # NOTE(review): start may be negative in the first two columns of the
    # buffer; Scintilla clamps the range, so getTextRange simply won't match.
    typed = editor.getTextRange(start_of_search_text_pos, cp)
    replacement = replacements.get(typed)
    if replacement is None:
        return
    # Group delete+insert so a single Ctrl-Z undoes the whole expansion.
    editor.beginUndoAction()
    editor.deleteRange(start_of_search_text_pos, search_text_length)
    editor.insertText(start_of_search_text_pos, replacement)
    editor.endUndoAction()
    # Park the caret (collapsed selection) just after the inserted snippet.
    end_of_search_text_pos = start_of_search_text_pos + len(replacement)
    editor.setCurrentPos(end_of_search_text_pos)
    editor.setSelection(end_of_search_text_pos, end_of_search_text_pos)
    editor.chooseCaretX()
# Register the handler for Scintilla's CHARADDED notification so every typed
# character is inspected for a "[x[" snippet trigger.
editor.callback(callback_sci_CHARADDED, [SCINTILLANOTIFICATION.CHARADDED])
| 71.340426
| 683
| 0.643901
| 1,257
| 10,059
| 4.743835
| 0.107399
| 0.226396
| 0.195204
| 0.244005
| 0.811504
| 0.794734
| 0.78702
| 0.780647
| 0.767231
| 0.767231
| 0
| 0.007335
| 0.254598
| 10,059
| 141
| 684
| 71.340426
| 0.787943
| 0.010041
| 0
| 0.521739
| 0
| 0.072464
| 0.256428
| 0.017276
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007246
| false
| 0
| 0
| 0
| 0.007246
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4db98cefd96dcf6bdd65020c0f7ddc4ddecd583c
| 224
|
py
|
Python
|
pycofe/i2reports/core/CCP4ErrorHandling.py
|
ekr-ccp4/jsCoFE
|
b9424733fb567938927509bc667ef24ed60ddd8c
|
[
"MIT"
] | null | null | null |
pycofe/i2reports/core/CCP4ErrorHandling.py
|
ekr-ccp4/jsCoFE
|
b9424733fb567938927509bc667ef24ed60ddd8c
|
[
"MIT"
] | null | null | null |
pycofe/i2reports/core/CCP4ErrorHandling.py
|
ekr-ccp4/jsCoFE
|
b9424733fb567938927509bc667ef24ed60ddd8c
|
[
"MIT"
] | 1
|
2021-02-25T06:54:15.000Z
|
2021-02-25T06:54:15.000Z
|
# Severity levels mirroring the CCP4i2 error-handling constants.
SEVERITY_WARNING = 2
SEVERITY_OK = 0


class CException(Exception):
    """Minimal stand-in for the CCP4i2 CException error container.

    Accepts (and mostly ignores) the rich argument signature of the original
    class so that report code written against CCP4i2 keeps running without
    the full framework. Structure restored from a whitespace-flattened copy.
    """

    def __init__(self, *args, **kwdargs):
        # Explicitly forward positional args to Exception so .args / str(exc)
        # behave conventionally; keyword arguments are accepted but ignored.
        super(CException, self).__init__(*args)

    def extend(self, *args, **kwdargs):
        """Accept additional error records; this shim discards them."""
        pass

    def maxSeverity(self, *args, **kwdargs):
        """Return the worst collected severity; the shim always reports OK (0)."""
        return 0
| 14
| 42
| 0.65625
| 28
| 224
| 5.035714
| 0.607143
| 0.170213
| 0.319149
| 0.269504
| 0.312057
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017045
| 0.214286
| 224
| 15
| 43
| 14.933333
| 0.784091
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.222222
| 0
| 0.111111
| 0.555556
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 7
|
4dd52570ff78805f847df568a51a8dc7f5597e3d
| 31,868
|
py
|
Python
|
sdk/python/pulumi_ns1/record.py
|
pulumi/pulumi-ns1
|
7200ab674c814fd18f8b59a90ee130574df4eafc
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_ns1/record.py
|
pulumi/pulumi-ns1
|
7200ab674c814fd18f8b59a90ee130574df4eafc
|
[
"ECL-2.0",
"Apache-2.0"
] | 43
|
2020-06-24T11:18:00.000Z
|
2022-03-31T15:37:47.000Z
|
sdk/python/pulumi_ns1/record.py
|
pulumi/pulumi-ns1
|
7200ab674c814fd18f8b59a90ee130574df4eafc
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-01-12T23:15:35.000Z
|
2021-01-12T23:15:35.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['RecordArgs', 'Record']
# NOTE(review): generated by the Pulumi Terraform Bridge (tfgen) — prefer
# regenerating over hand-editing (see the file-header warning).
@pulumi.input_type
class RecordArgs:
    def __init__(__self__, *,
                 domain: pulumi.Input[str],
                 type: pulumi.Input[str],
                 zone: pulumi.Input[str],
                 answers: Optional[pulumi.Input[Sequence[pulumi.Input['RecordAnswerArgs']]]] = None,
                 filters: Optional[pulumi.Input[Sequence[pulumi.Input['RecordFilterArgs']]]] = None,
                 link: Optional[pulumi.Input[str]] = None,
                 meta: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 regions: Optional[pulumi.Input[Sequence[pulumi.Input['RecordRegionArgs']]]] = None,
                 short_answers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 ttl: Optional[pulumi.Input[int]] = None,
                 use_client_subnet: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing a Record resource.
        :param pulumi.Input[str] domain: The records' domain. Cannot have leading or trailing
               dots - see the example above and `FQDN formatting` below.
        :param pulumi.Input[str] type: The records' RR type.
        :param pulumi.Input[str] zone: The zone the record belongs to. Cannot have leading or
               trailing dots (".") - see the example above and `FQDN formatting` below.
        :param pulumi.Input[Sequence[pulumi.Input['RecordAnswerArgs']]] answers: One or more NS1 answers for the records' specified type.
               Answers are documented below.
        :param pulumi.Input[Sequence[pulumi.Input['RecordFilterArgs']]] filters: One or more NS1 filters for the record(order matters).
               Filters are documented below.
        :param pulumi.Input[str] link: The target record to link to. This means this record is a
               'linked' record, and it inherits all properties from its target.
        :param pulumi.Input[Sequence[pulumi.Input['RecordRegionArgs']]] regions: One or more "regions" for the record. These are really
               just groupings based on metadata, and are called "Answer Groups" in the NS1 UI,
               but remain `regions` here for legacy reasons. Regions are
               documented below. Please note the ordering requirement!
        :param pulumi.Input[int] ttl: The records' time to live (in seconds).
        :param pulumi.Input[bool] use_client_subnet: Whether to use EDNS client subnet data when
               available(in filter chain).
               * ` meta` - (Optional) meta is supported at the `record` level. Meta
               is documented below.
        """
        pulumi.set(__self__, "domain", domain)
        pulumi.set(__self__, "type", type)
        pulumi.set(__self__, "zone", zone)
        if answers is not None:
            pulumi.set(__self__, "answers", answers)
        if filters is not None:
            pulumi.set(__self__, "filters", filters)
        if link is not None:
            pulumi.set(__self__, "link", link)
        if meta is not None:
            pulumi.set(__self__, "meta", meta)
        if regions is not None:
            pulumi.set(__self__, "regions", regions)
        # First `short_answers` guard only emits the deprecation warnings;
        # the second one actually stores the value (tfgen emission pattern).
        if short_answers is not None:
            warnings.warn("""short_answers will be deprecated in a future release. It is suggested to migrate to a regular \"answers\" block.""", DeprecationWarning)
            pulumi.log.warn("""short_answers is deprecated: short_answers will be deprecated in a future release. It is suggested to migrate to a regular \"answers\" block.""")
        if short_answers is not None:
            pulumi.set(__self__, "short_answers", short_answers)
        if ttl is not None:
            pulumi.set(__self__, "ttl", ttl)
        if use_client_subnet is not None:
            pulumi.set(__self__, "use_client_subnet", use_client_subnet)

    @property
    @pulumi.getter
    def domain(self) -> pulumi.Input[str]:
        """
        The records' domain. Cannot have leading or trailing
        dots - see the example above and `FQDN formatting` below.
        """
        return pulumi.get(self, "domain")

    @domain.setter
    def domain(self, value: pulumi.Input[str]):
        pulumi.set(self, "domain", value)

    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        The records' RR type.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter
    def zone(self) -> pulumi.Input[str]:
        """
        The zone the record belongs to. Cannot have leading or
        trailing dots (".") - see the example above and `FQDN formatting` below.
        """
        return pulumi.get(self, "zone")

    @zone.setter
    def zone(self, value: pulumi.Input[str]):
        pulumi.set(self, "zone", value)

    @property
    @pulumi.getter
    def answers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RecordAnswerArgs']]]]:
        """
        One or more NS1 answers for the records' specified type.
        Answers are documented below.
        """
        return pulumi.get(self, "answers")

    @answers.setter
    def answers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RecordAnswerArgs']]]]):
        pulumi.set(self, "answers", value)

    @property
    @pulumi.getter
    def filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RecordFilterArgs']]]]:
        """
        One or more NS1 filters for the record(order matters).
        Filters are documented below.
        """
        return pulumi.get(self, "filters")

    @filters.setter
    def filters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RecordFilterArgs']]]]):
        pulumi.set(self, "filters", value)

    @property
    @pulumi.getter
    def link(self) -> Optional[pulumi.Input[str]]:
        """
        The target record to link to. This means this record is a
        'linked' record, and it inherits all properties from its target.
        """
        return pulumi.get(self, "link")

    @link.setter
    def link(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "link", value)

    @property
    @pulumi.getter
    def meta(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        return pulumi.get(self, "meta")

    @meta.setter
    def meta(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "meta", value)

    @property
    @pulumi.getter
    def regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RecordRegionArgs']]]]:
        """
        One or more "regions" for the record. These are really
        just groupings based on metadata, and are called "Answer Groups" in the NS1 UI,
        but remain `regions` here for legacy reasons. Regions are
        documented below. Please note the ordering requirement!
        """
        return pulumi.get(self, "regions")

    @regions.setter
    def regions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RecordRegionArgs']]]]):
        pulumi.set(self, "regions", value)

    @property
    @pulumi.getter(name="shortAnswers")
    def short_answers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "short_answers")

    @short_answers.setter
    def short_answers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "short_answers", value)

    @property
    @pulumi.getter
    def ttl(self) -> Optional[pulumi.Input[int]]:
        """
        The records' time to live (in seconds).
        """
        return pulumi.get(self, "ttl")

    @ttl.setter
    def ttl(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "ttl", value)

    @property
    @pulumi.getter(name="useClientSubnet")
    def use_client_subnet(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to use EDNS client subnet data when
        available(in filter chain).
        * ` meta` - (Optional) meta is supported at the `record` level. Meta
        is documented below.
        """
        return pulumi.get(self, "use_client_subnet")

    @use_client_subnet.setter
    def use_client_subnet(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "use_client_subnet", value)
# NOTE(review): generated by the Pulumi Terraform Bridge (tfgen) — prefer
# regenerating over hand-editing (see the file-header warning).
@pulumi.input_type
class _RecordState:
    def __init__(__self__, *,
                 answers: Optional[pulumi.Input[Sequence[pulumi.Input['RecordAnswerArgs']]]] = None,
                 domain: Optional[pulumi.Input[str]] = None,
                 filters: Optional[pulumi.Input[Sequence[pulumi.Input['RecordFilterArgs']]]] = None,
                 link: Optional[pulumi.Input[str]] = None,
                 meta: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 regions: Optional[pulumi.Input[Sequence[pulumi.Input['RecordRegionArgs']]]] = None,
                 short_answers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 ttl: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 use_client_subnet: Optional[pulumi.Input[bool]] = None,
                 zone: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Record resources.
        :param pulumi.Input[Sequence[pulumi.Input['RecordAnswerArgs']]] answers: One or more NS1 answers for the records' specified type.
               Answers are documented below.
        :param pulumi.Input[str] domain: The records' domain. Cannot have leading or trailing
               dots - see the example above and `FQDN formatting` below.
        :param pulumi.Input[Sequence[pulumi.Input['RecordFilterArgs']]] filters: One or more NS1 filters for the record(order matters).
               Filters are documented below.
        :param pulumi.Input[str] link: The target record to link to. This means this record is a
               'linked' record, and it inherits all properties from its target.
        :param pulumi.Input[Sequence[pulumi.Input['RecordRegionArgs']]] regions: One or more "regions" for the record. These are really
               just groupings based on metadata, and are called "Answer Groups" in the NS1 UI,
               but remain `regions` here for legacy reasons. Regions are
               documented below. Please note the ordering requirement!
        :param pulumi.Input[int] ttl: The records' time to live (in seconds).
        :param pulumi.Input[str] type: The records' RR type.
        :param pulumi.Input[bool] use_client_subnet: Whether to use EDNS client subnet data when
               available(in filter chain).
               * ` meta` - (Optional) meta is supported at the `record` level. Meta
               is documented below.
        :param pulumi.Input[str] zone: The zone the record belongs to. Cannot have leading or
               trailing dots (".") - see the example above and `FQDN formatting` below.
        """
        if answers is not None:
            pulumi.set(__self__, "answers", answers)
        if domain is not None:
            pulumi.set(__self__, "domain", domain)
        if filters is not None:
            pulumi.set(__self__, "filters", filters)
        if link is not None:
            pulumi.set(__self__, "link", link)
        if meta is not None:
            pulumi.set(__self__, "meta", meta)
        if regions is not None:
            pulumi.set(__self__, "regions", regions)
        # First `short_answers` guard only emits the deprecation warnings;
        # the second one actually stores the value (tfgen emission pattern).
        if short_answers is not None:
            warnings.warn("""short_answers will be deprecated in a future release. It is suggested to migrate to a regular \"answers\" block.""", DeprecationWarning)
            pulumi.log.warn("""short_answers is deprecated: short_answers will be deprecated in a future release. It is suggested to migrate to a regular \"answers\" block.""")
        if short_answers is not None:
            pulumi.set(__self__, "short_answers", short_answers)
        if ttl is not None:
            pulumi.set(__self__, "ttl", ttl)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if use_client_subnet is not None:
            pulumi.set(__self__, "use_client_subnet", use_client_subnet)
        if zone is not None:
            pulumi.set(__self__, "zone", zone)

    @property
    @pulumi.getter
    def answers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RecordAnswerArgs']]]]:
        """
        One or more NS1 answers for the records' specified type.
        Answers are documented below.
        """
        return pulumi.get(self, "answers")

    @answers.setter
    def answers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RecordAnswerArgs']]]]):
        pulumi.set(self, "answers", value)

    @property
    @pulumi.getter
    def domain(self) -> Optional[pulumi.Input[str]]:
        """
        The records' domain. Cannot have leading or trailing
        dots - see the example above and `FQDN formatting` below.
        """
        return pulumi.get(self, "domain")

    @domain.setter
    def domain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "domain", value)

    @property
    @pulumi.getter
    def filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RecordFilterArgs']]]]:
        """
        One or more NS1 filters for the record(order matters).
        Filters are documented below.
        """
        return pulumi.get(self, "filters")

    @filters.setter
    def filters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RecordFilterArgs']]]]):
        pulumi.set(self, "filters", value)

    @property
    @pulumi.getter
    def link(self) -> Optional[pulumi.Input[str]]:
        """
        The target record to link to. This means this record is a
        'linked' record, and it inherits all properties from its target.
        """
        return pulumi.get(self, "link")

    @link.setter
    def link(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "link", value)

    @property
    @pulumi.getter
    def meta(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        return pulumi.get(self, "meta")

    @meta.setter
    def meta(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "meta", value)

    @property
    @pulumi.getter
    def regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RecordRegionArgs']]]]:
        """
        One or more "regions" for the record. These are really
        just groupings based on metadata, and are called "Answer Groups" in the NS1 UI,
        but remain `regions` here for legacy reasons. Regions are
        documented below. Please note the ordering requirement!
        """
        return pulumi.get(self, "regions")

    @regions.setter
    def regions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RecordRegionArgs']]]]):
        pulumi.set(self, "regions", value)

    @property
    @pulumi.getter(name="shortAnswers")
    def short_answers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "short_answers")

    @short_answers.setter
    def short_answers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "short_answers", value)

    @property
    @pulumi.getter
    def ttl(self) -> Optional[pulumi.Input[int]]:
        """
        The records' time to live (in seconds).
        """
        return pulumi.get(self, "ttl")

    @ttl.setter
    def ttl(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "ttl", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        The records' RR type.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter(name="useClientSubnet")
    def use_client_subnet(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to use EDNS client subnet data when
        available(in filter chain).
        * ` meta` - (Optional) meta is supported at the `record` level. Meta
        is documented below.
        """
        return pulumi.get(self, "use_client_subnet")

    @use_client_subnet.setter
    def use_client_subnet(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "use_client_subnet", value)

    @property
    @pulumi.getter
    def zone(self) -> Optional[pulumi.Input[str]]:
        """
        The zone the record belongs to. Cannot have leading or
        trailing dots (".") - see the example above and `FQDN formatting` below.
        """
        return pulumi.get(self, "zone")

    @zone.setter
    def zone(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "zone", value)
class Record(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
answers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RecordAnswerArgs']]]]] = None,
domain: Optional[pulumi.Input[str]] = None,
filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RecordFilterArgs']]]]] = None,
link: Optional[pulumi.Input[str]] = None,
meta: Optional[pulumi.Input[Mapping[str, Any]]] = None,
regions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RecordRegionArgs']]]]] = None,
short_answers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
ttl: Optional[pulumi.Input[int]] = None,
type: Optional[pulumi.Input[str]] = None,
use_client_subnet: Optional[pulumi.Input[bool]] = None,
zone: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a NS1 Record resource. This can be used to create, modify, and delete records.
## NS1 Documentation
[Record Api Doc](https://ns1.com/api#records)
## Import
```sh
$ pulumi import ns1:index/record:Record <name> <zone>/<domain>/<type>`
```
So for the example above
```sh
$ pulumi import ns1:index/record:Record www terraform.example.io/www.terraform.example.io/CNAME`
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RecordAnswerArgs']]]] answers: One or more NS1 answers for the records' specified type.
Answers are documented below.
:param pulumi.Input[str] domain: The records' domain. Cannot have leading or trailing
dots - see the example above and `FQDN formatting` below.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RecordFilterArgs']]]] filters: One or more NS1 filters for the record(order matters).
Filters are documented below.
:param pulumi.Input[str] link: The target record to link to. This means this record is a
'linked' record, and it inherits all properties from its target.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RecordRegionArgs']]]] regions: One or more "regions" for the record. These are really
just groupings based on metadata, and are called "Answer Groups" in the NS1 UI,
but remain `regions` here for legacy reasons. Regions are
documented below. Please note the ordering requirement!
:param pulumi.Input[int] ttl: The records' time to live (in seconds).
:param pulumi.Input[str] type: The records' RR type.
:param pulumi.Input[bool] use_client_subnet: Whether to use EDNS client subnet data when
available(in filter chain).
* ` meta` - (Optional) meta is supported at the `record` level. Meta
is documented below.
:param pulumi.Input[str] zone: The zone the record belongs to. Cannot have leading or
trailing dots (".") - see the example above and `FQDN formatting` below.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RecordArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a NS1 Record resource. This can be used to create, modify, and delete records.
## NS1 Documentation
[Record Api Doc](https://ns1.com/api#records)
## Import
```sh
$ pulumi import ns1:index/record:Record <name> <zone>/<domain>/<type>`
```
So for the example above
```sh
$ pulumi import ns1:index/record:Record www terraform.example.io/www.terraform.example.io/CNAME`
```
:param str resource_name: The name of the resource.
:param RecordArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RecordArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
answers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RecordAnswerArgs']]]]] = None,
domain: Optional[pulumi.Input[str]] = None,
filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RecordFilterArgs']]]]] = None,
link: Optional[pulumi.Input[str]] = None,
meta: Optional[pulumi.Input[Mapping[str, Any]]] = None,
regions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RecordRegionArgs']]]]] = None,
short_answers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
ttl: Optional[pulumi.Input[int]] = None,
type: Optional[pulumi.Input[str]] = None,
use_client_subnet: Optional[pulumi.Input[bool]] = None,
zone: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RecordArgs.__new__(RecordArgs)
__props__.__dict__["answers"] = answers
if domain is None and not opts.urn:
raise TypeError("Missing required property 'domain'")
__props__.__dict__["domain"] = domain
__props__.__dict__["filters"] = filters
__props__.__dict__["link"] = link
__props__.__dict__["meta"] = meta
__props__.__dict__["regions"] = regions
if short_answers is not None and not opts.urn:
warnings.warn("""short_answers will be deprecated in a future release. It is suggested to migrate to a regular \"answers\" block.""", DeprecationWarning)
pulumi.log.warn("""short_answers is deprecated: short_answers will be deprecated in a future release. It is suggested to migrate to a regular \"answers\" block.""")
__props__.__dict__["short_answers"] = short_answers
__props__.__dict__["ttl"] = ttl
if type is None and not opts.urn:
raise TypeError("Missing required property 'type'")
__props__.__dict__["type"] = type
__props__.__dict__["use_client_subnet"] = use_client_subnet
if zone is None and not opts.urn:
raise TypeError("Missing required property 'zone'")
__props__.__dict__["zone"] = zone
super(Record, __self__).__init__(
'ns1:index/record:Record',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
answers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RecordAnswerArgs']]]]] = None,
domain: Optional[pulumi.Input[str]] = None,
filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RecordFilterArgs']]]]] = None,
link: Optional[pulumi.Input[str]] = None,
meta: Optional[pulumi.Input[Mapping[str, Any]]] = None,
regions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RecordRegionArgs']]]]] = None,
short_answers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
ttl: Optional[pulumi.Input[int]] = None,
type: Optional[pulumi.Input[str]] = None,
use_client_subnet: Optional[pulumi.Input[bool]] = None,
zone: Optional[pulumi.Input[str]] = None) -> 'Record':
"""
Get an existing Record resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RecordAnswerArgs']]]] answers: One or more NS1 answers for the records' specified type.
Answers are documented below.
:param pulumi.Input[str] domain: The records' domain. Cannot have leading or trailing
dots - see the example above and `FQDN formatting` below.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RecordFilterArgs']]]] filters: One or more NS1 filters for the record(order matters).
Filters are documented below.
:param pulumi.Input[str] link: The target record to link to. This means this record is a
'linked' record, and it inherits all properties from its target.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RecordRegionArgs']]]] regions: One or more "regions" for the record. These are really
just groupings based on metadata, and are called "Answer Groups" in the NS1 UI,
but remain `regions` here for legacy reasons. Regions are
documented below. Please note the ordering requirement!
:param pulumi.Input[int] ttl: The records' time to live (in seconds).
:param pulumi.Input[str] type: The records' RR type.
:param pulumi.Input[bool] use_client_subnet: Whether to use EDNS client subnet data when
available(in filter chain).
* ` meta` - (Optional) meta is supported at the `record` level. Meta
is documented below.
:param pulumi.Input[str] zone: The zone the record belongs to. Cannot have leading or
trailing dots (".") - see the example above and `FQDN formatting` below.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _RecordState.__new__(_RecordState)
__props__.__dict__["answers"] = answers
__props__.__dict__["domain"] = domain
__props__.__dict__["filters"] = filters
__props__.__dict__["link"] = link
__props__.__dict__["meta"] = meta
__props__.__dict__["regions"] = regions
__props__.__dict__["short_answers"] = short_answers
__props__.__dict__["ttl"] = ttl
__props__.__dict__["type"] = type
__props__.__dict__["use_client_subnet"] = use_client_subnet
__props__.__dict__["zone"] = zone
return Record(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def answers(self) -> pulumi.Output[Optional[Sequence['outputs.RecordAnswer']]]:
    """One or more NS1 answers for the records' specified type (documented below)."""
    answer_list = pulumi.get(self, "answers")
    return answer_list
@property
@pulumi.getter
def domain(self) -> pulumi.Output[str]:
    """The records' domain; no leading/trailing dots (see `FQDN formatting` below)."""
    domain_value = pulumi.get(self, "domain")
    return domain_value
@property
@pulumi.getter
def filters(self) -> pulumi.Output[Optional[Sequence['outputs.RecordFilter']]]:
    """One or more NS1 filters for the record — order matters (documented below)."""
    filter_list = pulumi.get(self, "filters")
    return filter_list
@property
@pulumi.getter
def link(self) -> pulumi.Output[Optional[str]]:
    """Target record to link to; a 'linked' record inherits all properties from its target."""
    link_target = pulumi.get(self, "link")
    return link_target
@property
@pulumi.getter
def meta(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
    """Meta is supported at the `record` level. Meta is documented below."""
    return pulumi.get(self, "meta")
@property
@pulumi.getter
def regions(self) -> pulumi.Output[Optional[Sequence['outputs.RecordRegion']]]:
    """One or more "regions" for the record: metadata-based groupings, called
    "Answer Groups" in the NS1 UI but kept as `regions` here for legacy reasons.
    Documented below — note the ordering requirement.
    """
    region_list = pulumi.get(self, "regions")
    return region_list
@property
@pulumi.getter(name="shortAnswers")
def short_answers(self) -> pulumi.Output[Optional[Sequence[str]]]:
    # NOTE(review): undocumented upstream; presumably a simplified string form
    # of `answers` — confirm against the NS1 provider schema.
    return pulumi.get(self, "short_answers")
@property
@pulumi.getter
def ttl(self) -> pulumi.Output[int]:
    """The records' time to live, in seconds."""
    ttl_seconds = pulumi.get(self, "ttl")
    return ttl_seconds
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
    """The records' RR type."""
    rr_type = pulumi.get(self, "type")
    return rr_type
@property
@pulumi.getter(name="useClientSubnet")
def use_client_subnet(self) -> pulumi.Output[Optional[bool]]:
    """Whether to use EDNS client subnet data when available (in filter chain).
    * `meta` - (Optional) meta is supported at the `record` level; documented below.
    """
    flag = pulumi.get(self, "use_client_subnet")
    return flag
@property
@pulumi.getter
def zone(self) -> pulumi.Output[str]:
    """Zone the record belongs to; no leading/trailing dots (".") — see `FQDN formatting` below."""
    zone_name = pulumi.get(self, "zone")
    return zone_name
| 44.570629
| 180
| 0.628593
| 3,779
| 31,868
| 5.165652
| 0.061657
| 0.105374
| 0.087598
| 0.061472
| 0.911121
| 0.895805
| 0.874443
| 0.857026
| 0.849086
| 0.835357
| 0
| 0.001402
| 0.26161
| 31,868
| 714
| 181
| 44.633053
| 0.828184
| 0.341157
| 0
| 0.794872
| 1
| 0.015385
| 0.12113
| 0.001203
| 0
| 0
| 0
| 0
| 0
| 1
| 0.158974
| false
| 0.002564
| 0.017949
| 0.015385
| 0.271795
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
150f6a863a94f9f08d4ad2130b044277a49cff67
| 149
|
py
|
Python
|
django/main/settings_markdown.py
|
mnieber/shared-goal
|
3ccdec341a3d542dbc108ad5375c309322f91d96
|
[
"Apache-2.0"
] | null | null | null |
django/main/settings_markdown.py
|
mnieber/shared-goal
|
3ccdec341a3d542dbc108ad5375c309322f91d96
|
[
"Apache-2.0"
] | null | null | null |
django/main/settings_markdown.py
|
mnieber/shared-goal
|
3ccdec341a3d542dbc108ad5375c309322f91d96
|
[
"Apache-2.0"
] | null | null | null |
# from markdown_deux.conf.settings import MARKDOWN_DEUX_DEFAULT_STYLE
# MARKDOWN_DEUX_STYLES = {
# "default": MARKDOWN_DEUX_DEFAULT_STYLE,
# }
| 21.285714
| 69
| 0.778523
| 18
| 149
| 5.944444
| 0.5
| 0.448598
| 0.35514
| 0.448598
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134228
| 149
| 6
| 70
| 24.833333
| 0.829457
| 0.926175
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
12788fe6e4cbb193f6638d0484569f5d5f886a2d
| 166
|
py
|
Python
|
HWHMBBF/views.py
|
HLoveMe/HWMBBF_Serve
|
a11fb5b67c913b62df839ce3438a3be433e3865b
|
[
"Apache-2.0"
] | null | null | null |
HWHMBBF/views.py
|
HLoveMe/HWMBBF_Serve
|
a11fb5b67c913b62df839ce3438a3be433e3865b
|
[
"Apache-2.0"
] | null | null | null |
HWHMBBF/views.py
|
HLoveMe/HWMBBF_Serve
|
a11fb5b67c913b62df839ce3438a3be433e3865b
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render,render_to_response
from django.template import Context, Template
# Project
def index(res):
    """Render the landing page.

    :param res: the incoming HttpRequest.
    :return: HttpResponse rendering ``index.html``.

    Uses ``render`` instead of ``render_to_response``: the latter was
    deprecated in Django 2.0 and removed in Django 3.0, and ``render``
    additionally passes the request so context processors run.
    """
    return render(res, "index.html")
| 23.714286
| 54
| 0.807229
| 24
| 166
| 5.416667
| 0.625
| 0.153846
| 0.246154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114458
| 166
| 7
| 55
| 23.714286
| 0.884354
| 0.012048
| 0
| 0
| 0
| 0
| 0.060976
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
128b90a5fdf7c31f9bbf1863319ef18bcc034f63
| 197
|
py
|
Python
|
base/models.py
|
AltairStar/prefinal
|
f1daa21c6b8f069ceb659587e3ac85ad71871f45
|
[
"MIT"
] | null | null | null |
base/models.py
|
AltairStar/prefinal
|
f1daa21c6b8f069ceb659587e3ac85ad71871f45
|
[
"MIT"
] | null | null | null |
base/models.py
|
AltairStar/prefinal
|
f1daa21c6b8f069ceb659587e3ac85ad71871f45
|
[
"MIT"
] | null | null | null |
from django.db import models
class Images(models.Model):
    """An image record: its storage path and the associated result string."""

    # Path to the image (up to 200 characters).
    path = models.CharField(max_length=200)
    # Result text associated with the image (up to 50 characters);
    # NOTE(review): semantics not visible here — presumably a processing/classification
    # outcome, confirm against the code that writes it.
    result = models.CharField(max_length=50)

    def __str__(self):
        # Human-readable representation (shown e.g. in the Django admin).
        return self.path
| 19.7
| 44
| 0.705584
| 27
| 197
| 4.925926
| 0.703704
| 0.225564
| 0.270677
| 0.360902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031646
| 0.19797
| 197
| 9
| 45
| 21.888889
| 0.810127
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0.166667
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
12f49296ac67404edbcd051ddc84f7a6b435e313
| 17,917
|
py
|
Python
|
sdk/policyinsights/azure-mgmt-policyinsights/azure/mgmt/policyinsights/operations/policy_tracked_resources_operations.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
sdk/policyinsights/azure-mgmt-policyinsights/azure/mgmt/policyinsights/operations/policy_tracked_resources_operations.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
sdk/policyinsights/azure-mgmt-policyinsights/azure/mgmt/policyinsights/operations/policy_tracked_resources_operations.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2019-06-17T22:18:23.000Z
|
2019-06-17T22:18:23.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class PolicyTrackedResourcesOperations(object):
    """PolicyTrackedResourcesOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar management_groups_namespace: The namespace for Microsoft Management RP; only "Microsoft.Management" is allowed. Constant value: "Microsoft.Management".
    :ivar policy_tracked_resources_resource: The name of the virtual resource under PolicyTrackedResources resource type; only "default" is allowed. Constant value: "default".
    :ivar api_version: Client Api Version. Constant value: "2018-07-01-preview".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.management_groups_namespace = "Microsoft.Management"
        self.policy_tracked_resources_resource = "default"
        self.api_version = "2018-07-01-preview"
        self.config = config

    def _list_query_results(self, url_template, path_format_arguments,
                            query_options, custom_headers, raw, operation_config):
        """Shared implementation behind the four public ``list_query_results_for_*`` methods.

        Formats *url_template* with *path_format_arguments*, applies the
        optional ``$top``/``$filter`` from *query_options*, and returns a
        ``PolicyTrackedResourcePaged`` iterator (a raw paged response when
        *raw* is true). Raises ``QueryFailureException`` on non-200 replies.
        """
        top = None
        if query_options is not None:
            top = query_options.top
        filter_value = None
        if query_options is not None:
            filter_value = query_options.filter

        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # First page: build the parameterized URL and query string.
                url = self._client.format_url(url_template, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=0)
                if filter_value is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter_value, 'str')
            else:
                # Subsequent pages: the service supplies a fully-formed next link.
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.post(url, query_parameters, header_parameters)
            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                raise models.QueryFailureException(self._deserialize, response)

            return response

        # Deserialize response
        deserialized = models.PolicyTrackedResourcePaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.PolicyTrackedResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized

    def list_query_results_for_management_group(
            self, management_group_name, query_options=None, custom_headers=None, raw=False, **operation_config):
        """Queries policy tracked resources under the management group.

        :param management_group_name: Management group name.
        :type management_group_name: str
        :param query_options: Additional parameters for the operation
        :type query_options: ~azure.mgmt.policyinsights.models.QueryOptions
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of PolicyTrackedResource
        :rtype:
         ~azure.mgmt.policyinsights.models.PolicyTrackedResourcePaged[~azure.mgmt.policyinsights.models.PolicyTrackedResource]
        :raises:
         :class:`QueryFailureException<azure.mgmt.policyinsights.models.QueryFailureException>`
        """
        path_format_arguments = {
            'managementGroupsNamespace': self._serialize.url("self.management_groups_namespace", self.management_groups_namespace, 'str'),
            'managementGroupName': self._serialize.url("management_group_name", management_group_name, 'str'),
            'policyTrackedResourcesResource': self._serialize.url("self.policy_tracked_resources_resource", self.policy_tracked_resources_resource, 'str')
        }
        return self._list_query_results(
            self.list_query_results_for_management_group.metadata['url'],
            path_format_arguments, query_options, custom_headers, raw, operation_config)
    list_query_results_for_management_group.metadata = {'url': '/providers/{managementGroupsNamespace}/managementGroups/{managementGroupName}/providers/Microsoft.PolicyInsights/policyTrackedResources/{policyTrackedResourcesResource}/queryResults'}

    def list_query_results_for_subscription(
            self, subscription_id, query_options=None, custom_headers=None, raw=False, **operation_config):
        """Queries policy tracked resources under the subscription.

        :param subscription_id: Microsoft Azure subscription ID.
        :type subscription_id: str
        :param query_options: Additional parameters for the operation
        :type query_options: ~azure.mgmt.policyinsights.models.QueryOptions
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of PolicyTrackedResource
        :rtype:
         ~azure.mgmt.policyinsights.models.PolicyTrackedResourcePaged[~azure.mgmt.policyinsights.models.PolicyTrackedResource]
        :raises:
         :class:`QueryFailureException<azure.mgmt.policyinsights.models.QueryFailureException>`
        """
        path_format_arguments = {
            'policyTrackedResourcesResource': self._serialize.url("self.policy_tracked_resources_resource", self.policy_tracked_resources_resource, 'str'),
            'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str')
        }
        return self._list_query_results(
            self.list_query_results_for_subscription.metadata['url'],
            path_format_arguments, query_options, custom_headers, raw, operation_config)
    list_query_results_for_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.PolicyInsights/policyTrackedResources/{policyTrackedResourcesResource}/queryResults'}

    def list_query_results_for_resource_group(
            self, resource_group_name, subscription_id, query_options=None, custom_headers=None, raw=False, **operation_config):
        """Queries policy tracked resources under the resource group.

        :param resource_group_name: Resource group name.
        :type resource_group_name: str
        :param subscription_id: Microsoft Azure subscription ID.
        :type subscription_id: str
        :param query_options: Additional parameters for the operation
        :type query_options: ~azure.mgmt.policyinsights.models.QueryOptions
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of PolicyTrackedResource
        :rtype:
         ~azure.mgmt.policyinsights.models.PolicyTrackedResourcePaged[~azure.mgmt.policyinsights.models.PolicyTrackedResource]
        :raises:
         :class:`QueryFailureException<azure.mgmt.policyinsights.models.QueryFailureException>`
        """
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'policyTrackedResourcesResource': self._serialize.url("self.policy_tracked_resources_resource", self.policy_tracked_resources_resource, 'str'),
            'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str')
        }
        return self._list_query_results(
            self.list_query_results_for_resource_group.metadata['url'],
            path_format_arguments, query_options, custom_headers, raw, operation_config)
    list_query_results_for_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PolicyInsights/policyTrackedResources/{policyTrackedResourcesResource}/queryResults'}

    def list_query_results_for_resource(
            self, resource_id, query_options=None, custom_headers=None, raw=False, **operation_config):
        """Queries policy tracked resources under the resource.

        :param resource_id: Resource ID.
        :type resource_id: str
        :param query_options: Additional parameters for the operation
        :type query_options: ~azure.mgmt.policyinsights.models.QueryOptions
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of PolicyTrackedResource
        :rtype:
         ~azure.mgmt.policyinsights.models.PolicyTrackedResourcePaged[~azure.mgmt.policyinsights.models.PolicyTrackedResource]
        :raises:
         :class:`QueryFailureException<azure.mgmt.policyinsights.models.QueryFailureException>`
        """
        path_format_arguments = {
            # skip_quote: the resource ID is a full ARM path and must not be URL-encoded.
            'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
            'policyTrackedResourcesResource': self._serialize.url("self.policy_tracked_resources_resource", self.policy_tracked_resources_resource, 'str')
        }
        return self._list_query_results(
            self.list_query_results_for_resource.metadata['url'],
            path_format_arguments, query_options, custom_headers, raw, operation_config)
    list_query_results_for_resource.metadata = {'url': '/{resourceId}/providers/Microsoft.PolicyInsights/policyTrackedResources/{policyTrackedResourcesResource}/queryResults'}
| 49.494475
| 247
| 0.661216
| 1,794
| 17,917
| 6.383501
| 0.102007
| 0.02934
| 0.015718
| 0.040517
| 0.852951
| 0.845355
| 0.819246
| 0.813308
| 0.805973
| 0.805973
| 0
| 0.00275
| 0.248981
| 17,917
| 361
| 248
| 49.631579
| 0.84832
| 0.266562
| 0
| 0.810256
| 0
| 0
| 0.140684
| 0.091532
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046154
| false
| 0
| 0.015385
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
12f54550fdd940883cc9b840f9951b2373c427de
| 203
|
py
|
Python
|
ambari-agent/src/main/python/resource_management/libraries/resources/__init__.py
|
boydos/incubator-ambari
|
e10d85756dd55729c20aeda2baa0d6c93c4ca31d
|
[
"Apache-2.0"
] | 2
|
2018-06-06T14:21:11.000Z
|
2018-06-06T14:22:50.000Z
|
ambari-agent/src/main/python/resource_management/libraries/resources/__init__.py
|
boydos/incubator-ambari
|
e10d85756dd55729c20aeda2baa0d6c93c4ca31d
|
[
"Apache-2.0"
] | null | null | null |
ambari-agent/src/main/python/resource_management/libraries/resources/__init__.py
|
boydos/incubator-ambari
|
e10d85756dd55729c20aeda2baa0d6c93c4ca31d
|
[
"Apache-2.0"
] | null | null | null |
from resource_management.libraries.resources.execute_hadoop import *
from resource_management.libraries.resources.template_config import *
from resource_management.libraries.resources.xml_config import *
| 67.666667
| 69
| 0.8867
| 24
| 203
| 7.25
| 0.458333
| 0.206897
| 0.37931
| 0.534483
| 0.758621
| 0.528736
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054187
| 203
| 3
| 70
| 67.666667
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
12fc7e6c34171ccfefa0e5ae27464a19a83d4c93
| 965
|
py
|
Python
|
museum_site/core/decorators.py
|
DrDos0016/z2
|
b63e77129fefcb4f990ee1cb9952f4f708ee3a2b
|
[
"MIT"
] | 3
|
2017-05-01T19:53:57.000Z
|
2018-08-27T20:14:43.000Z
|
museum_site/core/decorators.py
|
DrDos0016/z2
|
b63e77129fefcb4f990ee1cb9952f4f708ee3a2b
|
[
"MIT"
] | null | null | null |
museum_site/core/decorators.py
|
DrDos0016/z2
|
b63e77129fefcb4f990ee1cb9952f4f708ee3a2b
|
[
"MIT"
] | 1
|
2018-08-27T20:14:46.000Z
|
2018-08-27T20:14:46.000Z
|
def dev_only(func, *args, **kwargs):
    """Decorator for Django views: serve only on the DEV environment.

    Raises ``Http404`` when ``env_from_host`` (module-level helper) does not
    map the request's host to "DEV". The request is taken from the
    ``request`` kwarg or the first positional argument.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped view's __name__/__doc__ for introspection/debugging
    def inner(*args, **kwargs):
        request = kwargs.get("request", args[0])
        # Check host
        host = request.get_host()
        if env_from_host(host) != "DEV":
            raise Http404
        else:
            return func(*args, **kwargs)
    return inner
def non_production(func, *args, **kwargs):
    """Decorator for Django views: serve only on non-production environments.

    Raises ``Http404`` when ``env_from_host`` (module-level helper) maps the
    request's host to anything other than "DEV" or "BETA". The request is
    taken from the ``request`` kwarg or the first positional argument.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped view's __name__/__doc__ for introspection/debugging
    def inner(*args, **kwargs):
        request = kwargs.get("request", args[0])
        # Check host
        host = request.get_host()
        if env_from_host(host) not in ["DEV", "BETA"]:
            raise Http404
        else:
            return func(*args, **kwargs)
    return inner
def prod_only(func, *args, **kwargs):
    """Decorator for Django views: serve only on the PROD environment.

    Raises ``Http404`` when ``env_from_host`` (module-level helper) does not
    map the request's host to "PROD". The request is taken from the
    ``request`` kwarg or the first positional argument.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped view's __name__/__doc__ for introspection/debugging
    def inner(*args, **kwargs):
        request = kwargs.get("request", args[0])
        # Check host
        host = request.get_host()
        if env_from_host(host) != "PROD":
            raise Http404
        else:
            return func(*args, **kwargs)
    return inner
| 25.394737
| 54
| 0.539896
| 114
| 965
| 4.464912
| 0.219298
| 0.176817
| 0.165029
| 0.100196
| 0.917485
| 0.917485
| 0.917485
| 0.917485
| 0.917485
| 0.825147
| 0
| 0.018377
| 0.323316
| 965
| 37
| 55
| 26.081081
| 0.761103
| 0.033161
| 0
| 0.777778
| 0
| 0
| 0.037675
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4241c35624d2c366a2779a12eed0c016e5650305
| 3,306
|
py
|
Python
|
tests/rules/common.py
|
babenek/CredSweeper
|
4d69ec934b45fd2f68e00b636077e5edfd1ff6ca
|
[
"MIT"
] | 17
|
2021-10-22T00:29:46.000Z
|
2022-03-21T03:05:56.000Z
|
tests/rules/common.py
|
babenek/CredSweeper
|
4d69ec934b45fd2f68e00b636077e5edfd1ff6ca
|
[
"MIT"
] | 29
|
2021-11-05T21:10:51.000Z
|
2022-03-30T10:41:08.000Z
|
tests/rules/common.py
|
babenek/CredSweeper
|
4d69ec934b45fd2f68e00b636077e5edfd1ff6ca
|
[
"MIT"
] | 16
|
2021-11-05T20:39:54.000Z
|
2022-03-11T00:57:32.000Z
|
import pytest
from typing import List
from credsweeper.file_handler.analysis_target import AnalysisTarget
class BaseTestRule:
    """Common positive/negative scan checks inherited by per-rule test classes."""

    def test_scan_p(self, file_path: pytest.fixture, lines: pytest.fixture,
                    scanner_without_filters: pytest.fixture) -> None:
        """Exactly one credential must be found in the fixture lines."""
        targets = [AnalysisTarget(text, line_num, lines, file_path)
                   for line_num, text in enumerate(lines, start=1)]
        assert len(scanner_without_filters.scan(targets)) == 1

    @pytest.mark.parametrize("lines", [[""], ["String secret = new String()"], ["SZa6TWGF2XuWdl7c2s2xB1iSlnZJLbvH"]])
    def test_scan_n(self, file_path: pytest.fixture, lines: List[str], scanner: pytest.fixture) -> None:
        """No credential must be found in benign lines."""
        targets = [AnalysisTarget(text, line_num, lines, file_path)
                   for line_num, text in enumerate(lines, start=1)]
        assert len(scanner.scan(targets)) == 0
class BaseTestNoQuotesRule:
    """Secrets in code files (".cpp", ".py", etc.) must be quoted string
    literals; otherwise no string literal is declared and the value cannot be
    a string secret. Exception: comments, where a secret may be unquoted.
    These checks verify an unquoted password is flagged only when it is not
    plain code in a code file.
    """

    def test_scan_quote_p(self, file_path: pytest.fixture, lines: pytest.fixture, scanner: pytest.fixture) -> None:
        targets = [AnalysisTarget(text, line_num, lines, file_path)
                   for line_num, text in enumerate(lines, start=1)]
        assert len(scanner.scan(targets)) == 1

    def test_scan_quote_n(self, python_file_path: pytest.fixture, lines: pytest.fixture,
                          scanner: pytest.fixture) -> None:
        targets = [AnalysisTarget(text, line_num, lines, python_file_path)
                   for line_num, text in enumerate(lines, start=1)]
        assert len(scanner.scan(targets)) == 0
class BaseTestCommentRule:
    """Secrets in code files (".cpp", ".py", etc.) must be quoted string
    literals. Exception: comments, where a secret may be unquoted. These
    checks verify an unquoted password inside a comment of a code file is
    flagged, while a non-comment form is not.
    """

    def test_scan_comment_p(self, python_file_path: pytest.fixture, lines: pytest.fixture,
                            scanner: pytest.fixture) -> None:
        targets = [AnalysisTarget(text, line_num, lines, python_file_path)
                   for line_num, text in enumerate(lines, start=1)]
        assert len(scanner.scan(targets)) == 1

    def test_scan_comment_n(self, python_file_path: pytest.fixture, lines: pytest.fixture,
                            scanner: pytest.fixture) -> None:
        # Prefix each line so it no longer reads as a comment.
        prefixed = [f"\\{line}" for line in lines]
        targets = [AnalysisTarget(text, line_num, prefixed, python_file_path)
                   for line_num, text in enumerate(prefixed, start=1)]
        assert len(scanner.scan(targets)) == 0
class BaseTestMultiRule:
    """Checks for rules whose single match yields two line-data entries."""

    def test_scan_line_data_p(self, file_path: pytest.fixture, lines: pytest.fixture, scanner: pytest.fixture) -> None:
        targets = [AnalysisTarget(text, line_num, lines, file_path)
                   for line_num, text in enumerate(lines, start=1)]
        assert len(scanner.scan(targets)[0].line_data_list) == 2

    def test_scan_line_data_n(self, file_path: pytest.fixture, scanner: pytest.fixture) -> None:
        empty_lines = [""]
        targets = [AnalysisTarget(text, line_num, empty_lines, file_path)
                   for line_num, text in enumerate(empty_lines, start=1)]
        assert len(scanner.scan(targets)) == 0
| 47.228571
| 119
| 0.680278
| 443
| 3,306
| 4.954853
| 0.180587
| 0.130296
| 0.040091
| 0.076538
| 0.820957
| 0.803645
| 0.764465
| 0.740774
| 0.740774
| 0.740774
| 0
| 0.008833
| 0.212341
| 3,306
| 69
| 120
| 47.913043
| 0.834101
| 0.174531
| 0
| 0.447368
| 0
| 0
| 0.0273
| 0.011967
| 0
| 0
| 0
| 0
| 0.210526
| 1
| 0.210526
| false
| 0
| 0.078947
| 0
| 0.394737
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
427787cbcb89dc3c0e8d47dea355eba1df4f6a22
| 4,382
|
py
|
Python
|
tests/test_plot_vars.py
|
mitchute/plot-eplusout-csv
|
91cd552bbdea79ffa970d44aad667981c43056fd
|
[
"MIT"
] | null | null | null |
tests/test_plot_vars.py
|
mitchute/plot-eplusout-csv
|
91cd552bbdea79ffa970d44aad667981c43056fd
|
[
"MIT"
] | null | null | null |
tests/test_plot_vars.py
|
mitchute/plot-eplusout-csv
|
91cd552bbdea79ffa970d44aad667981c43056fd
|
[
"MIT"
] | null | null | null |
import os
import tempfile
import unittest
from pathlib import Path
import pandas as pd
from src.plot_vars import plot, GenericError
class TestPlotVars(unittest.TestCase):
    """Tests for ``plot``: compare a baseline CSV against a modified CSV.

    Every test writes a pair of small DataFrames to CSV files inside a fresh
    temporary directory and points ``plot`` at them; the shared fixture logic
    lives in :meth:`_write_pair`.
    """

    @staticmethod
    def _write_pair(df_base, df_mod):
        """Write *df_base* / *df_mod* to CSVs in a new temp directory.

        Sets the index name to "Date/Time" on both frames (the column name
        ``plot`` expects), then returns ``(temp_dir, base_path, mod_path)``.
        Note: ``tempfile.mkdtemp()`` already creates the directory, so no
        extra ``os.mkdir`` is needed.
        """
        temp_dir = Path(tempfile.mkdtemp())
        base_path = temp_dir / "base.csv"
        mod_path = temp_dir / "mod.csv"
        df_base.index.name = "Date/Time"
        df_mod.index.name = "Date/Time"
        df_base.to_csv(base_path)
        df_mod.to_csv(mod_path)
        return temp_dir, base_path, mod_path

    def test_plot_all_cols(self):
        # Default call: all shared columns are plotted.
        df_base = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
        df_mod = pd.DataFrame({"A": [1.5, 2.5, 3.5], "B": [4.5, 5.5, 6.5]})
        temp_dir, base_path, mod_path = self._write_pair(df_base, df_mod)
        plot(str(base_path), str(mod_path), plot_dir=str(temp_dir))

    def test_plot_single_cols(self):
        # A single column name (not a list) must also be accepted.
        df_base = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
        df_mod = pd.DataFrame({"A": [1.5, 2.5, 3.5], "B": [4.5, 5.5, 6.5]})
        temp_dir, base_path, mod_path = self._write_pair(df_base, df_mod)
        plot(str(base_path), str(mod_path), cols="A", plot_dir=str(temp_dir))

    def test_plot_cols_list(self):
        # An explicit subset of columns as a list.
        df_base = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [0, 1, 2]})
        df_mod = pd.DataFrame({"A": [1.5, 2.5, 3.5], "B": [4.5, 5.5, 6.5], "C": [0, 1, 2]})
        temp_dir, base_path, mod_path = self._write_pair(df_base, df_mod)
        plot(str(base_path), str(mod_path), cols=["A", "B"], plot_dir=str(temp_dir))

    def test_mismatched_rows(self):
        # Different row counts between base and mod must raise GenericError.
        df_base = pd.DataFrame({"A": [1, 2, 3]})
        df_mod = pd.DataFrame({"A": [1.5, 2.5]})
        temp_dir, base_path, mod_path = self._write_pair(df_base, df_mod)
        with self.assertRaises(GenericError):
            plot(str(base_path), str(mod_path), plot_dir=str(temp_dir))

    def test_mismatched_cols(self):
        # Columns present on only one side are tolerated (no exception).
        df_base = pd.DataFrame({"A": [1, 2, 3], "C": [1, 1, 1]})
        df_mod = pd.DataFrame({"A": [1.5, 2.5, 3.5], "D": [1, 1, 1]})
        temp_dir, base_path, mod_path = self._write_pair(df_base, df_mod)
        plot(str(base_path), str(mod_path), plot_dir=str(temp_dir))

    def test_mismatched_cols_with_list_input(self):
        # Requesting a column ("E") that exists in neither file is tolerated.
        df_base = pd.DataFrame({"A": [1, 2, 3], "C": [1, 1, 1]})
        df_mod = pd.DataFrame({"A": [1.5, 2.5, 3.5], "D": [1, 1, 1]})
        temp_dir, base_path, mod_path = self._write_pair(df_base, df_mod)
        plot(str(base_path), str(mod_path), cols=["A", "E"], plot_dir=str(temp_dir))

    def test_plot_low_high_rows(self):
        # Restrict plotting to a row window via low_row_num / high_row_num.
        df_base = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
        df_mod = pd.DataFrame({"A": [1.5, 2.5, 3.5], "B": [4.5, 5.5, 6.5]})
        temp_dir, base_path, mod_path = self._write_pair(df_base, df_mod)
        plot(str(base_path), str(mod_path), low_row_num=1, high_row_num=2, plot_dir=str(temp_dir))
| 39.477477
| 98
| 0.571885
| 721
| 4,382
| 3.234397
| 0.087379
| 0.126072
| 0.066038
| 0.078045
| 0.894511
| 0.883791
| 0.883791
| 0.883791
| 0.843482
| 0.834906
| 0
| 0.035978
| 0.257873
| 4,382
| 110
| 99
| 39.836364
| 0.681119
| 0
| 0
| 0.767677
| 0
| 0
| 0.060246
| 0
| 0
| 0
| 0
| 0
| 0.010101
| 1
| 0.070707
| false
| 0
| 0.060606
| 0
| 0.141414
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4279a56efa0b75735731fdc21731787c2b0ed646
| 34,755
|
py
|
Python
|
demos/analyzers/minist.py
|
supfisher/AirDL
|
086453c4e93a466e6bf968b5d66f7cecc0b0d2db
|
[
"MIT"
] | 1
|
2021-11-02T16:01:08.000Z
|
2021-11-02T16:01:08.000Z
|
demos/analyzers/minist.py
|
supfisher/AirDL
|
086453c4e93a466e6bf968b5d66f7cecc0b0d2db
|
[
"MIT"
] | null | null | null |
demos/analyzers/minist.py
|
supfisher/AirDL
|
086453c4e93a466e6bf968b5d66f7cecc0b0d2db
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
from utils import plot_xs_ys, parse_time_acc_loss, parse_energy, plot_xs_ys1_ys2, parse_all_energy
import matplotlib.pyplot as plt
# Root of the ns-3 distributed-ML demo outputs. Machine-specific absolute
# path — adjust before running on another host.
current_path = '/Users/mag0a/Desktop/Github/FLinMEN/ns-3-allinone/ns-3-dev/contrib/distributed-ml-test/demos/'
# Feature flags selecting which figure families the __main__ block below
# generates. By the usage below, the *1 flags drive accuracy/time curves and
# the *2 flags drive energy curves for the same hyper-parameter sweep.
plot_1 = False
plot_2 = False
plot_error1 = False
plot_error2 = True
plot_naughty1 = False
plot_naughty2 = False
plot_activeratio1 = False
plot_activeratio2 = True
plot_epoch1 = False
plot_epoch2 = True
plot_bs1 = False
plot_bs2 = True
plot_all1 = False
plot_all2 = True
plot_partition = False
# Cross-branch accumulators: the per-sweep branches store their curves here
# (keyed by sweep name) so plot_all1/plot_all2 can render combined figures.
global_time_w = {}
global_acc_w = {}
global_acc1_w = {}
global_energy_w = {}
global_legends = []
global_colors = []
# Communication round at which parse_time_acc_loss stops reading.
stop_epoch = 24
def process_acc_energy(energy, acc):
    """Deduplicate each energy series and keep the matching accuracies.

    Parameters
    ----------
    energy, acc : iterables of equal-length sequences
        ``energy[i][j]`` is the energy at which accuracy ``acc[i][j]`` was
        reached for series *i*.

    Returns
    -------
    (energy_list, acc_list)
        Per series: the sorted unique energy values, and for each such value
        the accuracy paired with its FIRST occurrence in the original series
        (same semantics as the previous ``e.index(v)`` lookup, but computed
        in one O(n) pass instead of O(n^2)).
    """
    energy_list = []
    acc_list = []
    for e, a in zip(energy, acc):
        # Map each energy value to the accuracy at its first occurrence.
        first_acc = {}
        for ev, av in zip(e, a):
            if ev not in first_acc:
                first_acc[ev] = av
        uniq = sorted(first_acc)
        energy_list.append(uniq)
        acc_list.append([first_acc[v] for v in uniq])
    return energy_list, acc_list
if __name__=="__main__":
    # Entry point. Each plot_* flag above enables one figure family; every
    # branch follows the same pattern: sweep one hyper-parameter, locate the
    # matching "tf<prefix>" / "trace<prefix>" result directories under
    # current_path, parse the recorded curves, then plot and/or stash them in
    # the global_* accumulators for the combined plot_all* figures.
    # NOTE(review): parse_time_acc_loss / parse_energy / parse_all_energy /
    # plot_xs_ys come from the project-local `utils` module; their return
    # shapes are assumed from how they are unpacked here — confirm there.
    if plot_1:
        # Accuracy and elapsed time vs communication round for a sweep over
        # the number of cells (sysCount) and active clients per cell.
        iters = 0
        epoch_w, time_w, wall_clock_w, loss_w, acc_w = {}, {}, {}, {}, {}
        legends = []
        for sysCount in [1, 2, 8]:
            for nActivePerCell in [1, 2, 4]:
                local_epochs = 1
                batch_size = 128
                error_rate = 0
                dir_ = os.path.join(current_path, "saved_minist/outputs-"+str(sysCount))
                record_prefix = "-local_epochs-" + str(local_epochs) + \
                                "-batch_size-" + str(batch_size) + \
                                "-error_rate-" + str(error_rate) + \
                                "-nActivePerCell-" + str(nActivePerCell)
                tf_dir = os.path.join(dir_, "tf"+record_prefix)
                if os.path.exists(tf_dir):
                    # Only runs whose output directory exists contribute;
                    # iters therefore indexes present runs, not the sweep.
                    legends.append(r"$C={:}$, $M={:}$".format(sysCount, nActivePerCell))
                    print("Read from ", tf_dir)
                    time_acc_loss_path = os.path.join(tf_dir, "time-acc-loss.txt")
                    epochs, times, wall_clocks, losses, accs = parse_time_acc_loss(time_acc_loss_path, stop_epoch=stop_epoch)
                    epoch_w[iters], time_w[iters], wall_clock_w[iters], loss_w[iters], acc_w[iters] = epochs, times, wall_clocks, losses, accs
                    iters += 1
        plt.figure(figsize=[6.4, 2.4])
        plt.subplot(121)
        # plt.figure(1)
        plot_xs_ys(epoch_w.values(), acc_w.values(), xlabel='Communication Round', ylabel="Acc (\%)", loc=4, show=False,
                   legends=legends)
        plt.subplot(122)
        # plt.figure(2)
        plot_xs_ys(epoch_w.values(), time_w.values(), xlabel='Communication Round', ylabel="Elapsed Time (s)", loc=4, show=False,
                   legends=legends, logy=True)
        plt.savefig(os.path.join(current_path, 'saved_results/minist-epoch-vs-acc-time-stopEpoch-{}.pdf'.format( stop_epoch)))
        plt.show()
    if plot_2:
        # Energy-vs-time and energy-vs-accuracy curves for the same
        # sysCount/nActivePerCell sweep, re-parsed at a ladder of stop_acc
        # targets to get energy as a function of reached accuracy.
        iters = 0
        time_w, time_avg_energy_w, time_sum_energy_w = {}, {}, {}  # these dicts hold the time-vs-energy curves
        acc_w, acc_avg_energy_w, acc_sum_energy_w = {}, {}, {}
        legends = []
        for sysCount in [1, 2, 4, 8]:
            for nActivePerCell in [1, 2, 4]:
                local_epochs = 1
                batch_size = 128
                error_rate = 0
                dir_ = os.path.join(current_path, "saved_minist/outputs-" + str(sysCount))
                record_prefix = "-local_epochs-" + str(local_epochs) + \
                                "-batch_size-" + str(batch_size) + \
                                "-error_rate-" + str(error_rate) + \
                                "-nActivePerCell-" + str(nActivePerCell)
                trace_dir = os.path.join(dir_, "trace" + record_prefix)
                tf_dir = os.path.join(dir_, "tf" + record_prefix)
                if os.path.exists(tf_dir):
                    legends.append("sysCount_" + str(sysCount) + "_nActivePerCell_" + str(nActivePerCell))
                    epoch, time, wall_clock, loss, acc = parse_time_acc_loss(os.path.join(tf_dir, 'time-acc-loss.txt'), stop_acc=0.955)
                    avg_time, avg_energy, sum_energy = parse_energy(trace_dir, time[-1])
                    time_w[iters] = avg_time
                    time_avg_energy_w[iters] = avg_energy
                    time_sum_energy_w[iters] = sum_energy
                    acc_avg_energy_w[iters] = []
                    acc_sum_energy_w[iters] = []
                    acc_w[iters] = []
                    # Re-parse up to each accuracy target to sample the
                    # energy spent reaching that accuracy.
                    for acc in np.arange(0.9, 0.955, 0.005):
                        _, time, _, _, _ = parse_time_acc_loss(os.path.join(tf_dir, 'time-acc-loss.txt'), stop_acc=acc)
                        _, avg_energy, sum_energy = parse_energy(trace_dir, time[-1])
                        acc_avg_energy_w[iters].append(avg_energy[-1])
                        acc_sum_energy_w[iters].append(sum_energy[-1])
                        acc_w[iters].append(acc)
                    iters += 1
        plot_xs_ys(time_w.values(), time_avg_energy_w.values(), xlabel="time", ylabel="energy", title="time-vs-avg_energy",
                   legends=legends,
                   save_path=os.path.join(current_path, 'saved_results/minist-time-vs-avg_energy.png'))
        plot_xs_ys(acc_w.values(), acc_avg_energy_w.values(), xlabel="acc", ylabel="energy", title="acc-vs-avg_energy",
                   legends=legends,
                   save_path=os.path.join(current_path, 'saved_results/minist-acc-vs-avg_energy.png'))
        plot_xs_ys(time_w.values(), time_sum_energy_w.values(), xlabel="time", ylabel="energy", title="time-vs-sum_energy",
                   legends=legends,
                   save_path=os.path.join(current_path, 'saved_results/minist-time-vs-sum_energy.png'))
        plot_xs_ys(acc_w.values(), acc_sum_energy_w.values(), xlabel="acc", ylabel="energy", title="acc-vs-sum_energy",
                   legends=legends,
                   save_path=os.path.join(current_path, 'saved_results/minist-acc-vs-sum_energy.png'))
    if plot_error1:
        # Accuracy/time curves swept over packet error rate (PER); results
        # are stored into the global accumulators for plot_all1.
        colors = ['r-^', 'b-^', 'g-^', 'k-^']
        iters = 0
        epoch_w, time_w, wall_clock_w, loss_w, acc_w = {}, {}, {}, {}, {}
        legends = []
        for error_rate in [1e-4, 1e-5, 1e-6, 1e-7]:
            sysCount = 2
            nActivePerCell = 4
            local_epochs = 1
            batch_size = 128
            dir_ = os.path.join(current_path, "saved_minist/outputs-" + str(sysCount))
            record_prefix = "-local_epochs-" + str(local_epochs) + \
                            "-batch_size-" + str(batch_size) + \
                            "-error_rate-" + str(error_rate) + \
                            "-nActivePerCell-" + str(nActivePerCell)
            tf_dir = os.path.join(dir_, "tf" + record_prefix)
            if os.path.exists(tf_dir):
                legends.append(r"$PER={:}$".format(error_rate))
                print("Read from ", tf_dir)
                time_acc_loss_path = os.path.join(tf_dir, "time-acc-loss.txt")
                epochs, times, wall_clocks, losses, accs = parse_time_acc_loss(time_acc_loss_path, stop_epoch=stop_epoch)
                epoch_w[iters], time_w[iters], wall_clock_w[iters], loss_w[iters], acc_w[iters] = epochs, times, wall_clocks, losses, accs
                iters += 1
        global_time_w['error_rate'] = time_w.values()
        global_acc_w['error_rate'] = acc_w.values()
        global_legends.append(legends)
        global_colors.append(colors)
        # plot_xs_ys(time_w.values(), acc_w.values(), xlabel="time", ylabel="acc", title="time-vs-acc", colors=colors,
        #            legends=legends, save_path=os.path.join(current_path,
        #                                                    'saved_results/minist-error-time-vs-acc-stopEpoch-{}.png'.format(stop_epoch)))
        # plot_xs_ys1_ys2(epoch_w.values(), acc_w.values(), time_w.values(), xlabel="epoch", ylabel1="acc",
        #                 ylabel2='time', loc=2, title="epoch-vs-acc-time", legends=legends, colors=colors,
        #                 save_path=os.path.join(current_path,
        #                                        'saved_results/minist-error-epoch-vs-acc-time-stopEpoch-{}.png'.format(stop_epoch)))
    if plot_error2:
        # Energy-vs-accuracy curves swept over packet error rate; stored in
        # the global accumulators for plot_all2.
        iters = 0
        time_w, time_avg_energy_w, time_sum_energy_w = {}, {}, {}  # these dicts hold the time-vs-energy curves
        acc_w, acc_avg_energy_w, acc_sum_energy_w = {}, {}, {}
        legends = []
        colors = ['r-^', 'b-^', 'g-^', 'k-^']
        for error_rate in [1e-4, 1e-5, 1e-6, 1e-7]:
            sysCount = 2
            nActivePerCell = 4
            local_epochs = 1
            batch_size = 128
            dir_ = os.path.join(current_path, "saved_minist/outputs-" + str(sysCount))
            record_prefix = "-local_epochs-" + str(local_epochs) + \
                            "-batch_size-" + str(batch_size) + \
                            "-error_rate-" + str(error_rate) + \
                            "-nActivePerCell-" + str(nActivePerCell)
            trace_dir = os.path.join(dir_, "trace" + record_prefix)
            tf_dir = os.path.join(dir_, "tf" + record_prefix)
            if os.path.exists(tf_dir):
                legends.append(r"$PER={:}$".format(error_rate))
                acc_avg_energy_w[iters] = []
                acc_sum_energy_w[iters] = []
                acc_w[iters] = []
                for acc in np.arange(0.9, 0.98, 0.002):
                    _, time, _, _, _ = parse_time_acc_loss(os.path.join(tf_dir, 'time-acc-loss.txt'), stop_acc=acc)
                    _, avg_energy, sum_energy = parse_energy(trace_dir, time[-1])
                    acc_avg_energy_w[iters].append(avg_energy[-1])
                    acc_sum_energy_w[iters].append(sum_energy[-1])
                    acc_w[iters].append(acc)
                iters += 1
        global_energy_w['error_rate'] = acc_avg_energy_w.values()
        global_acc1_w['error_rate'] = acc_w.values()
        global_legends.append(legends)
        global_colors.append(colors)
        # plot_xs_ys(acc_w.values(), acc_avg_energy_w.values(), xlabel="acc", ylabel="energy", title="acc-vs-avg_energy", colors=colors,
        #            legends=legends,
        #            save_path=os.path.join(current_path, 'saved_results/minist-error-acc-vs-avg_energy.png'))
    if plot_naughty1:
        # Accuracy-vs-time under additive / multiplicative client noise;
        # one subplot per noise type (subplots 221 and 222).
        plt.figure(figsize=[6.4, 4.8])
        noise_type_ratio = {'add': [2e-2, 4e-2, 8e-2, 1e-1],
                            'multi': [1e-1, 2e-1, 5e-1, 8e-1]}
        noise_character = {'add': "NIS_a", 'multi': 'NIS_m'}
        for i, noise_type in enumerate(['add', 'multi']):
            iters = 0
            colors = ['g-^', 'k-^', 'r-^', 'b-^', 'y-^']
            epoch_w, time_w, wall_clock_w, loss_w, acc_w = {}, {}, {}, {}, {}
            legends = []
            for noise_ratio in noise_type_ratio[noise_type]:
                sysCount = 2
                nActivePerCell = 4
                local_epochs = 1
                batch_size = 128
                error_rate = 0
                dir_ = os.path.join(current_path, "saved_minist/outputs-" + str(sysCount))
                record_prefix = "-local_epochs-" + str(local_epochs) + \
                                "-batch_size-" + str(batch_size) + \
                                "-error_rate-" + str(error_rate) + \
                                "-nActivePerCell-" + str(nActivePerCell) + \
                                "-noise_type-" + str(noise_type) + \
                                "-noise_ratio-" + str(noise_ratio) + \
                                "-part_ratio-" + str('1,1,1,1')
                tf_dir = os.path.join(dir_, "tf" + record_prefix)
                if os.path.exists(tf_dir):
                    legends.append(r"${:}={:}$".format(noise_character[noise_type], noise_ratio))
                    print("Read from ", tf_dir)
                    time_acc_loss_path = os.path.join(tf_dir, "time-acc-loss.txt")
                    epochs, times, wall_clocks, losses, accs = parse_time_acc_loss(time_acc_loss_path, stop_epoch=stop_epoch)
                    epoch_w[iters], time_w[iters], wall_clock_w[iters], loss_w[iters], acc_w[iters] = epochs, times, wall_clocks, losses, accs
                    iters += 1
            plt.subplot(221 + i)
            plot_xs_ys(time_w.values(), acc_w.values(), xlabel="Elapsed Time (s)", ylabel="Acc (\%)", colors=colors, show=False,
                       legends=legends, loc=4)
            # plot_xs_ys1_ys2(epoch_w.values(), acc_w.values(), time_w.values(), xlabel="epoch", ylabel1="acc", colors=colors,
            #                 ylabel2='time', loc=4,
            #                 title="epoch-vs-acc-time", legends=legends,
            #                 save_path=os.path.join(current_path,
            #                                        'saved_results/minist-naughty-{}-epoch-vs-acc-time-stopEpoch-{}.png'.format(noise_type, stop_epoch)))
    if plot_naughty2:
        # Energy-vs-accuracy under noise; fills subplots 223/224 of the
        # figure started by plot_naughty1 (221 + i + 2), then saves it.
        noise_type_ratio = {'add': [2e-2, 4e-2, 8e-2, 1e-1],
                            'multi': [1e-1, 2e-1, 5e-1, 8e-1]}
        noise_character = {'add': "NIS_a", 'multi': 'NIS_m'}
        for i, noise_type in enumerate(['add', 'multi']):
            iters = 0
            colors = ['g-^', 'k-^', 'r-^', 'b-^', 'y-^']
            time_w, time_avg_energy_w, time_sum_energy_w = {}, {}, {}  # these dicts hold the time-vs-energy curves
            acc_w, acc_avg_energy_w, acc_sum_energy_w = {}, {}, {}
            legends = []
            for noise_ratio in noise_type_ratio[noise_type]:
                sysCount = 2
                nActivePerCell = 4
                local_epochs = 1
                batch_size = 128
                error_rate = 0
                # NOTE(review): `dir` shadows the builtin of the same name
                # inside this branch (other branches use `dir_`).
                dir = os.path.join(current_path, "saved_minist/outputs-" + str(sysCount))
                record_prefix = "-local_epochs-" + str(local_epochs) + \
                                "-batch_size-" + str(batch_size) + \
                                "-error_rate-" + str(error_rate) + \
                                "-nActivePerCell-" + str(nActivePerCell) + \
                                "-noise_type-" + str(noise_type) + \
                                "-noise_ratio-" + str(noise_ratio) + \
                                "-part_ratio-" + str('1,1,1,1')
                trace_dir = os.path.join(dir, "trace" + record_prefix)
                tf_dir = os.path.join(dir, "tf" + record_prefix)
                if os.path.exists(tf_dir):
                    legends.append(r"${:}={:}$".format(noise_character[noise_type], noise_ratio))
                    epoch, time, wall_clock, loss, acc = parse_time_acc_loss(os.path.join(tf_dir, 'time-acc-loss.txt'),
                                                                             stop_acc=0.98)
                    avg_time, avg_energy, sum_energy = parse_energy(trace_dir, time[-1])
                    time_w[iters] = avg_time
                    time_avg_energy_w[iters] = avg_energy
                    time_sum_energy_w[iters] = sum_energy
                    acc_avg_energy_w[iters] = []
                    acc_sum_energy_w[iters] = []
                    acc_w[iters] = []
                    for acc in np.arange(0.9, 0.98, 0.002):
                        epochs, time, _, _, _ = parse_time_acc_loss(os.path.join(tf_dir, 'time-acc-loss.txt'),
                                                                    stop_acc=acc)
                        # Stop sampling once the run never reached this
                        # accuracy within stop_epoch rounds.
                        if epochs[-1]==stop_epoch:
                            print("reach stop epoch")
                            break
                        _, avg_energy, sum_energy = parse_energy(trace_dir, time[-1])
                        acc_avg_energy_w[iters].append(avg_energy[-1])
                        acc_sum_energy_w[iters].append(sum_energy[-1])
                        acc_w[iters].append(acc)
                    iters += 1
            # plot_xs_ys(time_w.values(), time_avg_energy_w.values(), xlabel="time", ylabel="energy", colors=colors,
            #            title="time-vs-avg_energy",
            #            legends=legends,
            #            save_path=os.path.join(current_path,
            #                                   'saved_results/minist-naughty-{}-time-vs-avg_energy.png'.format(noise_type)))
            plt.subplot(221 + i + 2)
            acc_avg_energy_w, acc_w = process_acc_energy(list(acc_avg_energy_w.values()), list(acc_w.values()))
            plot_xs_ys(acc_avg_energy_w, acc_w, xlabel="Consumed Energy (J)", ylabel="Acc (\%)", colors=colors, show=False,
                       legends=legends, loc=4)
            # plot_xs_ys(time_w.values(), time_sum_energy_w.values(), xlabel="time", ylabel="energy", colors=colors,
            #            title="time-vs-sum_energy",
            #            legends=legends,
            #            save_path=os.path.join(current_path,
            #                                   'saved_results/minist-naughty-time-vs-sum_energy.png'))
            # plot_xs_ys(acc_w.values(), acc_sum_energy_w.values(), xlabel="acc", ylabel="energy", colors=colors,
            #            title="acc-vs-sum_energy",
            #            legends=legends,
            #            save_path=os.path.join(current_path,
            #                                   'saved_results/minist-naughty-acc-vs-sum_energy.png'))
        plt.savefig(os.path.join(current_path,
                                 'saved_results/minist-naughty.pdf'))
        plt.show()
    if plot_activeratio1:
        # Accuracy/time curves swept over the active-client ratio
        # (nActivePerCell / 4); stored for plot_all1.
        iters = 0
        epoch_w, time_w, wall_clock_w, loss_w, acc_w = {}, {}, {}, {}, {}
        legends = []
        colors = ['r-^', 'b-^', 'g-^', 'k-^']
        for nActivePerCell in [1, 2, 4]:
            sysCount = 2
            local_epochs = 1
            batch_size = 128
            error_rate = 0
            noise_type = 'add'
            noise_ratio = 0
            dir_ = os.path.join(current_path, "saved_minist/outputs-"+str(sysCount))
            record_prefix = "-local_epochs-" + str(local_epochs) + \
                            "-batch_size-" + str(batch_size) + \
                            "-error_rate-" + str(error_rate) + \
                            "-nActivePerCell-" + str(nActivePerCell) + \
                            "-noise_type-" + str(noise_type) + \
                            "-noise_ratio-" + str(noise_ratio)
            tf_dir = os.path.join(dir_, "tf"+record_prefix)
            if os.path.exists(tf_dir):
                legends.append(r"$r={:}$".format(nActivePerCell/4))
                print("Read from ", tf_dir)
                time_acc_loss_path = os.path.join(tf_dir, "time-acc-loss.txt")
                epochs, times, wall_clocks, losses, accs = parse_time_acc_loss(time_acc_loss_path, stop_epoch=stop_epoch)
                epoch_w[iters], time_w[iters], wall_clock_w[iters], loss_w[iters], acc_w[iters] = epochs, times, wall_clocks, losses, accs
                iters += 1
        global_time_w['num_clients'] = time_w.values()
        global_acc_w['num_clients'] = acc_w.values()
        global_legends.append(legends)
        global_colors.append(colors)
        # plot_xs_ys(time_w.values(), acc_w.values(), xlabel="time", ylabel="acc", title="time-vs-acc",
        #            legends=legends, save_path=os.path.join(current_path, 'saved_results/minist-active_ratio-time-vs-acc-stopEpoch-{}.png'.format(stop_epoch)))
        #
        # plot_xs_ys1_ys2(epoch_w.values(), acc_w.values(), time_w.values(), xlabel="epoch", ylabel1="acc", ylabel2='time', title="epoch-vs-acc-time",
        #                 legends=legends, save_path=os.path.join(current_path, 'saved_results/minist-active_ratio-epoch-vs-acc-time-stopEpoch-{}.png'.format(stop_epoch)))
    if plot_activeratio2:
        # Energy-vs-accuracy swept over the active-client ratio; stored for
        # plot_all2.
        iters = 0
        time_w, time_avg_energy_w, time_sum_energy_w = {}, {}, {}  # these dicts hold the time-vs-energy curves
        acc_w, acc_avg_energy_w, acc_sum_energy_w = {}, {}, {}
        legends = []
        colors = ['r-^', 'b-^', 'g-^', 'k-^']
        for nActivePerCell in [1, 2, 4]:
            sysCount = 2
            local_epochs = 1
            batch_size = 128
            error_rate = 0
            noise_type = 'add'
            noise_ratio = 0
            dir_ = os.path.join(current_path, "saved_minist/outputs-"+str(sysCount))
            record_prefix = "-local_epochs-" + str(local_epochs) + \
                            "-batch_size-" + str(batch_size) + \
                            "-error_rate-" + str(error_rate) + \
                            "-nActivePerCell-" + str(nActivePerCell) + \
                            "-noise_type-" + str(noise_type) + \
                            "-noise_ratio-" + str(noise_ratio)
            tf_dir = os.path.join(dir_, "tf"+record_prefix)
            trace_dir = os.path.join(dir_, "trace" + record_prefix)
            if os.path.exists(tf_dir):
                legends.append(r"$r={:}$".format(nActivePerCell/4))
                acc_avg_energy_w[iters] = []
                acc_sum_energy_w[iters] = []
                acc_w[iters] = []
                for acc in np.arange(0.9, 0.98, 0.002):
                    _, time, _, _, _ = parse_time_acc_loss(os.path.join(tf_dir, 'time-acc-loss.txt'), stop_acc=acc)
                    _, avg_energy, sum_energy = parse_energy(trace_dir, time[-1])
                    acc_avg_energy_w[iters].append(avg_energy[-1])
                    acc_sum_energy_w[iters].append(sum_energy[-1])
                    acc_w[iters].append(acc)
                iters += 1
        global_energy_w['num_clients'] = acc_avg_energy_w.values()
        global_acc1_w['num_clients'] = acc_w.values()
        global_legends.append(legends)
        global_colors.append(colors)
        # plot_xs_ys(acc_w.values(), acc_avg_energy_w.values(), xlabel="acc", ylabel="energy", title="acc-vs-avg_energy", colors=colors,
        #            legends=legends,
        #            save_path=os.path.join(current_path,
        #                                   'saved_results/minist-active_ratio-acc-vs-avg_energy.png'))
    if plot_epoch1:
        # Accuracy/time curves swept over the number of local epochs E;
        # stored for plot_all1.
        iters = 0
        epoch_w, time_w, wall_clock_w, loss_w, acc_w = {}, {}, {}, {}, {}
        legends = []
        colors = ['r-^', 'b-^', 'g-^', 'k-^']
        for local_epochs in [2, 4, 8, 16]:
            batch_size = 128
            sysCount = 2
            nActivePerCell = 4
            error_rate = 0
            noise_type = 'add'
            noise_ratio = 0
            dir_ = os.path.join(current_path, "saved_minist/outputs-" + str(sysCount))
            record_prefix = "-local_epochs-" + str(local_epochs) + \
                            "-batch_size-" + str(batch_size) + \
                            "-error_rate-" + str(error_rate) + \
                            "-nActivePerCell-" + str(nActivePerCell) + \
                            "-noise_type-" + str(noise_type) + \
                            "-noise_ratio-" + str(noise_ratio)
            tf_dir = os.path.join(dir_, "tf" + record_prefix)
            if os.path.exists(tf_dir):
                legends.append(r"$E={:}$".format(local_epochs))
                print("Read from ", tf_dir)
                time_acc_loss_path = os.path.join(tf_dir, "time-acc-loss.txt")
                epochs, times, wall_clocks, losses, accs = parse_time_acc_loss(time_acc_loss_path, stop_epoch=stop_epoch)
                epoch_w[iters], time_w[iters], wall_clock_w[iters], loss_w[iters], acc_w[iters] = epochs, times, wall_clocks, losses, accs
                iters += 1
        global_time_w['local_epochs'] = time_w.values()
        global_acc_w['local_epochs'] = acc_w.values()
        global_legends.append(legends)
        global_colors.append(colors)
        # plot_xs_ys(time_w.values(), acc_w.values(), xlabel="time", ylabel="acc", title="time-vs-acc", markersize=4, colors=colors,
        #            legends=legends, save_path=os.path.join(current_path,
        #                                                    'saved_results/minist-local_epoch-time-vs-acc-stopEpoch-{}.png'.format(stop_epoch)))
    if plot_epoch2:
        # Energy-vs-accuracy swept over local epochs; stored for plot_all2.
        iters = 0
        time_w, time_avg_energy_w, time_sum_energy_w = {}, {}, {}  # these dicts hold the time-vs-energy curves
        acc_w, acc_avg_energy_w, acc_sum_energy_w = {}, {}, {}
        legends = []
        colors = ['r-^', 'b-^', 'g-^', 'k-^']
        for local_epochs in [2, 4, 8, 16]:
            batch_size = 128
            sysCount = 2
            nActivePerCell = 4
            error_rate = 0
            noise_type = 'add'
            noise_ratio = 0
            dir_ = os.path.join(current_path, "saved_minist/outputs-" + str(sysCount))
            record_prefix = "-local_epochs-" + str(local_epochs) + \
                            "-batch_size-" + str(batch_size) + \
                            "-error_rate-" + str(error_rate) + \
                            "-nActivePerCell-" + str(nActivePerCell) + \
                            "-noise_type-" + str(noise_type) + \
                            "-noise_ratio-" + str(noise_ratio)
            tf_dir = os.path.join(dir_, "tf" + record_prefix)
            trace_dir = os.path.join(dir_, "trace" + record_prefix)
            if os.path.exists(tf_dir):
                legends.append(r"$E={:}$".format(local_epochs))
                acc_avg_energy_w[iters] = []
                acc_sum_energy_w[iters] = []
                acc_w[iters] = []
                for acc in np.arange(0.9, 0.98, 0.002):
                    _, time, _, _, _ = parse_time_acc_loss(os.path.join(tf_dir, 'time-acc-loss.txt'), stop_acc=acc)
                    _, avg_energy, sum_energy = parse_energy(trace_dir, time[-1])
                    acc_avg_energy_w[iters].append(avg_energy[-1])
                    acc_sum_energy_w[iters].append(sum_energy[-1])
                    acc_w[iters].append(acc)
                iters += 1
        global_energy_w['local_epochs'] = acc_avg_energy_w.values()
        global_acc1_w['local_epochs'] = acc_w.values()
        global_legends.append(legends)
        global_colors.append(colors)
        # plot_xs_ys(acc_w.values(), acc_avg_energy_w.values(), xlabel="acc", ylabel="energy", title="acc-vs-avg_energy", colors=colors,
        #            legends=legends,
        #            save_path=os.path.join(current_path,
        #                                   'saved_results/minist-local_epoch-acc-vs-avg_energy.png'))
    if plot_bs1:
        # Accuracy/time curves swept over batch size; stored for plot_all1.
        iters = 0
        epoch_w, time_w, wall_clock_w, loss_w, acc_w = {}, {}, {}, {}, {}
        legends = []
        colors = ['r-^', 'b-^', 'g-^', 'k-^']
        for batch_size in [8, 32, 128, 512]:
            sysCount = 2
            nActivePerCell = 4
            error_rate = 0
            noise_type = 'add'
            noise_ratio = 0
            local_epochs = 1
            dir_ = os.path.join(current_path, "saved_minist/outputs-" + str(sysCount))
            record_prefix = "-local_epochs-" + str(local_epochs) + \
                            "-batch_size-" + str(batch_size) + \
                            "-error_rate-" + str(error_rate) + \
                            "-nActivePerCell-" + str(nActivePerCell) + \
                            "-noise_type-" + str(noise_type) + \
                            "-noise_ratio-" + str(noise_ratio)
            tf_dir = os.path.join(dir_, "tf" + record_prefix)
            if os.path.exists(tf_dir):
                legends.append(r"$b_s={:}$".format(batch_size))
                print("Read from ", tf_dir)
                time_acc_loss_path = os.path.join(tf_dir, "time-acc-loss.txt")
                epochs, times, wall_clocks, losses, accs = parse_time_acc_loss(time_acc_loss_path, stop_epoch=stop_epoch)
                epoch_w[iters], time_w[iters], wall_clock_w[iters], loss_w[iters], acc_w[iters] = epochs, times, wall_clocks, losses, accs
                iters += 1
        global_time_w['local_bs'] = time_w.values()
        global_acc_w['local_bs'] = acc_w.values()
        global_legends.append(legends)
        global_colors.append(colors)
        # plot_xs_ys(time_w.values(), acc_w.values(), xlabel="time", ylabel="acc", title="time-vs-acc", markersize=4, colors=colors,
        #            legends=legends, save_path=os.path.join(current_path,
        #                                                    'saved_results/minist-bs-time-vs-acc-stopEpoch-{}.png'.format(stop_epoch)))
    if plot_bs2:
        # Energy-vs-accuracy swept over batch size; stored for plot_all2.
        iters = 0
        time_w, time_avg_energy_w, time_sum_energy_w = {}, {}, {}  # these dicts hold the time-vs-energy curves
        acc_w, acc_avg_energy_w, acc_sum_energy_w = {}, {}, {}
        legends = []
        colors = ['r-^', 'b-^', 'g-^', 'k-^']
        for batch_size in [8, 32, 128, 512]:
            sysCount = 2
            nActivePerCell = 4
            error_rate = 0
            noise_type = 'add'
            noise_ratio = 0
            local_epochs = 1
            dir_ = os.path.join(current_path, "saved_minist/outputs-" + str(sysCount))
            record_prefix = "-local_epochs-" + str(local_epochs) + \
                            "-batch_size-" + str(batch_size) + \
                            "-error_rate-" + str(error_rate) + \
                            "-nActivePerCell-" + str(nActivePerCell) + \
                            "-noise_type-" + str(noise_type) + \
                            "-noise_ratio-" + str(noise_ratio)
            tf_dir = os.path.join(dir_, "tf" + record_prefix)
            trace_dir = os.path.join(dir_, "trace" + record_prefix)
            if os.path.exists(tf_dir):
                legends.append(r"$b_s={:}$".format(batch_size))
                acc_avg_energy_w[iters] = []
                acc_sum_energy_w[iters] = []
                acc_w[iters] = []
                for acc in np.arange(0.9, 0.98, 0.002):
                    _, time, _, _, _ = parse_time_acc_loss(os.path.join(tf_dir, 'time-acc-loss.txt'), stop_acc=acc)
                    _, avg_energy, sum_energy = parse_energy(trace_dir, time[-1])
                    acc_avg_energy_w[iters].append(avg_energy[-1])
                    acc_sum_energy_w[iters].append(sum_energy[-1])
                    acc_w[iters].append(acc)
                iters += 1
        global_energy_w['local_bs'] = acc_avg_energy_w.values()
        global_acc1_w['local_bs'] = acc_w.values()
        global_legends.append(legends)
        global_colors.append(colors)
        # plot_xs_ys(acc_w.values(), acc_avg_energy_w.values(), xlabel="acc", ylabel="energy", title="acc-vs-avg_energy", colors=colors,
        #            legends=legends,
        #            save_path=os.path.join(current_path,
        #                                   'saved_results/minist-bs-acc-vs-avg_energy.png'))
    if plot_all1:
        # Combined 2x2 figure: time-vs-accuracy for every sweep collected in
        # global_time_w/global_acc_w by the *1 branches above.
        plt.figure(figsize=[6.4, 4.8])
        for i, (time_w, acc_w) in enumerate(zip(global_time_w.values(), global_acc_w.values())):
            plt.subplot(221+i)
            plot_xs_ys(time_w, acc_w, xlabel="Elapsed Time (s)", ylabel="Acc (\%)", markersize=0, colors=global_colors[i], show=False, legends=global_legends[i])
        plt.savefig(os.path.join(current_path,
                                 'saved_results/minist-all-time-vs-acc.pdf'))
        plt.show()
    if plot_all2:
        # Combined 2x2 figure: energy-vs-accuracy for every sweep collected
        # by the *2 branches; energies deduplicated via process_acc_energy.
        plt.figure(figsize=[6.4, 4.8])
        for i, (energy_w, acc_w) in enumerate(zip(global_energy_w.values(), global_acc1_w.values())):
            energy_w, acc_w = process_acc_energy(energy_w, acc_w)
            plt.subplot(221+i)
            plot_xs_ys(energy_w, acc_w, xlabel="Consumed Energy (J)", ylabel="Acc (\%)", markersize=0, colors=global_colors[i], show=False, legends=global_legends[i])
        plt.savefig(os.path.join(current_path,
                                 'saved_results/minist-all-energy-vs-acc.pdf'))
        plt.show()
    if plot_partition:
        # Sweep over data-partition ratios; plots the energy ratio over time
        # and prints a LaTeX-style summary table.
        iters = 0
        colors = ['r--', 'r-^', 'b-^', 'g-^', 'k-^', 'g--', 'k--']
        time_w, time_avg_energy_w, time_sum_energy_w = {}, {}, {}  # these dicts hold the time-vs-energy curves
        acc_w, acc_avg_energy_w, acc_energy_ratio = {}, {}, {}
        legends = []
        partitions = ['1,1,1,1', '8,1,1,1', '64,1,1,1', '512,1,1,1', '4096,1,1,1', "512,512,512,1", "4096,4096,4096,1"]
        for part_ratio in partitions:
            sysCount = 1
            nActivePerCell = 4
            batch_size = 128
            error_rate = 0
            noise_type = 'add'
            noise_ratio = 0
            local_epochs = 1
            dir_ = os.path.join(current_path, "saved_minist/outputs-" + str(sysCount))
            record_prefix = "-local_epochs-" + str(local_epochs) + \
                            "-batch_size-" + str(batch_size) + \
                            "-error_rate-" + str(error_rate) + \
                            "-nActivePerCell-" + str(nActivePerCell) + \
                            "-noise_type-" + str(noise_type) + \
                            "-noise_ratio-" + str(noise_ratio) + \
                            "-part_ratio-" + part_ratio
            trace_dir = os.path.join(dir_, "trace" + record_prefix)
            tf_dir = os.path.join(dir_, "tf" + record_prefix)
            if os.path.exists(tf_dir):
                legends.append("partition ratio: " + str(part_ratio))
                time_acc_loss_path = os.path.join(tf_dir, "time-acc-loss.txt")
                epochs, times, wall_clocks, losses, accs = parse_time_acc_loss(time_acc_loss_path, stop_epoch=9)
                time_w[iters] = times
                acc_w[iters] = accs
                avg_time, avg_energy, energys = parse_all_energy(trace_dir, times[-1])
                # first/last node energy ratio; presumably array-like given
                # the [-1].item() access in the table below — confirm.
                acc_energy_ratio[iters] = energys[0]/energys[-1]
                iters += 1
        plot_xs_ys(time_w.values(), acc_energy_ratio.values(), xlabel="time", ylabel="energy_ratio",
                   title="energy_ratio-vs-loss", colors=colors,
                   legends=legends, loc=2,
                   save_path=os.path.join(current_path,
                                          'saved_results/minist-partition-energy_ratio-vs-loss.png'))
        print("partition\t energy_sumed\t time_consumed\t acc \t energy_ratio\t")
        # NOTE(review): assumes every partition directory existed, so that
        # iters 0..len(partitions)-1 are all populated — verify on disk.
        for iters in range(len(partitions)):
            print("{}\t & {:.3f}\t & {:.3f}\t & {:.3f}\t & {:.3f}\t \\\\hline".format(partitions[iters], acc_avg_energy_w[iters][-1].item()/acc_avg_energy_w[0][-1].item(),
                                                                                      time_w[iters][-1]/time_w[0][-1], acc_w[iters][-1], acc_energy_ratio[iters][-1].item()))
| 45.136364
| 171
| 0.526572
| 4,219
| 34,755
| 4.034368
| 0.047405
| 0.035368
| 0.041713
| 0.035956
| 0.893015
| 0.882322
| 0.862053
| 0.836202
| 0.831326
| 0.821926
| 0
| 0.020062
| 0.33739
| 34,755
| 769
| 172
| 45.195059
| 0.71905
| 0.133362
| 0
| 0.749071
| 0
| 0.003717
| 0.105699
| 0.025285
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001859
| false
| 0
| 0.007435
| 0
| 0.011152
| 0.016729
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
427e10d6ddb82ee987943ebf3bbfaef6c4c429e7
| 17,316
|
py
|
Python
|
GradMeth.py
|
L-F-A/Optimization
|
a8e2252941c8839891411831920808867b45a323
|
[
"MIT"
] | null | null | null |
GradMeth.py
|
L-F-A/Optimization
|
a8e2252941c8839891411831920808867b45a323
|
[
"MIT"
] | null | null | null |
GradMeth.py
|
L-F-A/Optimization
|
a8e2252941c8839891411831920808867b45a323
|
[
"MIT"
] | null | null | null |
import numpy as np
import warnings
#########################################################################################################
# IMPORTANT NOTE #
#Every method splits cases of function with or without supplementary arguments at the very beginning #
#as to avoid if-else operations for it at each iteration in the while loops. This means just #
#copy-pasting the code twice in each method and changing func(w) to func(w,*args) etc. Maybe not as neat#
#as possible, but might save computation time. #
#########################################################################################################
def GradDesc0(dfunc,w,eta,tol,tol_rel,iteMax,args=None):
	"""Vanilla gradient descent with a constant stepsize.

	INPUTS:
	  dfunc   : callable returning the gradient of the objective; called as
	            dfunc(w) or dfunc(w, *args)
	  w       : initial value for w (ndarray; must support .copy())
	  eta     : constant stepsize
	  tol     : absolute tolerance on the update norm
	  tol_rel : relative tolerance on the update norm
	  iteMax  : maximum number of iterations (warns when reached)
	  args    : optional tuple of extra arguments forwarded to dfunc

	OUTPUTS:
	  w   : solution where the objective is (locally) minimal
	  ite : how many iterations it took

	Convergence requires BOTH tolerances to be met for 5 consecutive
	iterations, matching the original implementation.
	"""
	# Bind the gradient call once so the loop has a single form. This
	# replaces the previous duplicated while-loop for the args/no-args
	# cases with no extra per-iteration branching.
	if args is None:
		grad = dfunc
	else:
		grad = lambda w_: dfunc(w_, *args)

	ite=0
	ite_conv=0      # count of consecutive iterations within tolerance
	ite_follow=0    # iteration index of the last in-tolerance step
	err=1.
	err_rel=1.
	while (err>tol or err_rel>tol_rel) and (ite_conv<5):
		if ite==iteMax:
			warnings.warn('Maximum number of iterations reached: no convergence')
			break
		ite+=1
		g=grad(w)
		w0=w.copy()
		w=w-eta*g
		err=np.linalg.norm(w-w0)
		# NOTE: eps is added elementwise to w0 before taking the norm —
		# kept as-is from the original to guard a zero denominator.
		err_rel=err/np.linalg.norm(w0+np.finfo(float).eps)
		if err<tol or err_rel<tol_rel:
			# Make certain the 5 tolerant iterations are consecutive,
			# not 5 non-consecutive ones.
			if ite_follow==0:
				ite_follow=ite
				ite_conv+=1
			elif ite==ite_follow+1:
				ite_follow=ite
				ite_conv+=1
			else:
				ite_follow=0
				ite_conv=0
	return w,ite
def GradDesc(func,dfunc,w,eta,tol,tol_rel,iteMax,args=None):
#########################################################################################################
# Gradient descent with stepsize adaptation #
# see Marc Toussaint U Stuttgart: Intro to Optimization #
# https://ipvs.informatik.uni-stuttgart.de/mlr/marc/teaching/13-Optimization/02-gradientMethods.pdf #
# #
# INPUTS: #
# func : Function to be minimized #
# dfunc : Function giving the derivative of func #
# w : Initial value for w #
# eta : Initial stepsize #
# tol : Absolute tolerance #
# tol_rel : Relative tolerance #
# args : Tuple containing all other argument that func and dfunc take #
# #
# OUPUTS: #
# w : Solution for w where func is minimum #
# ite : How many iterations is took #
#########################################################################################################
ite=0
ite_conv=0
ite_follow=0
err=1.
err_rel=1.
if args is None:
while (err>tol or err_rel>tol_rel) and (ite_conv<5):#tol conditions respected for 5
#iterations in a row
if ite==iteMax:
warnings.warn('Maximum number of iterations reached: no convergence')
break
ite+=1
g=dfunc(w)
y=w-eta*g/np.linalg.norm(g)
w0=w.copy()
if func(y) <= func(w):
w=y.copy()
eta=1.2*eta #1.2 Magic number from Marc Toussaint U Stuttgart: Intro
#to Optimization
else:
eta=0.5*eta #0.5 Magic number from Marc Toussaint U Stuttgart: Intro
#to Optimization
err=np.linalg.norm(y-w0)
err_rel=err/np.linalg.norm(w0+np.finfo(float).eps)
if err<tol or err_rel<tol_rel:#Making certain that it is really for 5 iterations
#in a row and not 5 non consequtive ones
if ite_follow==0:
ite_follow=ite
ite_conv+=1
elif ite==ite_follow+1:
ite_follow=ite
ite_conv+=1
else:
ite_follow=0
ite_conv=0
else:
while (err>tol or err_rel>tol_rel) and (ite_conv<5):#tol conditions respected for 5
#iterations in a row
if ite==iteMax:
warnings.warn('Maximum number of iterations reached: no convergence')
break
ite+=1
g=dfunc(w,*args)
y=w-eta*g/np.linalg.norm(g)
w0=w.copy()
if func(y,*args) <= func(w,*args):
w=y.copy()
eta=1.2*eta #1.2 Magic number from Marc Toussaint U Stuttgart: Intro
#to Optimization
else:
eta=0.5*eta #0.5 Magic number from Marc Toussaint U Stuttgart: Intro
#to Optimization
err=np.linalg.norm(y-w0)
err_rel=err/np.linalg.norm(w0+np.finfo(float).eps)
if err<tol or err_rel<tol_rel:#Making certain that it is really for 5 iterations
#in a row and not 5 non consequtive ones
if ite_follow==0:
ite_follow=ite
ite_conv+=1
elif ite==ite_follow+1:
ite_follow=ite
ite_conv+=1
else:
ite_follow=0
ite_conv=0
return w,ite
def Rprop(dfunc,w,eta,tol,tol_rel,iteMax,args=None):
#########################################################################################################
# Resilient Back Propagation #
# see Marc Toussaint U Stuttgart: Intro to Optimization #
# https://ipvs.informatik.uni-stuttgart.de/mlr/marc/teaching/13-Optimization/02-gradientMethods.pdf #
# #
# INPUTS: #
# dfunc : Function giving the derivative of the function to be minimized #
# w : Initial value for w #
# eta : Initial stepsize #
# tol : Absolute tolerance #
# tol_rel : Relative tolerance #
# args : Tuple containing all other argument that func and dfunc take #
# #
# OUPUTS: #
# w : Solution for w where func is minimum #
# ite : How many iterations is took #
#########################################################################################################
ite=0
ite_conv=0
ite_follow=0
n=len(w)
err=1.
err_rel=1.
g0=np.zeros(n)
eta=eta*np.ones(n)
if args is None:
while (err>tol or err_rel>tol_rel) and (ite_conv<5):#tol conditions respected for 5
#iterations in a row
if ite==iteMax:
warnings.warn('Maximum number of iterations reached: no convergence')
break
ite+=1
g=dfunc(w)
w0=w.copy()
for i in range(n):
if g[i]*g0[i]>0:
eta[i]=1.2*eta[i]
w[i]=w[i]-eta[i]*np.sign(g[i])
g0[i]=g[i]
elif g[i]*g0[i]<0.:
eta[i]=0.5*eta[i]
w[i]=w[i]-eta[i]*np.sign(g[i])
g0[i]=0.
else:
w[i]=w[i]-eta[i]*np.sign(g[i])
g0[i]=g[i]
err=np.linalg.norm(w-w0)
err_rel=err/np.linalg.norm(w0+np.finfo(float).eps)
if err<tol or err_rel<tol_rel:#Making certain that it is really for 5 iterations
#in a row and not 5 non consequtive ones
if ite_follow==0:
ite_follow=ite
ite_conv+=1
elif ite==ite_follow+1:
ite_follow=ite
ite_conv+=1
else:
ite_follow=0
ite_conv=0
else:
while (err>tol or err_rel>tol_rel) and (ite_conv<5):#tol conditions respected for 5
#iterations in a row
if ite==iteMax:
warnings.warn('Maximum number of iterations reached: no convergence')
break
ite+=1
g=dfunc(w,*args)
w0=w.copy()
for i in range(n):
if g[i]*g0[i]>0:
eta[i]=1.2*eta[i]
w[i]=w[i]-eta[i]*np.sign(g[i])
g0[i]=g[i]
elif g[i]*g0[i]<0.:
eta[i]=0.5*eta[i]
w[i]=w[i]-eta[i]*np.sign(g[i])
g0[i]=0.
else:
w[i]=w[i]-eta[i]*np.sign(g[i])
g0[i]=g[i]
err=np.linalg.norm(w-w0)
err_rel=err/np.linalg.norm(w0+np.finfo(float).eps)
if err<tol or err_rel<tol_rel:#Making certain that it is really for 5 iterations
#in a row and not 5 non consequtive ones
if ite_follow==0:
ite_follow=ite
ite_conv+=1
elif ite==ite_follow+1:
ite_follow=ite
ite_conv+=1
else:
ite_follow=0
ite_conv=0
return w,ite
def GradDescSteep(dfunc,w,eta,tol,tol_rel,iteMax,args=None):
#using stepsize eta_n from https://en.wikipedia.org/wiki/Gradient_descent
err=1.
err_rel=1.
ite=0
ite_conv=0
ite_follow=0
w_00=w.copy()
if args is None:
g_00=dfunc(w)
while (err>tol or err_rel>tol_rel) and (ite_conv<5):
if ite==iteMax:
warnings.warn('Maximum number of iterations reached: no convergence')
break
ite+=1
g0=dfunc(w)
if ite==0:
w=w-eta*g0
else:
delta=np.dot(w-w_00,g0-g_00)/np.dot(g0-g_00,g0-g_00)
w_00=w.copy()
w=w-delta*g0
g_00=g0.copy()
err=np.linalg.norm(w-w_00)
err_rel=err/np.linalg.norm(w_00+np.finfo(float).eps)
if err<tol or err_rel<tol_rel:#Making certain that it is really for 5 iterations
#in a row and not 5 non consequtive ones
if ite_follow==0:
ite_follow=ite
ite_conv+=1
elif ite==ite_follow+1:
ite_follow=ite
ite_conv+=1
else:
ite_follow=0
ite_conv=0
else:
g_00=dfunc(w,*args)
while (err>tol or err_rel>tol_rel) and (ite_conv<5):
if ite==iteMax:
warnings.warn('Maximum number of iterations reached: no convergence')
break
ite+=1
g0=dfunc(w,*args)
if ite==1:
w=w-eta*g0
else:
delta=np.dot(w-w_00,g0-g_00)/np.dot(g0-g_00,g0-g_00)
w_00=w.copy()
w=w-delta*g0
g_00=g0.copy()
err=np.linalg.norm(w-w_00)
err_rel=err/np.linalg.norm(w_00+np.finfo(float).eps)
if err<tol or err_rel<tol_rel:#Making certain that it is really for 5 iterations
#in a row and not 5 non consequtive ones
if ite_follow==0:
ite_follow=ite
ite_conv+=1
elif ite==ite_follow+1:
ite_follow=ite
ite_conv+=1
else:
ite_follow=0
ite_conv=0
return w,ite
| 43.837975
| 106
| 0.358917
| 1,689
| 17,316
| 3.589698
| 0.103612
| 0.065314
| 0.032987
| 0.029029
| 0.904998
| 0.901369
| 0.901369
| 0.882237
| 0.868712
| 0.8476
| 0
| 0.027849
| 0.5272
| 17,316
| 394
| 107
| 43.949239
| 0.712715
| 0.314622
| 0
| 0.930894
| 0
| 0
| 0.038225
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.00813
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
35fe6a19c27afba8ac7035c90b127989f079c83d
| 1,975
|
py
|
Python
|
model_title.py
|
deimqs/ClusterModel
|
a073ffff012ad3404acd9ce12396f63fe7e81109
|
[
"BSD-3-Clause"
] | null | null | null |
model_title.py
|
deimqs/ClusterModel
|
a073ffff012ad3404acd9ce12396f63fe7e81109
|
[
"BSD-3-Clause"
] | null | null | null |
model_title.py
|
deimqs/ClusterModel
|
a073ffff012ad3404acd9ce12396f63fe7e81109
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This file contains the code name.
"""
def show():
    """
    Print the ASCII-art title banner of the model pipeline to stdout.
    Based on F.R. NIKA soft.
    See also http://patorjk.com/software/taag
    Parameters
    ----------
    None
    Outputs
    ----------
    None (the banner is written to standard output)
    """
    print("=====================================================================")
    print("   ___   __    ___  __   __   ")
    print("  / __) / _\ / __) / \ ( ) ")
    print(" ( (__ / \( (_ \( O )/ (_/\ ")
    print("  \___)\_/\_/ \___/ \__/ \____/ ")
    print("=====================================================================")
    print(" Cluster Atmosphere modeling for Gamma-ray Observations Libraries ")
    print("---------------------------------------------------------------------")
    print(" ")
    # Alternative banner, kept for reference (disabled):
    #print("=================================================================")
    #print(" ______ _____ _____ ______ _____ ")
    #print(" | ___ \ ___| __ \| ___ \ ___| ")
    #print(" | |_/ / |__ | | \/| |_/ / |__ ")
    #print(" | __/| __|| | __ | /| __| ")
    #print(" | | | |___| |_\ \| |\ \| |___ ")
    #print(" \_| \____/ \____/\_| \_\____/ ")
    #print("=================================================================")
    #print(" Pipeline for the Estimation of Gamma Ray Emission in clusters ")
    #print("-----------------------------------------------------------------")
    #print(" ")
    # Candidate name expansion: Galaxy Cluster Hot Gas Modeling Pipeline
    # Gamma Ray Observations Analysis and Multi-Wavelength
| 47.02381
| 99
| 0.27038
| 82
| 1,975
| 5.146341
| 0.585366
| 0.379147
| 0.42654
| 0.42654
| 0.130332
| 0.130332
| 0.130332
| 0.130332
| 0.130332
| 0.130332
| 0
| 0
| 0.409114
| 1,975
| 41
| 100
| 48.170732
| 0.361611
| 0.551899
| 0
| 0.2
| 0
| 0
| 0.749095
| 0.249698
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| true
| 0
| 0
| 0
| 0.1
| 0.9
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
675f4e81e5be6e6d5724ff1aa2468482b3d780c5
| 19,138
|
py
|
Python
|
Job/AllJob.py
|
msg-gg/msg.gg-crawling
|
9952cff52b264bfe86ecd129372416bfcc9bdc84
|
[
"MIT"
] | null | null | null |
Job/AllJob.py
|
msg-gg/msg.gg-crawling
|
9952cff52b264bfe86ecd129372416bfcc9bdc84
|
[
"MIT"
] | null | null | null |
Job/AllJob.py
|
msg-gg/msg.gg-crawling
|
9952cff52b264bfe86ecd129372416bfcc9bdc84
|
[
"MIT"
] | null | null | null |
import json
# Earlier experiment with a groups.json input file (disabled):
# with open("groups.json") as groups:
#     groups_json = json.load(groups)
#
# print(groups_json["group1"])
import time
from collections import OrderedDict
from selenium import webdriver
import pandas
from webdriver_manager.chrome import ChromeDriverManager
import urllib.request
from sys import path
# Launch a Chrome WebDriver (driver binary auto-installed).
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.implicitly_wait(3)  # wait up to 3 seconds for web resources to load
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup
import time
# image crawling
body = driver.find_element_by_tag_name('body')
# NOTE(review): time, json and OrderedDict are imported twice in this file;
# harmless but could be cleaned up.
from collections import OrderedDict
import json
# Scrape the character rankings of every Maple world from maple.gg and dump
# them to AllJob.json.  Each ranking page lays its entries out in two columns
# (div[1] holds rows 1-22, div[2] the rest); each row exposes the world/rank
# text, the character name, and the population figure.
data = OrderedDict()

def _scrape_ranking(page_count):
    """Scrape the ranking rows of the page currently loaded in `driver`.

    :param page_count: exclusive upper bound of the 1-based row index
        (some worlds expose 44 rows, some 43).
    :return: list of dicts with keys 'world' (rank text), 'name', 'people'.
    """
    characters = []
    for i in range(1, page_count):
        # Rows 1-22 live in the first column div, the rest in the second.
        column = 1 if i <= 22 else 2
        base = ('//*[@id="app"]/div[2]/section/div/div/div['
                + str(column) + ']/div[' + str(i) + ']')
        character = {}
        character['world'] = driver.find_element_by_xpath(base + '/div[1]').text
        character['name'] = driver.find_element_by_xpath(base + '/div[2]').text
        character['people'] = driver.find_element_by_xpath(base + '/div[3]/div/div').text
        characters.append(character)
    return characters

# (key in the output JSON, ranking page URL, exclusive row-count bound).
# Order matters: it is both the scrape order and the JSON key order.
_WORLDS = [
    ('world', 'https://maple.gg/world', 45),
    ('luna', 'https://maple.gg/world/luna', 45),
    ('scania', 'https://maple.gg/world/scania', 44),
    ('elysium', 'https://maple.gg/world/elysium', 44),
    ('reboot', 'https://maple.gg/world/reboot', 44),
    ('croa', 'https://maple.gg/world/croa', 44),
    ('aurora', 'https://maple.gg/world/aurora', 44),
    ('bera', 'https://maple.gg/world/bera', 44),
    ('red', 'https://maple.gg/world/red', 44),
    ('union', 'https://maple.gg/world/union', 44),
    ('zenith', 'https://maple.gg/world/zenith', 44),
    ('enosis', 'https://maple.gg/world/enosis', 44),
    ('reboot2', 'https://maple.gg/world/reboot2', 44),
    ('arcane', 'https://maple.gg/world/arcane', 44),
    ('nova', 'https://maple.gg/world/nova', 44),
]

for key, url, count in _WORLDS:
    driver.get(url)
    data[key] = _scrape_ranking(count)

with open('AllJob.json', 'w', encoding="utf-8") as make_file:
    json.dump(data, make_file, ensure_ascii=False, indent="\t")
| 42.528889
| 121
| 0.550998
| 2,703
| 19,138
| 3.797262
| 0.037366
| 0.122759
| 0.150721
| 0.168453
| 0.897311
| 0.897311
| 0.894778
| 0.894778
| 0.838854
| 0.838854
| 0
| 0.023743
| 0.218727
| 19,138
| 450
| 121
| 42.528889
| 0.662721
| 0.006584
| 0
| 0.823529
| 0
| 0.220588
| 0.333579
| 0.232032
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.031863
| 0
| 0.031863
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
6763a6321bdfba0789f11ae606901d148aa31142
| 3,396
|
py
|
Python
|
tests/nn/architectures/test_hourglass.py
|
preeti98/sleap
|
203c3a03c0c54f8dab242611d9a8d24595e98081
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
tests/nn/architectures/test_hourglass.py
|
preeti98/sleap
|
203c3a03c0c54f8dab242611d9a8d24595e98081
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
tests/nn/architectures/test_hourglass.py
|
preeti98/sleap
|
203c3a03c0c54f8dab242611d9a8d24595e98081
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import numpy as np
import tensorflow as tf
from sleap.nn.system import use_cpu_only; use_cpu_only() # hide GPUs for test
from sleap.nn.architectures import hourglass
from sleap.nn.config import HourglassConfig
class HourglassTests(tf.test.TestCase):
    """Regression tests for the hourglass backbone: both construction paths
    (direct constructor and config) must reproduce the reference paper's
    shapes, strides, and parameter counts exactly."""

    def _check_reference_arch(self, arch):
        """Build the backbone on a (256, 256, 1) input and verify the
        reference output shapes, strides, and parameter counts."""
        x_in = tf.keras.layers.Input((256, 256, 1))
        x, x_mid = arch.make_backbone(x_in)
        model = tf.keras.Model(x_in, x)
        param_counts = [
            np.prod(train_var.shape) for train_var in model.trainable_weights
        ]
        with self.subTest("output shape"):
            self.assertAllEqual(
                [out.shape for out in model.output],
                [(None, 64, 64, 256)] * 3)
        with self.subTest("encoder stride"):
            self.assertEqual(arch.encoder_features_stride, 64)
        with self.subTest("decoder stride"):
            self.assertEqual(arch.decoder_features_stride, 4)
        with self.subTest("number of layers"):
            self.assertEqual(len(model.layers), 116)
        with self.subTest("number of trainable weights"):
            self.assertEqual(len(model.trainable_weights), 156)
        with self.subTest("trainable parameter count"):
            self.assertEqual(np.sum(param_counts), 65969408)
        with self.subTest("total parameter count"):
            self.assertEqual(model.count_params(), 66002944)
        with self.subTest("number of intermediate features"):
            self.assertEqual(len(x_mid), 3)

    def test_hourglass_reference(self):
        # Reference implementation from the original paper, built directly.
        arch = hourglass.Hourglass(
            down_blocks=4,
            up_blocks=4,
            stem_filters=128,
            stem_stride=4,
            filters=256,
            filter_increase=128,
            interp_method="nearest",
            stacks=3
        )
        self._check_reference_arch(arch)

    def test_hourglass_reference_from_config(self):
        # Same reference architecture built via the config path; it must
        # match the directly-constructed one exactly.
        arch = hourglass.Hourglass.from_config(HourglassConfig(
            stem_stride=4,
            max_stride=64,
            output_stride=4,
            stem_filters=128,
            filters=256,
            filter_increase=128,
            stacks=3,
        ))
        self._check_reference_arch(arch)
| 40.428571
| 78
| 0.617491
| 404
| 3,396
| 5.05198
| 0.225248
| 0.062714
| 0.117589
| 0.061734
| 0.803528
| 0.77707
| 0.77707
| 0.77707
| 0.77707
| 0.77707
| 0
| 0.044709
| 0.282097
| 3,396
| 83
| 79
| 40.915663
| 0.792453
| 0.034747
| 0
| 0.72
| 0
| 0
| 0.099878
| 0
| 0
| 0
| 0
| 0
| 0.213333
| 1
| 0.026667
| false
| 0
| 0.066667
| 0
| 0.106667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
67716dc1a1757a79b8e45297648042ff1177df49
| 24,188
|
py
|
Python
|
object_detection2/modeling/matcher.py
|
vghost2008/wml
|
d0c5a1da6c228e321ae59a563e9ac84aa66266ff
|
[
"MIT"
] | 6
|
2019-12-10T17:18:56.000Z
|
2022-03-01T01:00:35.000Z
|
object_detection2/modeling/matcher.py
|
vghost2008/wml
|
d0c5a1da6c228e321ae59a563e9ac84aa66266ff
|
[
"MIT"
] | 2
|
2021-08-25T16:16:01.000Z
|
2022-02-10T05:21:19.000Z
|
object_detection2/modeling/matcher.py
|
vghost2008/wml
|
d0c5a1da6c228e321ae59a563e9ac84aa66266ff
|
[
"MIT"
] | 2
|
2019-12-07T09:57:35.000Z
|
2021-09-06T04:58:10.000Z
|
#coding=utf-8
import tfop
import wmodule
import tensorflow as tf
import basic_tftools as btf
from .build_matcher import MATCHER
import wml_tfutils as wmlt
import object_detection2.bboxes as odb
import wsummary
@MATCHER.register()
class Matcher(wmodule.WChildModule):
    # Assigns a groundtruth label and box index to every proposal box using
    # IoU thresholds; the actual matching is done by the custom tfop.matcher op.
    def __init__(self,thresholds,allow_low_quality_matches=False,same_pos_label=None,*args,**kwargs):
        '''
        :param thresholds: [threshold] or [threshold_low,threshold_high]; a single
            value is duplicated so the negative and positive thresholds coincide
        :param allow_low_quality_matches: if it's true, the box which matches some gt box best will be set to positive
        :param same_pos_label: int, if it's not None, then all positive boxes' label will be set to same_pos_label
        '''
        super().__init__(*args,**kwargs)
        print("Matcher")
        if len(thresholds) == 1:
            # Single threshold: use it as both the low and high threshold.
            thresholds = [thresholds[0],thresholds[0]]
        self.thresholds = thresholds
        self.allow_low_quality_matches = allow_low_quality_matches
        self.same_pos_label = same_pos_label
    @btf.show_input_shape
    def forward(self,boxes,gboxes,glabels,glength,*args,**kwargs):
        '''
        :param boxes: [1,X,4] or [batch_size,X,4] proposal boxes
        :param gboxes: [batch_size,Y,4] groundtruth boxes
        :param glabels: [batch_size,Y] groundtruth labels
        :param glength: [batch_size] boxes size
        :return:
        labels: [batch_size,X,4], the label of boxes, -1 indicates an ignored box, which will not contribute to the loss,
        0 is background
        scores: [batch_size,X], the overlap score with the box's matched gt box
        indices: [batch_size,X] the index of matched gt boxes when it's a positive anchor box, else it's -1
        '''
        # The IoU computation and assignment happen inside the custom op.
        labels,scores,indices = tfop.matcher(bboxes=boxes,gboxes=gboxes,
                                             glabels=glabels,
                                             length=glength,
                                             neg_threshold=self.thresholds[0],
                                             pos_threshold=self.thresholds[1],
                                             max_overlap_as_pos=self.allow_low_quality_matches,
                                             force_in_gtbox=False)
        if self.same_pos_label:
            # Collapse every positive class into the single configured label.
            labels = tf.where(tf.greater(labels,0),tf.ones_like(labels)*self.same_pos_label,labels)
        return labels,scores,indices
'''
Bridging the Gap Between Anchor-based and Anchor-free Detection via
Adaptive Training Sample Selection
'''
@MATCHER.register()
class ATSSMatcher(wmodule.WChildModule):
    # IOUs at or below this floor never count toward the adaptive statistics,
    # and a final best-match score must exceed it to stay positive.
    MIN_IOU_THRESHOLD = 0.1
    def __init__(self,k=9,same_pos_label=None,*args,**kwargs):
        '''
        :param k: number of closest candidates (by box-center distance) kept per feature level
        :param same_pos_label: int, if not None every positive label is collapsed to this value
        '''
        super().__init__(*args,**kwargs)
        self.k = k
        self.same_pos_label = same_pos_label
        print(f"ATSSMatcher v1.0 k={k}")
    @staticmethod
    def moments(data,threshold,axes=-1):
        '''
        Masked mean/variance over `axes`: entries of `data` below `threshold`
        are excluded from both statistics. Returns (mean, variance), each with
        keepdims=True along `axes`.
        '''
        mask = tf.greater_equal(data,threshold)
        mask_f = tf.cast(mask,data.dtype)
        data_f = tf.where(mask,data,tf.zeros_like(data))
        data_sum = tf.reduce_sum(data_f,axis=axes,keepdims=True)
        # Clamp the count so rows with no surviving entries divide by 1, not 0.
        data_nr = tf.maximum(tf.reduce_sum(mask_f,axis=axes,keepdims=True),1)
        data_mean = data_sum/data_nr
        s_diff = tf.squared_difference(data, tf.stop_gradient(data_mean))
        s_diff = tf.where(mask,s_diff,tf.zeros_like(s_diff))
        variance = tf.reduce_sum(
            s_diff,
            axis=axes,
            keepdims=True,
            name="variance")/data_nr
        return data_mean,variance
    def forward(self,boxes,gboxes,glabels,glength,boxes_len,*args,**kwargs):
        '''
        :param boxes: [1,X,4] or [batch_size,X,4] proposal boxes
        :param gboxes: [batch_size,Y,4] groundtruth boxes
        :param glabels: [batch_size,Y] groundtruth labels
        :param glength: [batch_size] boxes size
        :param boxes_len: [len0,len1,len2,...] sum(boxes_len)=X, boxes len in each layer
        :return:
        labels: [batch_size,X,4], the label of boxes, -1 indict ignored box, which will not calculate loss,
        0 is background
        scores: [batch_size,X], the overlap score with boxes' match gt box
        indices: [batch_size,X] the index of matched gt boxes when it's a positive anchor box, else it's -1
        '''
        # NOTE(review): the scope name "ATTSMatcher" looks like a typo for
        # "ATSSMatcher"; kept as-is for graph/checkpoint name compatibility.
        with tf.name_scope("ATTSMatcher"):
            assert isinstance(boxes_len,(list,tuple)), "error boxes len type."
            # Pairwise gt-vs-anchor matrices: center distance, IOU, and
            # whether each anchor's center lies inside each gt box.
            dis_matrix = odb.batch_bboxes_pair_wrapv2(gboxes,boxes,
                                                      fn=odb.get_bboxes_dis,
                                                      len0=glength,
                                                      scope="get_dis_matrix")
            iou_matrix = odb.batch_bboxes_pair_wrapv2(gboxes,boxes,
                                                      fn=odb.get_iou_matrix,
                                                      len0=glength,
                                                      scope="get_iou_matrix")
            is_center_in_gtboxes = odb.batch_bboxes_pair_wrapv2(gboxes,boxes,
                                                                fn=odb.is_center_in_boxes,
                                                                len0=glength,
                                                                dtype=tf.bool,
                                                                scope="get_is_center_in_gtbboxes")
            #dis_matrix = tf.Print(dis_matrix,[tf.shape(dis_matrix),tf.reduce_sum(boxes_len)],summarize=100)
            # Take the k proposal boxes closest to each gt on every layer.
            dis_matrix = tf.split(dis_matrix,boxes_len,axis=2)
            # Running offsets so per-layer top-k indices can be mapped back to
            # global anchor indices.
            offsets = [0]
            with tf.name_scope("get_offset"):
                for i in range(len(boxes_len)-1):
                    n_off = offsets[-1]+boxes_len[i]
                    offsets.append(n_off)
            pos_indices = []
            for tl,bl,dism in zip(offsets,boxes_len,dis_matrix):
                # top_k on the negated distance == k smallest distances.
                values,indices = tf.nn.top_k(-dism,k=tf.minimum(self.k,bl),sorted=False)
                indices = indices+tl
                pos_indices.append(indices)
            pos_indices = tf.concat(pos_indices,axis=-1)
            pos_ious = btf.batch_gather(iou_matrix,pos_indices,name="gather_pos_ious")
            # mean+std over the per-layer top-k entries whose IOU exceeds MIN_IOU_THRESHOLD.
            iou_mean,iou_var = self.moments(pos_ious,threshold=self.MIN_IOU_THRESHOLD,axes=[-1])
            #wsummary.histogram_or_scalar(iou_mean,"iou_mean")
            with tf.device("/cpu:0"):
                # Adaptive per-gt threshold = mean + std, capped by the best
                # candidate IOU so at least one candidate can qualify.
                max_iou_threshold = tf.reduce_max(pos_ious,axis=-1,keepdims=True)
                iou_std = tf.sqrt(iou_var)
                iou_threshold = iou_mean+iou_std
                iou_threshold = tf.minimum(max_iou_threshold,iou_threshold)
            '''
            原算法中表示的为仅从上面的topk中取正样本,这里从所有的样本中取正样本
            '''
            # (Translation of the note above: the original paper selects
            # positives only from the top-k candidates; here positives are
            # selected from ALL anchors.)
            # Positive = IOU >= adaptive threshold AND anchor center inside the gt box.
            is_pos = tf.logical_and(iou_matrix>=iou_threshold,is_center_in_gtboxes)
            iou_matrix = tf.where(is_pos,iou_matrix,tf.zeros_like(iou_matrix))
            # For each anchor pick its single best-matching gt (k=1).
            scores,index = tf.nn.top_k(tf.transpose(iou_matrix,perm=[0,2,1]),k=1)
            B,Y,_ = btf.combined_static_and_dynamic_shape(gboxes)
            index = tf.squeeze(index,axis=-1)
            scores = tf.squeeze(scores,axis=-1)
            labels = wmlt.batch_gather(glabels,index,name="gather_labels",
                                       parallel_iterations=B,
                                       back_prop=False)
            # Anchors whose best score does not beat the floor become background
            # (label 0) with match index -1.
            is_good_score = tf.greater(scores,self.MIN_IOU_THRESHOLD)
            labels = tf.where(is_good_score,labels,tf.zeros_like(labels))
            index = tf.where(is_good_score,index,tf.ones_like(index)*-1)
            #iou_matrix=iou_matrix[:1,:glength[0]]
            #iou_matrix = tf.reduce_sum(iou_matrix,axis=-1)
            #wsummary.histogram_or_scalar(iou_matrix,"iou_matrix")
            if self.same_pos_label:
                labels = tf.where(tf.greater(labels, 0), tf.ones_like(labels) * self.same_pos_label, labels)
            return tf.stop_gradient(labels),tf.stop_gradient(scores),tf.stop_gradient(index)
@MATCHER.register()
class ATSSMatcher3(wmodule.WChildModule):
    # IOU floor: values at or below this are excluded from the adaptive
    # statistics and can never yield a positive match.
    MIN_IOU_THRESHOLD = 0.1
    def __init__(self,thresholds,same_pos_label=None,*args,**kwargs):
        '''
        :param thresholds: list; thresholds[0] and thresholds[-1] are used — the
            adaptive per-gt threshold is capped at thresholds[-1], and
            thresholds[-1]-thresholds[0] defines the ignore band width
        :param same_pos_label: int, if not None every positive label is collapsed to this value
        '''
        super().__init__(*args,**kwargs)
        self.same_pos_label = same_pos_label
        self.thresholds = thresholds
        print(f"ATSSMatcher v3.0, thresholds={self.thresholds}")
    @wmlt.add_name_scope
    def get_threshold(self,iou_matrix):
        '''
        Per-gt adaptive threshold = mean + std of that gt's IOUs above
        MIN_IOU_THRESHOLD.
        iou_matrix: [B,GT_nr,Anchor_nr]
        X = GT_nr, Y=Anchor_nr
        return:
        [B,GT]
        '''
        B,X,Y = btf.combined_static_and_dynamic_shape(iou_matrix)
        # Flatten (batch, gt) so map_fn visits one gt row at a time.
        iou_matrix = tf.reshape(iou_matrix,[B*X,Y])
        def fn(ious):
            mask = tf.greater(ious,self.MIN_IOU_THRESHOLD)
            def fn0():
                p_ious = tf.boolean_mask(ious,mask)
                mean,var = tf.nn.moments(p_ious,axes=-1)
                std = tf.sqrt(var)
                return mean+std
            def fn1():
                # No candidate above the floor: return 1.0, an unreachable
                # IOU threshold, so this gt produces no positives.
                return tf.constant(1.0,dtype=tf.float32)
            return tf.cond(tf.reduce_any(mask),fn0,fn1)
        threshold = tf.map_fn(fn,elems=iou_matrix,back_prop=False)
        threshold = tf.reshape(threshold,[B,X])
        return tf.stop_gradient(threshold)
    def forward(self,boxes,gboxes,glabels,glength,*args,**kwargs):
        '''
        :param boxes: [1,X,4] or [batch_size,X,4] proposal boxes
        :param gboxes: [batch_size,Y,4] groundtruth boxes
        :param glabels: [batch_size,Y] groundtruth labels
        :param glength: [batch_size] boxes size
        :return:
        labels: [batch_size,X,4], the label of boxes, -1 indict ignored box, which will not calculate loss,
        0 is background
        scores: [batch_size,X], the overlap score with boxes' match gt box
        indices: [batch_size,X] the index of matched gt boxes when it's a positive anchor box, else it's -1
        '''
        # NOTE(review): scope name "ATTSMatcher3" looks like a typo for
        # "ATSSMatcher3"; kept for graph/checkpoint name compatibility.
        with tf.name_scope("ATTSMatcher3"):
            iou_matrix = odb.batch_bboxes_pair_wrapv2(gboxes,boxes,
                                                      fn=odb.get_iou_matrix,
                                                      len0=glength,
                                                      scope="get_iou_matrix")
            is_center_in_gtboxes = odb.batch_bboxes_pair_wrapv2(gboxes,boxes,
                                                                fn=odb.is_center_in_boxes,
                                                                len0=glength,
                                                                dtype=tf.bool,
                                                                scope="get_is_center_in_gtbboxes")
            wsummary.variable_summaries_v2(iou_matrix,"iou_matrix")
            with tf.device("/cpu:0"):
                iou_threshold = self.get_threshold(iou_matrix)
                # Cap the adaptive threshold by the configured ceiling.
                iou_threshold = tf.minimum(iou_threshold,self.thresholds[-1])
            # Anchors whose center is outside the gt box cannot match it.
            iou_matrix = tf.where(is_center_in_gtboxes,iou_matrix,tf.zeros_like(iou_matrix))
            # Best-matching gt per anchor (k=1).
            scores,index = tf.nn.top_k(tf.transpose(iou_matrix,perm=[0,2,1]),k=1)
            B,Y,_ = btf.combined_static_and_dynamic_shape(gboxes)
            index = tf.squeeze(index,axis=-1)
            scores = tf.squeeze(scores,axis=-1)
            # Per-anchor threshold = adaptive threshold of its matched gt.
            threshold = wmlt.batch_gather(iou_threshold,index)
            labels = wmlt.batch_gather(glabels,index,name="gather_labels",
                                       parallel_iterations=B,
                                       back_prop=False)
            is_good_score = tf.greater(scores,self.MIN_IOU_THRESHOLD)
            is_good_score = tf.logical_and(is_good_score,scores>=threshold)
            labels = tf.where(is_good_score,labels,tf.zeros_like(labels))
            # Ignore band: scores just below the per-gt threshold (within
            # `margin`) and above MIN_IOU_THRESHOLD+margin get label -1 so
            # they are excluded from the loss.
            margin = self.thresholds[-1]-self.thresholds[0]
            is_in_mid_threshold = tf.logical_and(scores<threshold,scores>threshold-margin)
            is_ignore = tf.logical_and(is_in_mid_threshold,scores>self.MIN_IOU_THRESHOLD+margin)
            labels = tf.where(is_ignore,tf.ones_like(labels)*-1,labels)
            index = tf.where(is_good_score,index,tf.ones_like(index)*-1)
            if self.same_pos_label:
                labels = tf.where(tf.greater(labels, 0), tf.ones_like(labels) * self.same_pos_label, labels)
            return tf.stop_gradient(labels),tf.stop_gradient(scores),tf.stop_gradient(index)
@MATCHER.register()
class ATSSMatcher4(wmodule.WChildModule):
    '''
    Compared with ATSSMatcher3, ATSSMatcher4 does not give special (ignore)
    treatment to samples falling between threshold[0] and threshold[1].
    Concretely: all proposal boxes whose IOU with a gt exceeds
    MIN_IOU_THRESHOLD take part in the statistics; mean+std is used as the
    positive/negative threshold, capped at self.thresholds[-1]; in addition
    a positive sample's center must lie inside the gt box.
    '''
    MIN_IOU_THRESHOLD = 0.1
    def __init__(self,thresholds,same_pos_label=None,*args,**kwargs):
        '''
        :param thresholds: list; thresholds[-1] caps the adaptive per-gt threshold
        :param same_pos_label: int, if not None every positive label is collapsed to this value
        '''
        super().__init__(*args,**kwargs)
        self.same_pos_label = same_pos_label
        self.thresholds = thresholds
        print(f"ATSSMatcher v4.0, thresholds={self.thresholds}")
    @wmlt.add_name_scope
    def get_threshold(self,iou_matrix):
        '''
        Per-gt adaptive threshold = mean + std of that gt's IOUs above
        MIN_IOU_THRESHOLD. (Duplicated from ATSSMatcher3.get_threshold.)
        iou_matrix: [B,GT_nr,Anchor_nr]
        X = GT_nr, Y=Anchor_nr
        return:
        [B,GT]
        '''
        B,X,Y = btf.combined_static_and_dynamic_shape(iou_matrix)
        # Flatten (batch, gt) so map_fn visits one gt row at a time.
        iou_matrix = tf.reshape(iou_matrix,[B*X,Y])
        def fn(ious):
            mask = tf.greater(ious,self.MIN_IOU_THRESHOLD)
            def fn0():
                p_ious = tf.boolean_mask(ious,mask)
                mean,var = tf.nn.moments(p_ious,axes=-1)
                std = tf.sqrt(var)
                return mean+std
            def fn1():
                # No candidate above the floor: 1.0 is an unreachable IOU
                # threshold, so this gt produces no positives.
                return tf.constant(1.0,dtype=tf.float32)
            return tf.cond(tf.reduce_any(mask),fn0,fn1)
        threshold = tf.map_fn(fn,elems=iou_matrix,back_prop=False)
        threshold = tf.reshape(threshold,[B,X])
        return tf.stop_gradient(threshold)
    def forward(self,boxes,gboxes,glabels,glength,*args,**kwargs):
        '''
        :param boxes: [1,X,4] or [batch_size,X,4] proposal boxes
        :param gboxes: [batch_size,Y,4] groundtruth boxes
        :param glabels: [batch_size,Y] groundtruth labels
        :param glength: [batch_size] boxes size
        :return:
        labels: [batch_size,X,4], the label of boxes, -1 indict ignored box, which will not calculate loss,
        0 is background
        scores: [batch_size,X], the overlap score with boxes' match gt box
        indices: [batch_size,X] the index of matched gt boxes when it's a positive anchor box, else it's -1
        '''
        # NOTE(review): scope name "ATTSMatcher4" looks like a typo for
        # "ATSSMatcher4"; kept for graph/checkpoint name compatibility.
        with tf.name_scope("ATTSMatcher4"):
            iou_matrix = odb.batch_bboxes_pair_wrapv2(gboxes,boxes,
                                                      fn=odb.get_iou_matrix,
                                                      len0=glength,
                                                      scope="get_iou_matrix")
            is_center_in_gtboxes = odb.batch_bboxes_pair_wrapv2(gboxes,boxes,
                                                                fn=odb.is_center_in_boxes,
                                                                len0=glength,
                                                                dtype=tf.bool,
                                                                scope="get_is_center_in_gtbboxes")
            wsummary.variable_summaries_v2(iou_matrix,"iou_matrix")
            with tf.device("/cpu:0"):
                iou_threshold = self.get_threshold(iou_matrix)
                # Cap the adaptive threshold by the configured ceiling.
                iou_threshold = tf.minimum(iou_threshold,self.thresholds[-1])
            # Anchors whose center is outside the gt box cannot match it.
            iou_matrix = tf.where(is_center_in_gtboxes,iou_matrix,tf.zeros_like(iou_matrix))
            # Best-matching gt per anchor (k=1).
            scores,index = tf.nn.top_k(tf.transpose(iou_matrix,perm=[0,2,1]),k=1)
            B,Y,_ = btf.combined_static_and_dynamic_shape(gboxes)
            index = tf.squeeze(index,axis=-1)
            scores = tf.squeeze(scores,axis=-1)
            # Per-anchor threshold = adaptive threshold of its matched gt.
            threshold = wmlt.batch_gather(iou_threshold,index)
            labels = wmlt.batch_gather(glabels,index,name="gather_labels",
                                       parallel_iterations=B,
                                       back_prop=False)
            is_good_score = tf.greater(scores,self.MIN_IOU_THRESHOLD)
            is_good_score = tf.logical_and(is_good_score,scores>=threshold)
            # No ignore band here (unlike ATSSMatcher3): below-threshold
            # anchors simply become background.
            labels = tf.where(is_good_score,labels,tf.zeros_like(labels))
            index = tf.where(is_good_score,index,tf.ones_like(index)*-1)
            if self.same_pos_label:
                labels = tf.where(tf.greater(labels, 0), tf.ones_like(labels) * self.same_pos_label, labels)
            return tf.stop_gradient(labels),tf.stop_gradient(scores),tf.stop_gradient(index)
@MATCHER.register()
class DynamicMatcher(wmodule.WChildModule):
    # IOU floor: values below this are excluded from the weighted statistics
    # and can never yield a positive match.
    MIN_IOU_THRESHOLD = 0.1
    def __init__(self,thresholds=[0.0],same_pos_label=None,*args,**kwargs):
        '''
        :param thresholds: list; thresholds[-1] caps the adaptive threshold when
            it exceeds MIN_IOU_THRESHOLD. NOTE(review): mutable default argument;
            harmless here because the list is never mutated, but a tuple/None
            default would be safer.
        :param same_pos_label: int, if not None every positive label is collapsed to this value
        '''
        super().__init__(*args,**kwargs)
        self.same_pos_label = same_pos_label
        self.thresholds = thresholds
        print(f"DynamicMatcher v1.0, thresholds={self.thresholds}")
    @staticmethod
    def moments(data,weights,threshold,axes=-1):
        '''
        Weighted masked mean/variance over `axes`: entries of `data` below
        `threshold` are excluded; surviving entries are weighted by `weights`.
        Returns (mean, variance), each with keepdims=True along `axes`.
        '''
        mask = tf.greater_equal(data,threshold)
        mask_f = tf.cast(mask,data.dtype)
        if weights.dtype != data.dtype:
            weights = tf.cast(weights,data.dtype)
        mask_wf = mask_f*weights
        data_f = tf.where(mask,data,tf.zeros_like(data))
        data_wf = data_f*weights
        data_sum = tf.reduce_sum(data_wf,axis=axes,keepdims=True)
        # Clamp the weighted count so empty rows divide by 1, not 0.
        data_nr = tf.maximum(tf.reduce_sum(mask_wf,axis=axes,keepdims=True),1)
        data_mean = data_sum/data_nr
        s_diff = tf.squared_difference(data, tf.stop_gradient(data_mean))
        s_diff = tf.where(mask,s_diff,tf.zeros_like(s_diff))*weights
        variance = tf.reduce_sum(
            s_diff,
            axis=axes,
            keepdims=True,
            name="variance")/data_nr
        return data_mean,variance
    @wmlt.add_name_scope
    def get_threshold(self,iou_matrix,anchor_weights):
        '''
        Per-gt adaptive threshold = weighted mean + std of that gt's IOUs
        above MIN_IOU_THRESHOLD, with per-anchor weights.
        iou_matrix: [B,GT_nr,Anchor_nr]
        X = GT_nr, Y=Anchor_nr
        return:
        [B,GT]
        '''
        B,GT_nr,Anchor_nr = wmlt.combined_static_and_dynamic_shape(iou_matrix)
        # Broadcast the per-anchor weights across batch and gt dimensions.
        anchor_weights = tf.reshape(anchor_weights,[1,1,Anchor_nr])
        iou_mean, iou_var = self.moments(iou_matrix, weights=anchor_weights,
                                         threshold=self.MIN_IOU_THRESHOLD, axes=[-1])
        iou_std = tf.sqrt(iou_var)
        iou_threshold = iou_mean + iou_std
        iou_threshold = tf.squeeze(iou_threshold,axis=-1)
        return tf.stop_gradient(iou_threshold)
    @wmlt.add_name_scope
    def get_anchor_weights(self,boxes_len):
        # One weight per anchor: boxes_len[0]/boxes_len[i], so anchors on
        # smaller (coarser) levels weigh more in the statistics.
        # NOTE(review): tf.to_float is TF1-only (removed in TF2).
        boxes_len_f = [tf.to_float(x) for x in boxes_len]
        scales = [tf.to_float(boxes_len[0])/x for x in boxes_len_f]
        weights = []
        for s,l in zip(scales,boxes_len):
            w = tf.ones(shape=[l],dtype=tf.float32)*s
            weights.append(w)
        return tf.concat(weights,axis=-1)
    def forward(self,boxes,gboxes,glabels,glength,boxes_len,*args,**kwargs):
        '''
        :param boxes: [1,X,4] or [batch_size,X,4] proposal boxes
        :param gboxes: [batch_size,Y,4] groundtruth boxes
        :param glabels: [batch_size,Y] groundtruth labels
        :param glength: [batch_size] boxes size
        :param boxes_len: [len0,len1,len2,...] sum(boxes_len)=X, boxes len in each layer
        :return:
        labels: [batch_size,X,4], the label of boxes, -1 indict ignored box, which will not calculate loss,
        0 is background
        scores: [batch_size,X], the overlap score with boxes' match gt box
        indices: [batch_size,X] the index of matched gt boxes when it's a positive anchor box, else it's -1
        '''
        with tf.name_scope("DynamicMatcher"):
            assert isinstance(boxes_len,(list,tuple)), "error boxes len type."
            iou_matrix = odb.batch_bboxes_pair_wrapv2(gboxes,boxes,
                                                      fn=odb.get_iou_matrix,
                                                      len0=glength,
                                                      scope="get_iou_matrix")
            is_center_in_gtboxes = odb.batch_bboxes_pair_wrapv2(gboxes,boxes,
                                                                fn=odb.is_center_in_boxes,
                                                                len0=glength,
                                                                dtype=tf.bool,
                                                                scope="get_is_center_in_gtbboxes")
            wsummary.variable_summaries_v2(iou_matrix,"iou_matrix")
            with tf.device("/cpu:0"):
                anchor_weights = self.get_anchor_weights(boxes_len)
                iou_threshold = self.get_threshold(iou_matrix,anchor_weights)
                # Only cap the adaptive threshold when a real ceiling was configured.
                if self.thresholds[-1]>self.MIN_IOU_THRESHOLD:
                    print(f"DynamicMatcher use thresholds ceiling {self.thresholds[-1]}.")
                    iou_threshold = tf.minimum(iou_threshold,self.thresholds[-1])
            # Anchors whose center is outside the gt box cannot match it.
            iou_matrix = tf.where(is_center_in_gtboxes,iou_matrix,tf.zeros_like(iou_matrix))
            # Best-matching gt per anchor (k=1).
            scores,index = tf.nn.top_k(tf.transpose(iou_matrix,perm=[0,2,1]),k=1)
            B,Y,_ = btf.combined_static_and_dynamic_shape(gboxes)
            index = tf.squeeze(index,axis=-1)
            scores = tf.squeeze(scores,axis=-1)
            # Per-anchor threshold = adaptive threshold of its matched gt.
            threshold = wmlt.batch_gather(iou_threshold,index)
            labels = wmlt.batch_gather(glabels,index,name="gather_labels",
                                       parallel_iterations=B,
                                       back_prop=False)
            is_good_score = tf.greater(scores,self.MIN_IOU_THRESHOLD)
            is_good_score = tf.logical_and(is_good_score,scores>=threshold)
            labels = tf.where(is_good_score,labels,tf.zeros_like(labels))
            index = tf.where(is_good_score,index,tf.ones_like(index)*-1)
            if self.same_pos_label:
                labels = tf.where(tf.greater(labels, 0), tf.ones_like(labels) * self.same_pos_label, labels)
            return tf.stop_gradient(labels),tf.stop_gradient(scores),tf.stop_gradient(index)
@MATCHER.register()
class MatcherV2(wmodule.WChildModule):
    """Threshold-pair IOU matcher delegating to the tfop.matcherv2 custom op."""

    def __init__(self,thresholds,same_pos_label=None,*args,**kwargs):
        '''
        :param thresholds: [threshold] or [threshold_low,threshold_high]
        :param same_pos_label: int, if not None every positive label is collapsed
            to this single value
        '''
        super().__init__(*args,**kwargs)
        # A single threshold means low == high.
        if len(thresholds) == 1:
            thresholds = [thresholds[0]] * 2
        print(f"MatcherV2, thresholds={thresholds}")
        self.thresholds = thresholds
        self.same_pos_label = same_pos_label

    @btf.show_input_shape
    def forward(self,boxes,gboxes,glabels,glength,*args,**kwargs):
        '''
        :param boxes: [1,X,4] or [batch_size,X,4] proposal boxes
        :param gboxes: [batch_size,Y,4] groundtruth boxes
        :param glabels: [batch_size,Y] groundtruth labels
        :param glength: [batch_size] boxes size
        :return:
        labels: [batch_size,X], -1 marks an ignored box (excluded from the loss),
        0 is background
        scores: [batch_size,X], the overlap score with the box's matched gt box
        indices: [batch_size,X], index of the matched gt box for positives, else -1
        '''
        # All of the matching work happens inside the custom op.
        matched = tfop.matcherv2(bboxes=boxes,
                                 gboxes=gboxes,
                                 glabels=glabels,
                                 length=glength,
                                 threshold=self.thresholds)
        labels, scores, indices = matched
        if self.same_pos_label:
            uniform_labels = tf.ones_like(labels) * self.same_pos_label
            labels = tf.where(tf.greater(labels, 0), uniform_labels, labels)
        return labels, scores, indices
| 49.565574
| 116
| 0.582892
| 3,023
| 24,188
| 4.42309
| 0.092623
| 0.041059
| 0.030514
| 0.021539
| 0.808466
| 0.799641
| 0.780495
| 0.765762
| 0.758283
| 0.751103
| 0
| 0.012404
| 0.316727
| 24,188
| 487
| 117
| 49.667351
| 0.796636
| 0.185836
| 0
| 0.7125
| 0
| 0
| 0.035947
| 0.01225
| 0
| 0
| 0
| 0
| 0.00625
| 1
| 0.075
| false
| 0
| 0.025
| 0.00625
| 0.1875
| 0.021875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
67923d79383bacce58cc21025af0cdbeaeec2c3c
| 19,877
|
py
|
Python
|
tests/conftest.py
|
verbosemode/scrapli
|
b3885169dccf24ac65d0d433eae16bcab8288002
|
[
"MIT"
] | 404
|
2020-02-11T09:05:40.000Z
|
2022-03-31T05:10:03.000Z
|
tests/conftest.py
|
verbosemode/scrapli
|
b3885169dccf24ac65d0d433eae16bcab8288002
|
[
"MIT"
] | 155
|
2020-02-18T00:21:43.000Z
|
2022-03-06T16:34:47.000Z
|
tests/conftest.py
|
verbosemode/scrapli
|
b3885169dccf24ac65d0d433eae16bcab8288002
|
[
"MIT"
] | 48
|
2020-04-02T00:24:44.000Z
|
2022-03-07T18:24:53.000Z
|
from pathlib import Path
import pytest
from devices import DEVICES
from helper import (
arista_eos_clean_response,
cisco_iosxe_clean_response,
cisco_iosxr_clean_response,
cisco_nxos_clean_response,
juniper_junos_clean_response,
)
import scrapli
# Absolute path of the test-data directory, resolved relative to the
# installed scrapli package (one level above the package itself).
TEST_DATA_PATH = f"{Path(scrapli.__file__).parents[1]}/tests/test_data"


@pytest.fixture(scope="session")
def test_data_path():
    """Session-scoped fixture yielding the path of the test data directory."""
    return TEST_DATA_PATH
@pytest.fixture(scope="session")
def test_devices_dict():
    """Session-scoped fixture exposing the DEVICES mapping used by the tests."""
    return DEVICES
# Expected CLI interactions per platform, shared by functional and integration
# tests. Long canned outputs are loaded from files under TEST_DATA_PATH.
# Fix: use Path(...).read_text() instead of open(...).read() so the twelve
# files read at import time do not leak unclosed file handles (ResourceWarning).
TEST_CASES = {
    "cisco_iosxe": {
        "get_prompt": {
            "exec": "csr1000v>",
            "privilege_exec": "csr1000v#",
            "configuration": "csr1000v(config)#",
        },
        "send_command_short": {
            "command": "show run | i hostname",
            "expected_no_strip": "hostname csr1000v\ncsr1000v#",
            "expected_strip": "hostname csr1000v",
        },
        "send_command_long": {
            "command": "show run",
            "expected_no_strip": Path(
                f"{TEST_DATA_PATH}/expected/cisco_iosxe/send_command_long_no_strip"
            ).read_text(),
            "expected_strip": Path(
                f"{TEST_DATA_PATH}/expected/cisco_iosxe/send_command_long_strip"
            ).read_text(),
        },
        "send_commands_from_file": {
            "file": f"{TEST_DATA_PATH}/source/cisco_iosxe/send_commands",
            "expected_no_strip": ["hostname csr1000v\ncsr1000v#", "hostname csr1000v\ncsr1000v#"],
            "expected_strip": ["hostname csr1000v", "hostname csr1000v"],
        },
        "send_commands_error": {
            "commands": ["show version", "show tacocat", "show version"],
        },
        "send_interactive_normal_response": {
            "command": [("clear logg", "Clear logging buffer [confirm]"), ("", "csr1000v#")],
            "expected": "clear logg\nClear logging buffer [confirm]\n\ncsr1000v#",
        },
        "send_interactive_hidden_response": None,
        "send_config": {
            "configs": "interface loopback123\ndescription scrapli was here",
            "expected_no_strip": "csr1000v(config-if)#\ncsr1000v(config-if)#",
            "expected_strip": "\n",
            "verification": "show run int loopback123",
            "verification_expected_no_strip": "Building configuration...\n\nCurrent configuration : CONFIG_BYTES"
            "\n!\ninterface Loopback123\n description scrapli was here\n no ip"
            " address\nend\n\ncsr1000v#",
            "verification_expected_strip": "Building configuration...\n\nCurrent configuration : CONFIG_BYTES"
            "\n!\ninterface Loopback123\n description scrapli was here\n no ip "
            "address\nend",
            "teardown_configs": "no interface loopback123",
        },
        "send_configs": {
            "configs": ["interface loopback123", "description scrapli was here"],
            "expected_no_strip": ["csr1000v(config-if)#", "csr1000v(config-if)#"],
            "expected_strip": ["", ""],
            "verification": "show run int loopback123",
            "verification_expected_no_strip": "Building configuration...\n\nCurrent configuration : CONFIG_BYTES"
            "\n!\ninterface Loopback123\n description scrapli was here\n no ip"
            " address\nend\n\ncsr1000v#",
            "verification_expected_strip": "Building configuration...\n\nCurrent configuration : CONFIG_BYTES"
            "\n!\ninterface Loopback123\n description scrapli was here\n no ip "
            "address\nend",
            "teardown_configs": "no interface loopback123",
        },
        "send_configs_from_file": {
            "file": f"{TEST_DATA_PATH}/source/cisco_iosxe/send_configs",
            "expected_no_strip": ["csr1000v(config-if)#", "csr1000v(config-if)#"],
            "expected_strip": ["", ""],
            "teardown_configs": "no interface loopback123",
        },
        "send_configs_error": {
            "configs": ["interface loopback123", "show tacocat", "description tacocat was here"],
            "teardown_configs": "no interface loopback123",
        },
        "sanitize_response": cisco_iosxe_clean_response,
    },
    "cisco_nxos": {
        "get_prompt": {
            "exec": None,
            "privilege_exec": "switch#",
            "configuration": "switch(config)#",
        },
        "send_command_short": {
            "command": "show run | i scp-server",
            "expected_no_strip": "feature scp-server\nswitch#",
            "expected_strip": "feature scp-server",
        },
        "send_command_long": {
            "command": "show run",
            "expected_no_strip": Path(
                f"{TEST_DATA_PATH}/expected/cisco_nxos/send_command_long_no_strip"
            ).read_text(),
            "expected_strip": Path(
                f"{TEST_DATA_PATH}/expected/cisco_nxos/send_command_long_strip"
            ).read_text(),
        },
        "send_commands_from_file": {
            "file": f"{TEST_DATA_PATH}/source/cisco_nxos/send_commands",
            "expected_no_strip": ["feature scp-server\nswitch#", "feature scp-server\nswitch#"],
            "expected_strip": ["feature scp-server", "feature scp-server"],
        },
        "send_commands_error": {
            "commands": ["show version", "show tacocat", "show version"],
        },
        "send_interactive_normal_response": {
            "command": [
                ("delete bootflash:virtual-instance.conf", "(yes/no/abort)   [y]"),
                ("n", "switch#"),
            ],
            "expected": 'delete bootflash:virtual-instance.conf\nDo you want to delete "/virtual-instance.conf" ? (yes/no/abort)   [y] n\nswitch#',
        },
        "send_interactive_hidden_response": None,
        "send_config": {
            "configs": "interface loopback123\ndescription scrapli was here",
            "expected_no_strip": "switch(config-if)#\nswitch(config-if)#",
            "expected_strip": "\n",
            "verification": "show run int loopback123",
            "verification_expected_no_strip": "!Command: show running-config interface loopback123\n!Running "
            "configuration last done at: TIME_STAMP_REPLACED\n!Time: "
            "TIME_STAMP_REPLACED\n\nversion 9.2(4) Bios:version\n\ninterface "
            "loopback123\n  description scrapli was here\n\nswitch#",
            "verification_expected_strip": "!Command: show running-config interface loopback123\n!Running "
            "configuration last done at: TIME_STAMP_REPLACED\n!Time: "
            "TIME_STAMP_REPLACED\n\nversion 9.2(4) Bios:version\n\ninterface "
            "loopback123\n  description scrapli was here",
            "teardown_configs": "no interface loopback123",
        },
        "send_configs": {
            "configs": ["interface loopback123", "description scrapli was here"],
            "expected_no_strip": ["switch(config-if)#", "switch(config-if)#"],
            "expected_strip": ["", ""],
            "verification": "show run int loopback123",
            "verification_expected_no_strip": "!Command: show running-config interface loopback123\n!Running "
            "configuration last done at: TIME_STAMP_REPLACED\n!Time: "
            "TIME_STAMP_REPLACED\n\nversion 9.2(4) Bios:version\n\ninterface "
            "loopback123\n  description scrapli was here\n\nswitch#",
            "verification_expected_strip": "!Command: show running-config interface loopback123\n!Running "
            "configuration last done at: TIME_STAMP_REPLACED\n!Time: "
            "TIME_STAMP_REPLACED\n\nversion 9.2(4) Bios:version\n\ninterface "
            "loopback123\n  description scrapli was here",
            "teardown_configs": "no interface loopback123",
        },
        "send_configs_from_file": {
            "file": f"{TEST_DATA_PATH}/source/cisco_nxos/send_configs",
            "expected_no_strip": ["switch(config-if)#", "switch(config-if)#"],
            "expected_strip": ["", ""],
            "teardown_configs": "no interface loopback123",
        },
        "send_configs_error": {
            "configs": ["interface loopback123", "show tacocat", "description tacocat was here"],
            "teardown_configs": "no interface loopback123",
        },
        "sanitize_response": cisco_nxos_clean_response,
    },
    "cisco_iosxr": {
        "get_prompt": {
            "exec": None,
            "privilege_exec": "RP/0/RP0/CPU0:ios#",
            "configuration": "RP/0/RP0/CPU0:ios(config)#",
        },
        "send_command_short": {
            "command": "show run | i MgmtEth0",
            "expected_no_strip": "TIME_STAMP_REPLACED\nBuilding configuration...\ninterface MgmtEth0/RP0/CPU0/0\nRP/0/RP0/CPU0:ios#",
            "expected_strip": "TIME_STAMP_REPLACED\nBuilding configuration...\ninterface MgmtEth0/RP0/CPU0/0",
        },
        "send_command_long": {
            "command": "show run",
            "expected_no_strip": Path(
                f"{TEST_DATA_PATH}/expected/cisco_iosxr/send_command_long_no_strip"
            ).read_text(),
            "expected_strip": Path(
                f"{TEST_DATA_PATH}/expected/cisco_iosxr/send_command_long_strip"
            ).read_text(),
        },
        "send_commands_from_file": {
            "file": f"{TEST_DATA_PATH}/source/cisco_iosxr/send_commands",
            "expected_no_strip": [
                "TIME_STAMP_REPLACED\nBuilding configuration...\ninterface MgmtEth0/RP0/CPU0/0\nRP/0/RP0/CPU0:ios#",
                "TIME_STAMP_REPLACED\nBuilding configuration...\ninterface MgmtEth0/RP0/CPU0/0\nRP/0/RP0/CPU0:ios#",
            ],
            "expected_strip": [
                "TIME_STAMP_REPLACED\nBuilding configuration...\ninterface MgmtEth0/RP0/CPU0/0",
                "TIME_STAMP_REPLACED\nBuilding configuration...\ninterface MgmtEth0/RP0/CPU0/0",
            ],
        },
        "send_commands_error": {
            "commands": ["show version", "show tacocat", "show version"],
        },
        "send_interactive_normal_response": None,
        "send_interactive_hidden_response": None,
        "send_config": {
            "configs": "interface loopback123\ndescription scrapli was here\ncommit",
            "expected_no_strip": "RP/0/RP0/CPU0:ios(config-if)#\nRP/0/RP0/CPU0:ios(config-if)#\nTIME_STAMP_REPLACED\nRP/0/RP0/CPU0:ios(config-if)#",
            "expected_strip": "\n\nTIME_STAMP_REPLACED",  # we get the timestamp of the commit in this output
            "verification": "show run int loopback123",
            "verification_expected_no_strip": "TIME_STAMP_REPLACED\ninterface Loopback123\n description scrapli was here\n!\n\nRP/0/RP0/CPU0:ios#",
            "verification_expected_strip": "TIME_STAMP_REPLACED\ninterface Loopback123\n description scrapli was here\n!",
            "teardown_configs": ["no interface loopback123", "commit"],
        },
        "send_configs": {
            "configs": ["interface loopback123", "description scrapli was here", "commit"],
            "expected_no_strip": ["RP/0/RP0/CPU0:ios(config-if)#", "RP/0/RP0/CPU0:ios(config-if)#"],
            "expected_strip": ["", ""],
            "verification": "show run int loopback123",
            "verification_expected_no_strip": "TIME_STAMP_REPLACED\ninterface Loopback123\n description scrapli was here\n!\n\nRP/0/RP0/CPU0:ios#",
            "verification_expected_strip": "TIME_STAMP_REPLACED\ninterface Loopback123\n description scrapli was here\n!",
            "teardown_configs": ["no interface loopback123", "commit"],
        },
        "send_configs_from_file": {
            "file": f"{TEST_DATA_PATH}/source/cisco_iosxr/send_configs",
            "expected_no_strip": ["RP/0/RP0/CPU0:ios(config-if)#", "RP/0/RP0/CPU0:ios(config-if)#"],
            "expected_strip": ["", ""],
            "teardown_configs": ["no interface loopback123", "commit"],
        },
        "send_configs_error": {
            "configs": ["interface loopback123", "show tacocat", "description tacocat was here"],
            "teardown_configs": ["no interface loopback123", "commit"],
        },
        "sanitize_response": cisco_iosxr_clean_response,
    },
    "arista_eos": {
        "get_prompt": {
            "exec": "localhost>",
            "privilege_exec": "localhost#",
            "configuration": "localhost(config)#",
        },
        "send_command_short": {
            "command": "show run | i ZTP",
            "expected_no_strip": "logging level ZTP informational\nlocalhost#",
            "expected_strip": "logging level ZTP informational",
        },
        "send_command_long": {
            "command": "show run",
            "expected_no_strip": Path(
                f"{TEST_DATA_PATH}/expected/arista_eos/send_command_long_no_strip"
            ).read_text(),
            "expected_strip": Path(
                f"{TEST_DATA_PATH}/expected/arista_eos/send_command_long_strip"
            ).read_text(),
        },
        "send_commands_from_file": {
            "file": f"{TEST_DATA_PATH}/source/arista_eos/send_commands",
            "expected_no_strip": [
                "logging level ZTP informational\nlocalhost#",
                "logging level ZTP informational\nlocalhost#",
            ],
            "expected_strip": [
                "logging level ZTP informational",
                "logging level ZTP informational",
            ],
        },
        "send_commands_error": {
            "commands": ["show version", "show tacocat", "show version"],
        },
        "send_interactive_normal_response": None,
        "send_interactive_hidden_response": None,
        "send_config": {
            "configs": "interface loopback123\ndescription scrapli was here",
            "expected_no_strip": "localhost(config-if-Lo123)#\nlocalhost(config-if-Lo123)#",
            "expected_strip": "\n",
            "verification": "show run int loopback123",
            "verification_expected_no_strip": "interface Loopback123\n   description scrapli was here\nlocalhost#",
            "verification_expected_strip": "interface Loopback123\n   description scrapli was here",
            "teardown_configs": "no interface loopback123",
        },
        "send_configs": {
            "configs": ["interface loopback123", "description scrapli was here"],
            "expected_no_strip": ["localhost(config-if-Lo123)#", "localhost(config-if-Lo123)#"],
            "expected_strip": ["", ""],
            "verification": "show run int loopback123",
            "verification_expected_no_strip": "interface Loopback123\n   description scrapli was here\nlocalhost#",
            "verification_expected_strip": "interface Loopback123\n   description scrapli was here",
            "teardown_configs": "no interface loopback123",
        },
        "send_configs_from_file": {
            "file": f"{TEST_DATA_PATH}/source/arista_eos/send_configs",
            "expected_no_strip": ["localhost(config-if-Lo123)#", "localhost(config-if-Lo123)#"],
            "expected_strip": ["", ""],
            "teardown_configs": "no interface loopback123",
        },
        "send_configs_error": {
            "configs": ["interface loopback123", "show tacocat", "description tacocat was here"],
            "teardown_configs": "no interface loopback123",
        },
        "sanitize_response": arista_eos_clean_response,
    },
    "juniper_junos": {
        "get_prompt": {
            "exec": "boxen>",
            "privilege_exec": None,
            "configuration": "boxen#",
        },
        "send_command_short": {
            "command": "show configuration | match 10.0.0.15",
            "expected_no_strip": "                address 10.0.0.15/24;\n\nboxen>",
            "expected_strip": "                address 10.0.0.15/24;",
        },
        "send_command_long": {
            "command": "show configuration",
            "expected_no_strip": Path(
                f"{TEST_DATA_PATH}/expected/juniper_junos/send_command_long_no_strip"
            ).read_text(),
            "expected_strip": Path(
                f"{TEST_DATA_PATH}/expected/juniper_junos/send_command_long_strip"
            ).read_text(),
        },
        "send_commands_from_file": {
            "file": f"{TEST_DATA_PATH}/source/juniper_junos/send_commands",
            "expected_no_strip": [
                "                address 10.0.0.15/24;\n\nboxen>",
                "                address 10.0.0.15/24;\n\nboxen>",
            ],
            "expected_strip": [
                "                address 10.0.0.15/24;",
                "                address 10.0.0.15/24;",
            ],
        },
        "send_commands_error": {
            "commands": ["show version", "show tacocat", "show version"],
        },
        "send_interactive_normal_response": None,
        "send_interactive_hidden_response": None,
        "send_config": {
            "configs": 'set interfaces fxp0 unit 0 description "scrapli was here"\ncommit',
            "expected_no_strip": "[edit]\nboxen#\ncommit complete\n\n[edit]\nboxen#",
            "expected_strip": "[edit]\ncommit complete\n\n[edit]",
            "verification": "show configuration interfaces fxp0",
            "verification_expected_no_strip": 'unit 0 {\n    description "scrapli was here";\n    family inet {\n        address 10.0.0.15/24;\n    }\n}\n\nboxen>',
            "verification_expected_strip": 'unit 0 {\n    description "scrapli was here";\n    family inet {\n        address 10.0.0.15/24;\n    }\n}',
            "teardown_configs": ["delete interfaces fxp0 unit 0 description", "commit"],
        },
        "send_configs": {
            "configs": ['set interfaces fxp0 unit 0 description "scrapli was here"', "commit"],
            "expected_no_strip": ["[edit]\nboxen#", "commit complete\n\n[edit]\nboxen#"],
            "expected_strip": ["[edit]", "commit complete\n\n[edit]"],
            "verification": "show configuration interfaces fxp0",
            "verification_expected_no_strip": 'unit 0 {\n    description "scrapli was here";\n    family inet {\n        address 10.0.0.15/24;\n    }\n}\n\nboxen>',
            "verification_expected_strip": 'unit 0 {\n    description "scrapli was here";\n    family inet {\n        address 10.0.0.15/24;\n    }\n}',
            "teardown_configs": ["delete interfaces fxp0 unit 0 description", "commit"],
        },
        "send_configs_from_file": {
            "file": f"{TEST_DATA_PATH}/source/juniper_junos/send_configs",
            "expected_no_strip": ["[edit]\nboxen#", "commit complete\n\n[edit]\nboxen#"],
            "expected_strip": ["[edit]", "commit complete\n\n[edit]"],
            "teardown_configs": ["delete interfaces fxp0 unit 0 description", "commit"],
        },
        "send_configs_error": {
            "configs": [
                "set interfaces fxp0 description tacocat",
                "show tacocat",
                "set interfaces fxp0 description tacocat",
            ],
            "teardown_configs": ["delete interfaces fxp0 description", "commit"],
        },
        "sanitize_response": juniper_junos_clean_response,
    },
    "linux": {
        "get_prompt": "linux:~#",
        "send_command_short": {
            "command": "cat /etc/hostname",
            "expected_no_strip": "linux\nlinux:~#",
            "expected_strip": "linux",
        },
        "send_command_long": {
            "command": "cat /etc/init.d/sshd",
            "expected_no_strip": Path(
                f"{TEST_DATA_PATH}/expected/linux/send_command_long_no_strip"
            ).read_text(),
            "expected_strip": Path(
                f"{TEST_DATA_PATH}/expected/linux/send_command_long_strip"
            ).read_text(),
        },
    },
}
@pytest.fixture(scope="session")
def test_cases():
"""Fixture to return test cases shared across functional and integration tests"""
return TEST_CASES
| 48.958128
| 164
| 0.589224
| 2,054
| 19,877
| 5.455209
| 0.082278
| 0.029987
| 0.056225
| 0.05801
| 0.896564
| 0.849799
| 0.824007
| 0.803124
| 0.770549
| 0.748862
| 0
| 0.031815
| 0.274186
| 19,877
| 405
| 165
| 49.079012
| 0.744853
| 0.010313
| 0
| 0.575448
| 0
| 0.028133
| 0.625458
| 0.232143
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007673
| false
| 0
| 0.012788
| 0
| 0.028133
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
67a817e7c9c9b31d3fd9fd3cfa4da3d6890f16ac
| 347
|
py
|
Python
|
src/GridCal/Engine/Simulations/PowerFlow/__init__.py
|
vineetjnair9/GridCal
|
5b63cbae45cbe176b015e5e99164a593f450fe71
|
[
"BSD-3-Clause"
] | null | null | null |
src/GridCal/Engine/Simulations/PowerFlow/__init__.py
|
vineetjnair9/GridCal
|
5b63cbae45cbe176b015e5e99164a593f450fe71
|
[
"BSD-3-Clause"
] | null | null | null |
src/GridCal/Engine/Simulations/PowerFlow/__init__.py
|
vineetjnair9/GridCal
|
5b63cbae45cbe176b015e5e99164a593f450fe71
|
[
"BSD-3-Clause"
] | null | null | null |
from GridCal.Engine.Simulations.PowerFlow.power_flow_options import *
from GridCal.Engine.Simulations.PowerFlow.power_flow_worker import *
from GridCal.Engine.Simulations.PowerFlow.power_flow_driver import *
from GridCal.Engine.Simulations.PowerFlow.time_series_driver import *
from GridCal.Engine.Simulations.PowerFlow.time_Series_input import *
| 57.833333
| 69
| 0.870317
| 45
| 347
| 6.488889
| 0.311111
| 0.188356
| 0.291096
| 0.479452
| 0.917808
| 0.917808
| 0.917808
| 0.760274
| 0.40411
| 0
| 0
| 0
| 0.057637
| 347
| 5
| 70
| 69.4
| 0.892966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 10
|
67ee0fca105651c153184405ac6b65e9abf950f5
| 196
|
py
|
Python
|
Vault7/Lost-in-Translation/windows/Resources/Ops/PyScripts/lib/ops/data/eventlogfilter.py
|
dendisuhubdy/grokmachine
|
120a21a25c2730ed356739231ec8b99fc0575c8b
|
[
"BSD-3-Clause"
] | 46
|
2017-05-15T11:15:08.000Z
|
2018-07-02T03:32:52.000Z
|
Vault7/Lost-in-Translation/windows/Resources/Ops/PyScripts/lib/ops/data/eventlogfilter.py
|
dendisuhubdy/grokmachine
|
120a21a25c2730ed356739231ec8b99fc0575c8b
|
[
"BSD-3-Clause"
] | null | null | null |
Vault7/Lost-in-Translation/windows/Resources/Ops/PyScripts/lib/ops/data/eventlogfilter.py
|
dendisuhubdy/grokmachine
|
120a21a25c2730ed356739231ec8b99fc0575c8b
|
[
"BSD-3-Clause"
] | 24
|
2017-05-17T03:26:17.000Z
|
2018-07-09T07:00:50.000Z
|
import ops.data
import ops.data.eventlogquery
if ('eventlogfilter' not in ops.data.cmd_definitions):
ops.data.cmd_definitions['eventlogfilter'] = ops.data.cmd_definitions['eventlogquery']
| 39.2
| 90
| 0.780612
| 25
| 196
| 6
| 0.4
| 0.233333
| 0.2
| 0.42
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 196
| 5
| 90
| 39.2
| 0.852273
| 0
| 0
| 0
| 0
| 0
| 0.213542
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.