hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c5f07912eec3eb14654ef8e1def61503ae139d74
| 148
|
py
|
Python
|
build_pipeline/config/__init__.py
|
jakob-bagterp/timer_for_python
|
351d0fa1336cfb001f7813225d46383f82f34a1e
|
[
"MIT"
] | 1
|
2021-12-06T18:04:26.000Z
|
2021-12-06T18:04:26.000Z
|
build_pipeline/config/__init__.py
|
jakob-bagterp/timer_for_python
|
351d0fa1336cfb001f7813225d46383f82f34a1e
|
[
"MIT"
] | null | null | null |
build_pipeline/config/__init__.py
|
jakob-bagterp/timer_for_python
|
351d0fa1336cfb001f7813225d46383f82f34a1e
|
[
"MIT"
] | null | null | null |
__all__ = ["directory"]
def package_name() -> str:
return "timer-for-python"
def package_install_name() -> str:
return "timer-for-python"
| 18.5
| 34
| 0.675676
| 19
| 148
| 4.894737
| 0.578947
| 0.215054
| 0.27957
| 0.387097
| 0.580645
| 0.580645
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168919
| 148
| 7
| 35
| 21.142857
| 0.756098
| 0
| 0
| 0.4
| 0
| 0
| 0.277027
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.4
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
68048392d8148f554f794499da503e4ebd9694e8
| 15,521
|
gyp
|
Python
|
binding.gyp
|
jnovack/StormLib
|
d4ef4037cdf3b5016854553def1f6f5ceee9dbec
|
[
"MIT"
] | 1
|
2021-01-13T16:40:02.000Z
|
2021-01-13T16:40:02.000Z
|
binding.gyp
|
nydus/storm-replay
|
3ca553d5606bc78cb2e6a7250bdbe64563c9ec82
|
[
"MIT"
] | 6
|
2016-02-10T22:17:37.000Z
|
2020-06-10T22:45:17.000Z
|
binding.gyp
|
jnovack/StormLib
|
d4ef4037cdf3b5016854553def1f6f5ceee9dbec
|
[
"MIT"
] | 1
|
2016-02-10T21:01:11.000Z
|
2016-02-10T21:01:11.000Z
|
{
"targets": [
{
"target_name": "storm-replay",
"sources": [
"src/storm-replay.cpp",
"src/StormLib/src/adpcm/adpcm.cpp",
"src/StormLib/src/huffman/huff.cpp",
"src/StormLib/src/sparse/sparse.cpp",
"src/StormLib/src/FileStream.cpp",
"src/StormLib/src/SBaseCommon.cpp",
"src/StormLib/src/SBaseDumpData.cpp",
"src/StormLib/src/SBaseFileTable.cpp",
"src/StormLib/src/SBaseSubTypes.cpp",
"src/StormLib/src/SCompression.cpp",
"src/StormLib/src/SFileAddFile.cpp",
"src/StormLib/src/SFileAttributes.cpp",
"src/StormLib/src/SFileCompactArchive.cpp",
"src/StormLib/src/SFileCreateArchive.cpp",
"src/StormLib/src/SFileExtractFile.cpp",
"src/StormLib/src/SFileFindFile.cpp",
"src/StormLib/src/SFileGetFileInfo.cpp",
"src/StormLib/src/SFileListFile.cpp",
"src/StormLib/src/SFileOpenArchive.cpp",
"src/StormLib/src/SFileOpenFileEx.cpp",
"src/StormLib/src/SFilePatchArchives.cpp",
"src/StormLib/src/SFileReadFile.cpp",
"src/StormLib/src/SFileVerify.cpp",
"src/StormLib/src/libtomcrypt/src/hashes/sha1.c",
"src/StormLib/src/libtomcrypt/src/hashes/hash_memory.c",
"src/StormLib/src/libtomcrypt/src/hashes/md5.c",
"src/StormLib/src/libtomcrypt/src/misc/crypt_hash_is_valid.c",
"src/StormLib/src/libtomcrypt/src/misc/crypt_prng_descriptor.c",
"src/StormLib/src/libtomcrypt/src/misc/crypt_register_prng.c",
"src/StormLib/src/libtomcrypt/src/misc/crypt_ltc_mp_descriptor.c",
"src/StormLib/src/libtomcrypt/src/misc/crypt_find_hash.c",
"src/StormLib/src/libtomcrypt/src/misc/zeromem.c",
"src/StormLib/src/libtomcrypt/src/misc/base64_decode.c",
"src/StormLib/src/libtomcrypt/src/misc/crypt_register_hash.c",
"src/StormLib/src/libtomcrypt/src/misc/crypt_find_prng.c",
"src/StormLib/src/libtomcrypt/src/misc/crypt_prng_is_valid.c",
"src/StormLib/src/libtomcrypt/src/misc/crypt_hash_descriptor.c",
"src/StormLib/src/libtomcrypt/src/misc/crypt_libc.c",
"src/StormLib/src/libtomcrypt/src/misc/crypt_argchk.c",
"src/StormLib/src/libtomcrypt/src/math/multi.c",
"src/StormLib/src/libtomcrypt/src/math/ltm_desc.c",
"src/StormLib/src/libtomcrypt/src/math/rand_prime.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_decode_bit_string.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_decode_boolean.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_decode_choice.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_decode_ia5_string.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_decode_integer.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_decode_object_identifier.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_decode_octet_string.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_decode_printable_string.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_decode_sequence_ex.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_decode_sequence_flexi.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_decode_sequence_multi.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_decode_short_integer.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_decode_utctime.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_decode_utf8_string.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_encode_bit_string.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_encode_boolean.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_encode_ia5_string.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_encode_integer.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_encode_object_identifier.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_encode_octet_string.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_encode_printable_string.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_encode_sequence_ex.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_encode_sequence_multi.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_encode_set.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_encode_setof.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_encode_short_integer.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_encode_utctime.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_encode_utf8_string.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_length_bit_string.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_length_boolean.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_length_ia5_string.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_length_integer.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_length_object_identifier.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_length_octet_string.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_length_printable_string.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_length_sequence.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_length_utctime.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_sequence_free.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_length_utf8_string.c",
"src/StormLib/src/libtomcrypt/src/pk/asn1/der_length_short_integer.c",
"src/StormLib/src/libtomcrypt/src/pk/ecc/ltc_ecc_projective_dbl_point.c",
"src/StormLib/src/libtomcrypt/src/pk/ecc/ltc_ecc_mulmod.c",
"src/StormLib/src/libtomcrypt/src/pk/ecc/ltc_ecc_projective_add_point.c",
"src/StormLib/src/libtomcrypt/src/pk/ecc/ltc_ecc_map.c",
"src/StormLib/src/libtomcrypt/src/pk/ecc/ltc_ecc_points.c",
"src/StormLib/src/libtomcrypt/src/pk/ecc/ltc_ecc_mul2add.c",
"src/StormLib/src/libtomcrypt/src/pk/pkcs1/pkcs_1_v1_5_decode.c",
"src/StormLib/src/libtomcrypt/src/pk/pkcs1/pkcs_1_v1_5_encode.c",
"src/StormLib/src/libtomcrypt/src/pk/pkcs1/pkcs_1_pss_decode.c",
"src/StormLib/src/libtomcrypt/src/pk/pkcs1/pkcs_1_pss_encode.c",
"src/StormLib/src/libtomcrypt/src/pk/pkcs1/pkcs_1_mgf1.c",
"src/StormLib/src/libtomcrypt/src/pk/pkcs1/pkcs_1_oaep_decode.c",
"src/StormLib/src/libtomcrypt/src/pk/rsa/rsa_make_key.c",
"src/StormLib/src/libtomcrypt/src/pk/rsa/rsa_free.c",
"src/StormLib/src/libtomcrypt/src/pk/rsa/rsa_verify_simple.c",
"src/StormLib/src/libtomcrypt/src/pk/rsa/rsa_import.c",
"src/StormLib/src/libtomcrypt/src/pk/rsa/rsa_verify_hash.c",
"src/StormLib/src/libtomcrypt/src/pk/rsa/rsa_sign_hash.c",
"src/StormLib/src/libtomcrypt/src/pk/rsa/rsa_exptmod.c",
"src/StormLib/src/libtommath/bn_mp_exptmod_fast.c",
"src/StormLib/src/libtommath/bn_mp_jacobi.c",
"src/StormLib/src/libtommath/bn_mp_mod.c",
"src/StormLib/src/libtommath/bn_mp_signed_bin_size.c",
"src/StormLib/src/libtommath/bn_mp_invmod.c",
"src/StormLib/src/libtommath/bn_mp_is_square.c",
"src/StormLib/src/libtommath/bn_mp_neg.c",
"src/StormLib/src/libtommath/bn_mp_reduce_2k.c",
"src/StormLib/src/libtommath/bn_mp_xor.c",
"src/StormLib/src/libtommath/bn_mp_karatsuba_mul.c",
"src/StormLib/src/libtommath/bn_mp_dr_setup.c",
"src/StormLib/src/libtommath/bn_mp_mul.c",
"src/StormLib/src/libtommath/bn_mp_init_multi.c",
"src/StormLib/src/libtommath/bn_mp_clear.c",
"src/StormLib/src/libtommath/bn_s_mp_sqr.c",
"src/StormLib/src/libtommath/bn_mp_rshd.c",
"src/StormLib/src/libtommath/bn_s_mp_sub.c",
"src/StormLib/src/libtommath/bn_mp_sub.c",
"src/StormLib/src/libtommath/bn_mp_toradix.c",
"src/StormLib/src/libtommath/bn_mp_reduce.c",
"src/StormLib/src/libtommath/bn_mp_prime_is_prime.c",
"src/StormLib/src/libtommath/bn_mp_prime_next_prime.c",
"src/StormLib/src/libtommath/bn_mp_exptmod.c",
"src/StormLib/src/libtommath/bn_mp_mod_2d.c",
"src/StormLib/src/libtommath/bn_reverse.c",
"src/StormLib/src/libtommath/bn_mp_init.c",
"src/StormLib/src/libtommath/bn_fast_s_mp_sqr.c",
"src/StormLib/src/libtommath/bn_mp_sqr.c",
"src/StormLib/src/libtommath/bn_mp_cnt_lsb.c",
"src/StormLib/src/libtommath/bn_mp_clear_multi.c",
"src/StormLib/src/libtommath/bn_mp_exch.c",
"src/StormLib/src/libtommath/bn_fast_s_mp_mul_digs.c",
"src/StormLib/src/libtommath/bn_mp_grow.c",
"src/StormLib/src/libtommath/bn_mp_read_radix.c",
"src/StormLib/src/libtommath/bn_mp_mul_2.c",
"src/StormLib/src/libtommath/bn_mp_shrink.c",
"src/StormLib/src/libtommath/bn_mp_div_2.c",
"src/StormLib/src/libtommath/bn_fast_mp_invmod.c",
"src/StormLib/src/libtommath/bn_mp_prime_miller_rabin.c",
"src/StormLib/src/libtommath/bn_mp_to_unsigned_bin.c",
"src/StormLib/src/libtommath/bn_mp_prime_rabin_miller_trials.c",
"src/StormLib/src/libtommath/bn_mp_2expt.c",
"src/StormLib/src/libtommath/bn_mp_cmp_mag.c",
"src/StormLib/src/libtommath/bn_mp_to_signed_bin.c",
"src/StormLib/src/libtommath/bn_mp_get_int.c",
"src/StormLib/src/libtommath/bn_mp_montgomery_reduce.c",
"src/StormLib/src/libtommath/bn_mp_dr_reduce.c",
"src/StormLib/src/libtommath/bn_mp_fwrite.c",
"src/StormLib/src/libtommath/bn_mp_and.c",
"src/StormLib/src/libtommath/bn_mp_exteuclid.c",
"src/StormLib/src/libtommath/bn_fast_mp_montgomery_reduce.c",
"src/StormLib/src/libtommath/bn_s_mp_mul_high_digs.c",
"src/StormLib/src/libtommath/bn_mp_reduce_setup.c",
"src/StormLib/src/libtommath/bn_mp_lcm.c",
"src/StormLib/src/libtommath/bn_mp_abs.c",
"src/StormLib/src/libtommath/bn_mp_cmp.c",
"src/StormLib/src/libtommath/bn_mp_submod.c",
"src/StormLib/src/libtommath/bn_mp_div_d.c",
"src/StormLib/src/libtommath/bn_s_mp_mul_digs.c",
"src/StormLib/src/libtommath/bn_mp_mul_d.c",
"src/StormLib/src/libtommath/bn_mp_to_unsigned_bin_n.c",
"src/StormLib/src/libtommath/bn_mp_prime_random_ex.c",
"src/StormLib/src/libtommath/bn_mp_rand.c",
"src/StormLib/src/libtommath/bn_mp_div_2d.c",
"src/StormLib/src/libtommath/bn_mp_addmod.c",
"src/StormLib/src/libtommath/bn_mp_init_copy.c",
"src/StormLib/src/libtommath/bn_mp_read_unsigned_bin.c",
"src/StormLib/src/libtommath/bn_mp_toradix_n.c",
"src/StormLib/src/libtommath/bn_fast_s_mp_mul_high_digs.c",
"src/StormLib/src/libtommath/bn_mp_toom_sqr.c",
"src/StormLib/src/libtommath/bn_mp_to_signed_bin_n.c",
"src/StormLib/src/libtommath/bn_mp_reduce_2k_setup_l.c",
"src/StormLib/src/libtommath/bn_mp_div.c",
"src/StormLib/src/libtommath/bn_prime_tab.c",
"src/StormLib/src/libtommath/bn_mp_karatsuba_sqr.c",
"src/StormLib/src/libtommath/bn_mp_gcd.c",
"src/StormLib/src/libtommath/bn_mp_prime_is_divisible.c",
"src/StormLib/src/libtommath/bn_mp_set_int.c",
"src/StormLib/src/libtommath/bn_mp_prime_fermat.c",
"src/StormLib/src/libtommath/bn_mp_cmp_d.c",
"src/StormLib/src/libtommath/bn_mp_add.c",
"src/StormLib/src/libtommath/bn_mp_sub_d.c",
"src/StormLib/src/libtommath/bn_s_mp_exptmod.c",
"src/StormLib/src/libtommath/bn_mp_init_size.c",
"src/StormLib/src/libtommath/bncore.c",
"src/StormLib/src/libtommath/bn_mp_radix_smap.c",
"src/StormLib/src/libtommath/bn_mp_reduce_2k_l.c",
"src/StormLib/src/libtommath/bn_mp_montgomery_calc_normalization.c",
"src/StormLib/src/libtommath/bn_mp_mod_d.c",
"src/StormLib/src/libtommath/bn_mp_set.c",
"src/StormLib/src/libtommath/bn_mp_or.c",
"src/StormLib/src/libtommath/bn_mp_sqrt.c",
"src/StormLib/src/libtommath/bn_mp_invmod_slow.c",
"src/StormLib/src/libtommath/bn_mp_count_bits.c",
"src/StormLib/src/libtommath/bn_mp_read_signed_bin.c",
"src/StormLib/src/libtommath/bn_mp_div_3.c",
"src/StormLib/src/libtommath/bn_mp_unsigned_bin_size.c",
"src/StormLib/src/libtommath/bn_mp_mulmod.c",
"src/StormLib/src/libtommath/bn_mp_clamp.c",
"src/StormLib/src/libtommath/bn_mp_reduce_2k_setup.c",
"src/StormLib/src/libtommath/bn_mp_toom_mul.c",
"src/StormLib/src/libtommath/bn_mp_montgomery_setup.c",
"src/StormLib/src/libtommath/bn_mp_expt_d.c",
"src/StormLib/src/libtommath/bn_mp_copy.c",
"src/StormLib/src/libtommath/bn_mp_dr_is_modulus.c",
"src/StormLib/src/libtommath/bn_mp_sqrmod.c",
"src/StormLib/src/libtommath/bn_mp_reduce_is_2k_l.c",
"src/StormLib/src/libtommath/bn_mp_mul_2d.c",
"src/StormLib/src/libtommath/bn_mp_fread.c",
"src/StormLib/src/libtommath/bn_mp_init_set.c",
"src/StormLib/src/libtommath/bn_mp_add_d.c",
"src/StormLib/src/libtommath/bn_mp_zero.c",
"src/StormLib/src/libtommath/bn_s_mp_add.c",
"src/StormLib/src/libtommath/bn_mp_radix_size.c",
"src/StormLib/src/libtommath/bn_mp_init_set_int.c",
"src/StormLib/src/libtommath/bn_mp_n_root.c",
"src/StormLib/src/libtommath/bn_mp_lshd.c",
"src/StormLib/src/libtommath/bn_mp_reduce_is_2k.c",
"src/StormLib/src/pklib/implode.c",
"src/StormLib/src/pklib/crc32.c",
"src/StormLib/src/pklib/explode.c",
"src/StormLib/src/zlib/crc32.c",
"src/StormLib/src/zlib/trees.c",
"src/StormLib/src/zlib/compress.c",
"src/StormLib/src/zlib/adler32.c",
"src/StormLib/src/zlib/inftrees.c",
"src/StormLib/src/zlib/inffast.c",
"src/StormLib/src/zlib/deflate.c",
"src/StormLib/src/zlib/inflate.c",
"src/StormLib/src/zlib/zutil.c",
"src/StormLib/src/lzma/C/LzFind.c",
"src/StormLib/src/lzma/C/LzmaEnc.c",
"src/StormLib/src/lzma/C/LzmaDec.c",
"src/StormLib/src/jenkins/lookup3.c"
],
'include_dirs': [
'src/'
'StormLib/src/',
"<!(node -e \"require('nan')\")"
],
'conditions': [
[ 'OS=="mac"',
{
'sources!': [
"src/StormLib/src/lzma/C/LzFind.c",
"src/StormLib/src/lzma/C/LzmaEnc.c",
"src/StormLib/src/lzma/C/LzmaDec.c",
],
'cflags': [
'-Wall',
'-D__SYS_BZLIB',
'-D__SYS_ZLIB',
'-D_7ZIP_ST',
'-arch x86_64',
'-shared'
],
'link_settings': {
'libraries': [
'-lbz2',
'-lz',
'-framework Carbon',
]
},
},
'OS=="linux"',
{
'cflags': [
'-Wall',
'-fPIC',
'-D__SYS_BZLIB',
'-D__SYS_ZLIB',
'-D_7ZIP_ST',
],
'link_settings': {
'libraries': [
'-lbz2',
'-lz',
]
},
}
]
]
}
]
}
| 52.792517
| 82
| 0.660653
| 2,212
| 15,521
| 4.389241
| 0.108951
| 0.269647
| 0.343187
| 0.329076
| 0.859306
| 0.826656
| 0.817283
| 0.741271
| 0.548563
| 0.380781
| 0
| 0.007657
| 0.192191
| 15,521
| 293
| 83
| 52.972696
| 0.766709
| 0
| 0
| 0.099656
| 0
| 0
| 0.752271
| 0.730816
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.003436
| 0
| 0.003436
| 0.010309
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
a83b173d781bf39fe042aa187d427135362f7eb0
| 120
|
py
|
Python
|
pycon-tutorial/pycon-project/collection/views.py
|
kajayi/hellowebapp-2
|
7bd6b98ebb73e38c3f5df0ff2cb7debeb88d97bf
|
[
"MIT"
] | null | null | null |
pycon-tutorial/pycon-project/collection/views.py
|
kajayi/hellowebapp-2
|
7bd6b98ebb73e38c3f5df0ff2cb7debeb88d97bf
|
[
"MIT"
] | null | null | null |
pycon-tutorial/pycon-project/collection/views.py
|
kajayi/hellowebapp-2
|
7bd6b98ebb73e38c3f5df0ff2cb7debeb88d97bf
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, render_to_response
def home(request):
return render_to_response('index.html')
| 24
| 55
| 0.8
| 17
| 120
| 5.411765
| 0.764706
| 0.173913
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116667
| 120
| 4
| 56
| 30
| 0.867925
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
a8858a8c421b3f819ef4c2aa94f130b767aaff51
| 3,306
|
py
|
Python
|
isecret/loss/gan_loss.py
|
QtacierP/ISECRET
|
89dae29b3935bbd9ccbcaaa8c061a0aa469a8e21
|
[
"MIT"
] | 9
|
2021-06-30T13:21:27.000Z
|
2021-12-30T11:49:12.000Z
|
isecret/loss/gan_loss.py
|
QtacierP/ISECRET
|
89dae29b3935bbd9ccbcaaa8c061a0aa469a8e21
|
[
"MIT"
] | null | null | null |
isecret/loss/gan_loss.py
|
QtacierP/ISECRET
|
89dae29b3935bbd9ccbcaaa8c061a0aa469a8e21
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import torch.nn.functional as F
import torch
from abc import ABC, abstractmethod
class GANLoss(ABC):
def __init__(self, args):
self.args = args
@ abstractmethod
def update_g(self, good_dis, fake_good, bad_dis=None, fake_bad=None):
pass
@abstractmethod
def update_d(self, good_dis, real_good, fake_good, bad_dis=None, real_bad=None, fake_bad=None):
pass
class NaiveGANLoss(GANLoss):
def __init__(self, args):
GANLoss.__init__(self, args)
self.real_loss = lambda x: F.binary_cross_entropy_with_logits\
(x, torch.ones_like(x))
self.fake_loss = lambda x: F.binary_cross_entropy_with_logits\
(x, torch.zeros_like(x))
def update_g(self, good_dis, fake_good, bad_dis=None, fake_bad=None):
fake_good_logits = good_dis(fake_good)
good_loss = self.real_loss(fake_good_logits)
if bad_dis is None:
return good_loss
fake_bad_logits = bad_dis(fake_bad)
bad_loss = self.real_loss(fake_bad_logits)
return good_loss, bad_loss
def update_d(self, good_dis, real_good, fake_good, bad_dis=None, real_bad=None, fake_bad=None):
# Train dis_good
real_good_logits = good_dis(real_good)
fake_good_logits = good_dis(fake_good.detach())
real_good_loss = self.real_loss(real_good_logits)
fake_good_loss = self.fake_loss(fake_good_logits)
good_dis_loss = (real_good_loss + fake_good_loss) / 2
if bad_dis is None:
return good_dis_loss
# Train dis_bad
real_bad_logits = bad_dis(real_bad)
fake_bad_logits = bad_dis(fake_bad.detach())
real_bad_loss = self.real_loss(real_bad_logits)
fake_bad_loss = self.fake_loss(fake_bad_logits)
bad_dis_loss = (real_bad_loss + fake_bad_loss) / 2
return good_dis_loss, bad_dis_loss
class LSGANLoss(nn.Module):
def __init__(self, args):
GANLoss.__init__(self, args)
self.real_loss = lambda x: F.mse_loss(x, torch.ones_like(x))
self.fake_loss = lambda x: F.mse_loss(x, torch.zeros_like(x))
def update_g(self, good_dis, fake_good, bad_dis=None, fake_bad=None):
fake_good_logits = good_dis(fake_good)
good_loss = self.real_loss(fake_good_logits)
if bad_dis is None:
return good_loss
fake_bad_logits = bad_dis(fake_bad)
bad_loss = self.real_loss(fake_bad_logits)
return good_loss, bad_loss
def update_d(self, good_dis, real_good, fake_good, bad_dis=None, real_bad=None, fake_bad=None):
# Train dis_good
real_good_logits = good_dis(real_good)
fake_good_logits = good_dis(fake_good.detach())
real_good_loss = self.real_loss(real_good_logits)
fake_good_loss = self.fake_loss(fake_good_logits)
good_dis_loss = (real_good_loss + fake_good_loss) / 2
if bad_dis is None:
return good_dis_loss
# Train dis_bad
real_bad_logits = bad_dis(real_bad)
fake_bad_logits = bad_dis(fake_bad.detach())
real_bad_loss = self.real_loss(real_bad_logits)
fake_bad_loss = self.fake_loss(fake_bad_logits)
bad_dis_loss = (real_bad_loss + fake_bad_loss) / 2
return good_dis_loss, bad_dis_loss
| 37.146067
| 99
| 0.681488
| 514
| 3,306
| 3.939689
| 0.089494
| 0.086914
| 0.059259
| 0.06716
| 0.901235
| 0.897284
| 0.897284
| 0.897284
| 0.89037
| 0.89037
| 0
| 0.001585
| 0.23654
| 3,306
| 88
| 100
| 37.568182
| 0.800713
| 0.017241
| 0
| 0.797101
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0.028986
| 0.057971
| 0
| 0.347826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a897f6f163824861f033752239e86edbfd4e9f68
| 12,988
|
py
|
Python
|
backend/api/views/railroad_route_view.py
|
ferdn4ndo/infotrem
|
4728c5fe8385dcc0a1c75068429fa20e2afbf6f2
|
[
"MIT"
] | null | null | null |
backend/api/views/railroad_route_view.py
|
ferdn4ndo/infotrem
|
4728c5fe8385dcc0a1c75068429fa20e2afbf6f2
|
[
"MIT"
] | 1
|
2020-06-21T18:38:14.000Z
|
2020-06-21T21:57:09.000Z
|
backend/api/views/railroad_route_view.py
|
ferdn4ndo/infotrem
|
4728c5fe8385dcc0a1c75068429fa20e2afbf6f2
|
[
"MIT"
] | null | null | null |
from rest_framework import viewsets
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from infotrem.models.railroad_route_model import RailroadRoute, RailroadRouteInformation, RailroadRouteSection, \
RailroadRouteSectionInformation, RailroadRouteSectionLocation, RailroadRouteSectionPath
from infotrem.serializers.railroad_route_serializer import RailroadRouteSerializer, RailroadRouteInformationSerializer, \
RailroadRouteSectionSerializer, RailroadRouteSectionInformationSerializer, RailroadRouteSectionLocationSerializer, \
RailroadRouteSectionPathSerializer
from infotrem.services.policy import IsLoggedIn, IsModeratorOrReadOnly
class RailroadRouteViewSet(viewsets.ViewSet):
permission_classes = [IsLoggedIn, IsModeratorOrReadOnly]
def list(self, request):
queryset = RailroadRoute.objects.all()
serializer = RailroadRouteSerializer(queryset, many=True)
return Response(serializer.data)
def retrieve(self, request, pk=None):
queryset = RailroadRoute.objects.all()
company = get_object_or_404(queryset, pk=pk)
serializer = RailroadRouteSerializer(company)
return serializer.data
def partial_update(self, request, pk=None):
queryset = RailroadRoute.objects.all()
company = get_object_or_404(queryset, pk=pk)
serializer = RailroadRouteSerializer(company, data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
def destroy(self, request, pk=None):
queryset = RailroadRoute.objects.all()
company = get_object_or_404(queryset, pk=pk)
serializer = RailroadRouteSerializer(company)
return Response(serializer.data)
class RailroadRouteInformationViewSet(viewsets.ViewSet):
permission_classes = [IsLoggedIn, IsModeratorOrReadOnly]
def list(self, request, route_id=None):
route = get_object_or_404(RailroadRoute.objects.all(), pk=route_id)
queryset = RailroadRouteInformation.objects.filter(railroad_route=route)
serializer = RailroadRouteInformationSerializer(queryset, many=True)
return Response(serializer.data)
def create(self, request, route_id=None):
data = request.data
data['railroad_route_id'] = route_id
serializer = RailroadRouteInformationSerializer(data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
def retrieve(self, request, route_id=None, pk=None):
route = get_object_or_404(RailroadRoute.objects.all(), pk=route_id)
queryset = RailroadRouteInformation.objects.filter(railroad_route=route)
information = get_object_or_404(queryset, pk=pk)
serializer = RailroadRouteInformationSerializer(information)
return Response(serializer.data)
def partial_update(self, request, route_id=None, pk=None):
route = get_object_or_404(RailroadRoute.objects.all(), pk=route_id)
queryset = RailroadRouteInformation.objects.filter(railroad_route=route)
information = get_object_or_404(queryset, pk=pk)
serializer = RailroadRouteInformationSerializer(information)
return Response(serializer.data)
class RailroadRouteSectionViewSet(viewsets.ViewSet):
permission_classes = [IsLoggedIn, IsModeratorOrReadOnly]
def list(self, request, route_id=None):
route = get_object_or_404(RailroadRoute.objects.all(), pk=route_id)
queryset = RailroadRouteInformation.objects.filter(railroad_route=route)
serializer = RailroadRouteSectionSerializer(queryset, many=True)
return Response(serializer.data)
def create(self, request, route_id=None):
route = get_object_or_404(RailroadRoute.objects.all(), pk=route_id)
data = request.data
data['railroad_route'] = route
serializer = RailroadRouteSectionSerializer(data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
def retrieve(self, request, route_id=None, pk=None):
route = get_object_or_404(RailroadRoute.objects.all(), pk=route_id)
queryset = RailroadRouteSection.objects.filter(railroad_route=route)
information = get_object_or_404(queryset, pk=pk)
serializer = RailroadRouteSectionSerializer(information)
return Response(serializer.data)
def partial_update(self, request, route_id=None, pk=None):
route = get_object_or_404(RailroadRoute.objects.all(), pk=route_id)
queryset = RailroadRouteSection.objects.filter(railroad_route=route)
information = get_object_or_404(queryset, pk=pk)
serializer = RailroadRouteSectionSerializer(information)
return Response(serializer.data)
def destroy(self, request, route_id=None, pk=None):
route = get_object_or_404(RailroadRoute.objects.all(), pk=route_id)
queryset = RailroadRouteSection.objects.filter(railroad_route=route)
information = get_object_or_404(queryset, pk=pk)
serializer = RailroadRouteSectionSerializer(information)
return Response(serializer.data)
class RailroadRouteSectionInformationViewSet(viewsets.ViewSet):
    """CRUD endpoints for RailroadRouteSectionInformation records nested
    under /routes/<route_id>/sections/<section_id>/.

    The parent route and section are resolved on every call so that a bad
    route_id/section_id yields a 404 instead of leaking data.
    """

    permission_classes = [IsLoggedIn, IsModeratorOrReadOnly]

    def _get_section(self, route_id, section_id):
        """Return the parent section, raising Http404 if the route or section is missing."""
        # The route lookup exists purely to 404 on an invalid route_id.
        get_object_or_404(RailroadRoute.objects.all(), pk=route_id)
        return get_object_or_404(RailroadRouteSection.objects.all(), pk=section_id)

    def list(self, request, route_id=None, section_id=None):
        """List all information records belonging to the section."""
        section = self._get_section(route_id, section_id)
        queryset = RailroadRouteSectionInformation.objects.filter(railroad_route_section=section)
        serializer = RailroadRouteSectionInformationSerializer(queryset, many=True)
        return Response(serializer.data)

    def create(self, request, route_id=None, section_id=None):
        """Create an information record attached to the section."""
        section = self._get_section(route_id, section_id)
        # BUG FIX: copy request.data before mutating it — request.data may be
        # an immutable QueryDict, and mutating the shared request payload is
        # unsafe either way.
        data = request.data.copy()
        data['railroad_route_section_id'] = section.id
        serializer = RailroadRouteSectionInformationSerializer(data=data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)

    def retrieve(self, request, route_id=None, section_id=None, pk=None):
        """Return a single information record scoped to the section."""
        section = self._get_section(route_id, section_id)
        queryset = RailroadRouteSectionInformation.objects.filter(railroad_route_section=section)
        information = get_object_or_404(queryset, pk=pk)
        serializer = RailroadRouteSectionInformationSerializer(information)
        return Response(serializer.data)

    def partial_update(self, request, route_id=None, section_id=None, pk=None):
        """Apply a partial update to an information record.

        BUG FIX: the previous implementation only re-serialized the object
        and never applied request.data, so PATCH silently did nothing.
        """
        section = self._get_section(route_id, section_id)
        queryset = RailroadRouteSectionInformation.objects.filter(railroad_route_section=section)
        information = get_object_or_404(queryset, pk=pk)
        serializer = RailroadRouteSectionInformationSerializer(
            information, data=request.data, partial=True
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)

    def destroy(self, request, route_id=None, section_id=None, pk=None):
        """Delete an information record.

        BUG FIX: the previous implementation never called delete(), so
        DELETE silently left the record in place. The serialized state is
        captured before deletion and returned, keeping the response shape.
        """
        section = self._get_section(route_id, section_id)
        queryset = RailroadRouteSectionInformation.objects.filter(railroad_route_section=section)
        information = get_object_or_404(queryset, pk=pk)
        data = RailroadRouteSectionInformationSerializer(information).data
        information.delete()
        return Response(data)
class RailroadRouteSectionLocationViewSet(viewsets.ViewSet):
    """CRUD endpoints for RailroadRouteSectionLocation records nested
    under /routes/<route_id>/sections/<section_id>/.
    """

    permission_classes = [IsLoggedIn, IsModeratorOrReadOnly]

    def _get_section(self, route_id, section_id):
        """Return the parent section, raising Http404 if the route or section is missing."""
        # The route lookup exists purely to 404 on an invalid route_id.
        get_object_or_404(RailroadRoute.objects.all(), pk=route_id)
        return get_object_or_404(RailroadRouteSection.objects.all(), pk=section_id)

    def list(self, request, route_id=None, section_id=None):
        """List all locations belonging to the section."""
        section = self._get_section(route_id, section_id)
        queryset = RailroadRouteSectionLocation.objects.filter(railroad_route_section=section)
        serializer = RailroadRouteSectionLocationSerializer(queryset, many=True)
        return Response(serializer.data)

    def create(self, request, route_id=None, section_id=None):
        """Create a location attached to the section."""
        section = self._get_section(route_id, section_id)
        # BUG FIX: copy request.data before mutating it — request.data may be
        # an immutable QueryDict.
        data = request.data.copy()
        data['railroad_route_section_id'] = section.id
        serializer = RailroadRouteSectionLocationSerializer(data=data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)

    def retrieve(self, request, route_id=None, section_id=None, pk=None):
        """Return a single location scoped to the section."""
        section = self._get_section(route_id, section_id)
        queryset = RailroadRouteSectionLocation.objects.filter(railroad_route_section=section)
        location = get_object_or_404(queryset, pk=pk)
        serializer = RailroadRouteSectionLocationSerializer(location)
        return Response(serializer.data)

    def partial_update(self, request, route_id=None, section_id=None, pk=None):
        """Apply a partial update to a location.

        BUG FIX: the previous implementation only re-serialized the object
        and never applied request.data, so PATCH silently did nothing.
        """
        section = self._get_section(route_id, section_id)
        queryset = RailroadRouteSectionLocation.objects.filter(railroad_route_section=section)
        location = get_object_or_404(queryset, pk=pk)
        serializer = RailroadRouteSectionLocationSerializer(
            location, data=request.data, partial=True
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)

    def destroy(self, request, route_id=None, section_id=None, pk=None):
        """Delete a location.

        BUG FIX: the previous implementation never called delete(), so
        DELETE silently left the record in place.
        """
        section = self._get_section(route_id, section_id)
        queryset = RailroadRouteSectionLocation.objects.filter(railroad_route_section=section)
        location = get_object_or_404(queryset, pk=pk)
        data = RailroadRouteSectionLocationSerializer(location).data
        location.delete()
        return Response(data)
class RailroadRouteSectionPathViewSet(viewsets.ViewSet):
    """CRUD endpoints for RailroadRouteSectionPath records nested
    under /routes/<route_id>/sections/<section_id>/.
    """

    permission_classes = [IsLoggedIn, IsModeratorOrReadOnly]

    def _get_section(self, route_id, section_id):
        """Return the parent section, raising Http404 if the route or section is missing."""
        # The route lookup exists purely to 404 on an invalid route_id.
        get_object_or_404(RailroadRoute.objects.all(), pk=route_id)
        return get_object_or_404(RailroadRouteSection.objects.all(), pk=section_id)

    def list(self, request, route_id=None, section_id=None):
        """List all paths belonging to the section."""
        section = self._get_section(route_id, section_id)
        queryset = RailroadRouteSectionPath.objects.filter(railroad_route_section=section)
        serializer = RailroadRouteSectionPathSerializer(queryset, many=True)
        return Response(serializer.data)

    def create(self, request, route_id=None, section_id=None):
        """Create a path attached to the section."""
        section = self._get_section(route_id, section_id)
        # BUG FIX: copy request.data before mutating it — request.data may be
        # an immutable QueryDict.
        data = request.data.copy()
        data['railroad_route_section_id'] = section.id
        serializer = RailroadRouteSectionPathSerializer(data=data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)

    def retrieve(self, request, route_id=None, section_id=None, pk=None):
        """Return a single path scoped to the section."""
        section = self._get_section(route_id, section_id)
        queryset = RailroadRouteSectionPath.objects.filter(railroad_route_section=section)
        path = get_object_or_404(queryset, pk=pk)
        serializer = RailroadRouteSectionPathSerializer(path)
        return Response(serializer.data)

    def partial_update(self, request, route_id=None, section_id=None, pk=None):
        """Apply a partial update to a path.

        BUG FIX: the previous implementation only re-serialized the object
        and never applied request.data, so PATCH silently did nothing.
        """
        section = self._get_section(route_id, section_id)
        queryset = RailroadRouteSectionPath.objects.filter(railroad_route_section=section)
        path = get_object_or_404(queryset, pk=pk)
        serializer = RailroadRouteSectionPathSerializer(
            path, data=request.data, partial=True
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)

    def destroy(self, request, route_id=None, section_id=None, pk=None):
        """Delete a path.

        BUG FIX: the previous implementation never called delete(), so
        DELETE silently left the record in place.
        """
        section = self._get_section(route_id, section_id)
        queryset = RailroadRouteSectionPath.objects.filter(railroad_route_section=section)
        path = get_object_or_404(queryset, pk=pk)
        data = RailroadRouteSectionPathSerializer(path).data
        path.delete()
        return Response(data)
| 52.582996
| 121
| 0.749538
| 1,402
| 12,988
| 6.711127
| 0.052068
| 0.053566
| 0.065469
| 0.083324
| 0.87225
| 0.861409
| 0.855883
| 0.846105
| 0.81794
| 0.81794
| 0
| 0.015472
| 0.163998
| 12,988
| 246
| 122
| 52.796748
| 0.851078
| 0
| 0
| 0.814634
| 0
| 0
| 0.008161
| 0.005775
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136585
| false
| 0
| 0.029268
| 0
| 0.360976
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7640176e6a6b79a177585987d0992db4f85051f9
| 9,736
|
py
|
Python
|
src/webhook/tests/test_handlers.py
|
rca/issuebranch
|
525dbb30617fe452aba32613b50eca2420f22040
|
[
"Apache-2.0"
] | null | null | null |
src/webhook/tests/test_handlers.py
|
rca/issuebranch
|
525dbb30617fe452aba32613b50eca2420f22040
|
[
"Apache-2.0"
] | 2
|
2017-07-29T13:34:32.000Z
|
2021-09-15T21:25:35.000Z
|
src/webhook/tests/test_handlers.py
|
rca/issuebranch
|
525dbb30617fe452aba32613b50eca2420f22040
|
[
"Apache-2.0"
] | 3
|
2017-07-27T16:48:58.000Z
|
2018-08-01T18:57:44.000Z
|
from unittest import mock
from django.test import TestCase
from webhook import handlers
from webhook.tests.utils import get_project_webhook_data
# Fixture: GitHub "project_card moved" webhook payload where the card left
# column 2001377 and landed in column 2001376 (see "changes.column_id.from").
from_on_deck_project_data = {
"action": "moved",
"changes": {"column_id": {"from": 2001377}},
"project_card": {
"url": "https://api.github.com/projects/columns/cards/6731956",
"column_url": "https://api.github.com/projects/columns/2001376",
"column_id": 2001376,
"id": 6731956,
"note": None,
"creator": {
"login": "rca",
"id": 53537,
"avatar_url": "https://avatars3.githubusercontent.com/u/53537?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/rca",
"html_url": "https://github.com/rca",
"followers_url": "https://api.github.com/users/rca/followers",
"following_url": "https://api.github.com/users/rca/following{/other_user}",
"gists_url": "https://api.github.com/users/rca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/rca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/rca/subscriptions",
"organizations_url": "https://api.github.com/users/rca/orgs",
"repos_url": "https://api.github.com/users/rca/repos",
"events_url": "https://api.github.com/users/rca/events{/privacy}",
"received_events_url": "https://api.github.com/users/rca/received_events",
"type": "User",
"site_admin": False,
},
"created_at": "2018-01-16T17:49:17Z",
"updated_at": "2018-01-17T19:59:50Z",
"content_url": "https://api.github.com/repos/openslate/openslate/issues/468",
"after_id": None,
},
"organization": {
"login": "openslate",
"id": 1767240,
"url": "https://api.github.com/orgs/openslate",
"repos_url": "https://api.github.com/orgs/openslate/repos",
"events_url": "https://api.github.com/orgs/openslate/events",
"hooks_url": "https://api.github.com/orgs/openslate/hooks",
"issues_url": "https://api.github.com/orgs/openslate/issues",
"members_url": "https://api.github.com/orgs/openslate/members{/member}",
"public_members_url": "https://api.github.com/orgs/openslate/public_members{/member}",
"avatar_url": "https://avatars2.githubusercontent.com/u/1767240?v=4",
"description": "",
},
"sender": {
"login": "rca",
"id": 53537,
"avatar_url": "https://avatars3.githubusercontent.com/u/53537?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/rca",
"html_url": "https://github.com/rca",
"followers_url": "https://api.github.com/users/rca/followers",
"following_url": "https://api.github.com/users/rca/following{/other_user}",
"gists_url": "https://api.github.com/users/rca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/rca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/rca/subscriptions",
"organizations_url": "https://api.github.com/users/rca/orgs",
"repos_url": "https://api.github.com/users/rca/repos",
"events_url": "https://api.github.com/users/rca/events{/privacy}",
"received_events_url": "https://api.github.com/users/rca/received_events",
"type": "User",
"site_admin": False,
},
}
# Fixture: the mirror payload — the card moved FROM column 2001376 INTO
# column 2001377 (the "On Deck" column per on_deck_column_data below is
# 2001377 — note the swapped column ids versus from_on_deck_project_data).
to_on_deck_project_data = {
"action": "moved",
"changes": {"column_id": {"from": 2001376}},
"project_card": {
"url": "https://api.github.com/projects/columns/cards/6731956",
"column_url": "https://api.github.com/projects/columns/2001377",
"column_id": 2001377,
"id": 6731956,
"note": None,
"creator": {
"login": "rca",
"id": 53537,
"avatar_url": "https://avatars3.githubusercontent.com/u/53537?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/rca",
"html_url": "https://github.com/rca",
"followers_url": "https://api.github.com/users/rca/followers",
"following_url": "https://api.github.com/users/rca/following{/other_user}",
"gists_url": "https://api.github.com/users/rca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/rca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/rca/subscriptions",
"organizations_url": "https://api.github.com/users/rca/orgs",
"repos_url": "https://api.github.com/users/rca/repos",
"events_url": "https://api.github.com/users/rca/events{/privacy}",
"received_events_url": "https://api.github.com/users/rca/received_events",
"type": "User",
"site_admin": False,
},
"created_at": "2018-01-16T17:49:17Z",
"updated_at": "2018-01-17T19:26:28Z",
"content_url": "https://api.github.com/repos/openslate/openslate/issues/468",
"after_id": None,
},
"organization": {
"login": "openslate",
"id": 1767240,
"url": "https://api.github.com/orgs/openslate",
"repos_url": "https://api.github.com/orgs/openslate/repos",
"events_url": "https://api.github.com/orgs/openslate/events",
"hooks_url": "https://api.github.com/orgs/openslate/hooks",
"issues_url": "https://api.github.com/orgs/openslate/issues",
"members_url": "https://api.github.com/orgs/openslate/members{/member}",
"public_members_url": "https://api.github.com/orgs/openslate/public_members{/member}",
"avatar_url": "https://avatars2.githubusercontent.com/u/1767240?v=4",
"description": "",
},
"sender": {
"login": "rca",
"id": 53537,
"avatar_url": "https://avatars3.githubusercontent.com/u/53537?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/rca",
"html_url": "https://github.com/rca",
"followers_url": "https://api.github.com/users/rca/followers",
"following_url": "https://api.github.com/users/rca/following{/other_user}",
"gists_url": "https://api.github.com/users/rca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/rca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/rca/subscriptions",
"organizations_url": "https://api.github.com/users/rca/orgs",
"repos_url": "https://api.github.com/users/rca/repos",
"events_url": "https://api.github.com/users/rca/events{/privacy}",
"received_events_url": "https://api.github.com/users/rca/received_events",
"type": "User",
"site_admin": False,
},
}
# Fixture: GitHub API response describing the "On Deck" project column
# (id 2001377), as returned when the handler looks a column up.
on_deck_column_data = {
"url": "https://api.github.com/projects/columns/2001377",
"project_url": "https://api.github.com/projects/1206552",
"cards_url": "https://api.github.com/projects/columns/2001377/cards",
"id": 2001377,
"name": "On Deck",
"created_at": "2018-01-11T19:36:33Z",
"updated_at": "2018-01-17T19:26:28Z",
}
class ProjectHandlerTestCase(TestCase):
    """Tests for handlers.ProjectHandler's project-card webhook handling."""

    def _get_project_handler(self, data):
        """Build a ProjectHandler for *data* with its HTTP session mocked out."""
        project_handler = handlers.ProjectHandler(data)
        project_handler.session = mock.Mock()
        return project_handler

    def test_move_grooming_card(self):
        """A card moved within the Grooming column must not trigger an on-deck sync."""
        data = get_project_webhook_data()
        project_handler = self._get_project_handler(data)
        project_handler.session.request.return_value.json.return_value = {
            "url": "https://api.github.com/projects/columns/2001376",
            "project_url": "https://api.github.com/projects/1206552",
            "cards_url": "https://api.github.com/projects/columns/2001376/cards",
            "id": 2001376,
            "name": "Grooming",
            "created_at": "2018-01-11T19:36:33Z",
            "updated_at": "2018-01-17T17:46:36Z",
        }
        project_handler.sync_on_deck = mock.Mock()
        project_handler.run()
        # a grooming card will not sync on deck
        project_handler.sync_on_deck.assert_not_called()

    def test_move_on_deck_card(self):
        """A card moved in the On Deck column triggers an on-deck sync."""
        data = get_project_webhook_data()
        project_handler = self._get_project_handler(data)
        project_handler.session.request.return_value.json.return_value = {
            "url": "https://api.github.com/projects/columns/2001376",
            "project_url": "https://api.github.com/projects/1206552",
            "cards_url": "https://api.github.com/projects/columns/2001376/cards",
            "id": 2001376,
            "name": "On Deck",
            "created_at": "2018-01-11T19:36:33Z",
            "updated_at": "2018-01-17T17:46:36Z",
        }
        # BUG FIX: sync_on_deck must be replaced with a Mock BEFORE run(),
        # otherwise assert_called_with below is invoked on a plain bound
        # method (AttributeError) and the real sync would also execute.
        project_handler.sync_on_deck = mock.Mock()
        project_handler.run()
        # an on deck card will sync!
        project_handler.sync_on_deck.assert_called_with()

    def test_detect_remove_from_on_deck(self):
        """Moving a card out of the on-deck column calls remove_from_on_deck."""
        project_handler = self._get_project_handler(from_on_deck_project_data)
        project_handler.remove_from_on_deck = mock.Mock()
        project_handler.run()
        project_handler.remove_from_on_deck.assert_called_with(mock.ANY)

    def test_detect_add_to_on_deck(self):
        """Moving a card into the on-deck column calls add_to_on_deck."""
        project_handler = self._get_project_handler(to_on_deck_project_data)
        project_handler.add_to_on_deck = mock.Mock()
        project_handler.run()
        project_handler.add_to_on_deck.assert_called_with(mock.ANY)

    def test_add_to_on_deck(self):
        """NOTE(review): exact duplicate of test_detect_add_to_on_deck —
        kept for safety; consider removing or giving it distinct coverage."""
        project_handler = self._get_project_handler(to_on_deck_project_data)
        project_handler.add_to_on_deck = mock.Mock()
        project_handler.run()
        project_handler.add_to_on_deck.assert_called_with(mock.ANY)
| 43.464286
| 94
| 0.619864
| 1,198
| 9,736
| 4.831386
| 0.108514
| 0.109191
| 0.131133
| 0.202661
| 0.915515
| 0.911368
| 0.892191
| 0.884243
| 0.873359
| 0.85038
| 0
| 0.049102
| 0.205115
| 9,736
| 223
| 95
| 43.659193
| 0.698798
| 0.006574
| 0
| 0.757732
| 0
| 0
| 0.53201
| 0
| 0
| 0
| 0
| 0
| 0.025773
| 1
| 0.030928
| false
| 0
| 0.020619
| 0
| 0.061856
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
764e80c14aa7776ed15e0dc43f55078c72632dc2
| 562
|
py
|
Python
|
python/mprpc/config/__init__.py
|
MusicScience37/mpRPC
|
f0256eeae4a0789038c4d12fb96efa5006968724
|
[
"Apache-2.0"
] | null | null | null |
python/mprpc/config/__init__.py
|
MusicScience37/mpRPC
|
f0256eeae4a0789038c4d12fb96efa5006968724
|
[
"Apache-2.0"
] | null | null | null |
python/mprpc/config/__init__.py
|
MusicScience37/mpRPC
|
f0256eeae4a0789038c4d12fb96efa5006968724
|
[
"Apache-2.0"
] | null | null | null |
"""module of configuration
"""
from .._mprpc_cpp import (
CompressionType,
CompressionConfig,
TCPAcceptorConfig,
TCPConnectorConfig,
UDPAcceptorConfig,
UDPConnectorConfig,
ServerConfig,
TransportType,
ClientConfig,
MPRPCConfig,
load,
)
# Re-tag every name re-exported from the C++ extension so that its
# reported module is this package rather than the private extension
# module it was defined in.
_reexported = (
    CompressionType,
    CompressionConfig,
    TCPAcceptorConfig,
    TCPConnectorConfig,
    UDPAcceptorConfig,
    UDPConnectorConfig,
    ServerConfig,
    TransportType,
    ClientConfig,
    MPRPCConfig,
    load,
)
for _obj in _reexported:
    _obj.__module__ = "mprpc.config"
# keep the module namespace clean of loop temporaries
del _obj, _reexported
| 17.030303
| 35
| 0.688612
| 38
| 562
| 10.026316
| 0.605263
| 0.167979
| 0.257218
| 0.351706
| 0.808399
| 0.808399
| 0.808399
| 0.808399
| 0.808399
| 0.808399
| 0
| 0
| 0.240214
| 562
| 32
| 36
| 17.5625
| 0.892272
| 0.040925
| 0
| 0.785714
| 0
| 0
| 0.022556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.035714
| 0
| 0.035714
| 0
| 0
| 0
| 1
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
768c42b1434b164ec69afc189cc7592e25239ded
| 135
|
py
|
Python
|
first-hw.py
|
ThuraDwe/astr-119
|
4564598064c805a54f83543e78b9104c3665fd52
|
[
"MIT"
] | null | null | null |
first-hw.py
|
ThuraDwe/astr-119
|
4564598064c805a54f83543e78b9104c3665fd52
|
[
"MIT"
] | 8
|
2020-10-01T23:52:53.000Z
|
2020-12-15T08:38:34.000Z
|
first-hw.py
|
ThuraDwe/astr-119
|
4564598064c805a54f83543e78b9104c3665fd52
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Introductory homework script: prints the author's name and pronouns.
# Thura Dwe He/Him/His
message = "Thura Dwe He/Him/His"
print(message)  # print out Thura Dwe He/Him/His
| 19.285714
| 62
| 0.718519
| 26
| 135
| 3.730769
| 0.576923
| 0.247423
| 0.309278
| 0.402062
| 0.597938
| 0.43299
| 0
| 0
| 0
| 0
| 0
| 0.008696
| 0.148148
| 135
| 6
| 63
| 22.5
| 0.834783
| 0.703704
| 0
| 0
| 0
| 0
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
7691fb3595228c5804c12c46f90037c773d37507
| 11,500
|
py
|
Python
|
venv/Lib/site-packages/scipy/spatial/tests/data/cdist-X2.txt.py
|
roshanba/mangal
|
f7b428811dc07214009cc33f0beb665ead402038
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
venv/Lib/site-packages/scipy/spatial/tests/data/cdist-X2.txt.py
|
roshanba/mangal
|
f7b428811dc07214009cc33f0beb665ead402038
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
venv/Lib/site-packages/scipy/spatial/tests/data/cdist-X2.txt.py
|
roshanba/mangal
|
f7b428811dc07214009cc33f0beb665ead402038
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX
| 547.619048
| 574
| 0.96
| 460
| 11,500
| 24
| 0.002174
| 1.995652
| 2.986957
| 3.973913
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0.04
| 11,500
| 20
| 575
| 575
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
76af285ef065bfe2a95791e0f351506796e3999b
| 8,384
|
py
|
Python
|
helper_fns/reporting_rules_postprocess.py
|
Avidereta/risk-averse-hetero-bo
|
36312f4f0eb2e7cb49f9e271c398de023994330a
|
[
"MIT"
] | 6
|
2021-11-08T09:40:38.000Z
|
2022-01-17T12:26:33.000Z
|
helper_fns/reporting_rules_postprocess.py
|
Avidereta/risk-averse-hetero-bo
|
36312f4f0eb2e7cb49f9e271c398de023994330a
|
[
"MIT"
] | null | null | null |
helper_fns/reporting_rules_postprocess.py
|
Avidereta/risk-averse-hetero-bo
|
36312f4f0eb2e7cb49f9e271c398de023994330a
|
[
"MIT"
] | 1
|
2022-03-03T11:03:20.000Z
|
2022-03-03T11:03:20.000Z
|
from botorch.models import FixedNoiseGP, SingleTaskGP
from rabo.acquisition.acquisition import RiskAverseUpperConfidenceBound, UpperConfidenceBound, LowerConfidenceBound
import torch
from torch import Tensor
from rabo.optimization.bo_loop import bo_loop_learn_rho, evaluate_rho_mean
def transform_x_to_tensor(df_values) -> Tensor:
    """
    Transform dumped values from a DataFrame column into a tensor of shape
    n_points x dimension.

    :param df_values: iterable of rows, each row an iterable of tensors
        (e.g. the per-dimension input components dumped per BO iteration)
    :return: stacked 2-D Tensor, one row per input point
    """
    # hstack joins each row's components; vstack stacks the rows.
    # list(row) replaces the pointless copy comprehension [x_i for x_i in x].
    return torch.vstack([torch.hstack(list(row)) for row in df_values])
def report_idx_max_ymean(resdf):
    """
    Return, for each iteration t, the index of the best (maximum) 'ymean'
    observed within the first t rows.

    :param resdf: pd.DataFrame with column 'ymean'
    :return: list of row indices, one entry per row of resdf
    """
    ymean = resdf['ymean'].values
    best_idx = []
    best = 0
    # Running argmax in O(n) instead of re-running argmax over every prefix
    # (O(n^2)). Strict '>' preserves numpy argmax's first-occurrence tie rule.
    for i in range(len(ymean)):
        if ymean[i] > ymean[best]:
            best = i
        best_idx.append(best)
    return best_idx
def report_idx_max_mv(resdf, gamma):
    """
    Return, for each iteration t, the index of the best mean-variance score
    (ymean - gamma * yvar) observed within the first t rows.

    :param resdf: pd.DataFrame with columns 'ymean' and 'yvar'
    :param gamma: risk-aversion coefficient weighting the variance penalty
    :return: list of row indices, one entry per row of resdf
    """
    mv = resdf['ymean'].values - gamma * resdf['yvar'].values
    best_idx = []
    best = 0
    # Running argmax in O(n) instead of re-running argmax over every prefix
    # (O(n^2)). Strict '>' preserves numpy argmax's first-occurrence tie rule.
    for i in range(len(mv)):
        if mv[i] > mv[best]:
            best = i
        best_idx.append(best)
    return best_idx
def report_idx_max_last_lcb_rahbo(resdf, t=None, dt=10):
    """
    Report, per iteration, the observed point maximizing the risk-averse LCB of
    the model reconstructed at that iteration (RAHBO reporting rule).

    Duplication fix: the loop and single-t branches were identical copy-pasted
    bodies; the shared GP-reconstruction/scoring logic now lives in one helper.

    :param resdf: pd.DataFrame with columns 'ymean', 'yvar', 'inputs', 'gps',
        'gps_var_state_dict' and a per-row 'config' dict
    :param t: single iteration to evaluate; if None, sweep every dt-th iteration
    :param dt: stride of the sweep (each result is repeated dt times)
    :return: (idx_all, lcb_f) — reported indices per iteration (initial points
        report themselves) and the corresponding best LCB values
    """
    def _best_lcb(t):
        # Rebuild both GPs from the state dicts stored at iteration t and score
        # every point observed so far with the risk-averse LCB.
        # NOTE(review): training data is sliced [:t-1] while state dicts are
        # indexed at t — confirm this off-by-one matches how gps were dumped.
        train_y = torch.Tensor(resdf['ymean'].values[:t - 1]).reshape((-1, 1))
        train_x = transform_x_to_tensor(resdf['inputs'].values[:t - 1])
        train_yvar = torch.Tensor(resdf['yvar'].values[:t - 1]).reshape((-1, 1))
        gp = FixedNoiseGP(train_x, train_y, train_yvar)
        gp_var = SingleTaskGP(train_x, train_yvar)
        gp.load_state_dict(resdf['gps'][t])
        gp_var.load_state_dict(resdf['gps_var_state_dict'][t])
        beta = resdf.iloc[0].config['bo_method']['beta']
        gamma = resdf.iloc[0].config['bo_method']['gamma']
        # maximize=False turns the acquisition into a lower confidence bound.
        ralcb_function = RiskAverseUpperConfidenceBound(gp, gp_var, beta, beta, gamma, maximize=False)
        lcbs = ralcb_function.forward(train_x.reshape((len(train_x), 1, -1)))
        return int(lcbs.argmax()), lcbs.max()

    lcb_f = []
    idxs = []
    n_initial = resdf.iloc[0].config['n_initial']
    if resdf.iloc[0].config['bo_method']['name'] == 'rahbo_us':
        # rahbo_us spends an extra budget on variance estimation up front.
        n_initial += resdf.iloc[0].config['bo_method']['n_budget_var']
    if t is None:
        for t in range(n_initial, len(resdf), dt):
            if t % 10 == 0:
                print(f'report_idx_max_last_lcb_rahbo, t={t}')
            idx, best = _best_lcb(t)
            idxs.extend([idx] * dt)
            lcb_f.extend([best] * dt)
    else:
        idx, best = _best_lcb(t)
        idxs.append(idx)
        lcb_f.append(best)
    # Initial design points simply report themselves.
    idx_all = list(range(n_initial))
    idx_all.extend(idxs)
    return idx_all, lcb_f
def report_idx_max_last_lcb_rahbous(resdf, t=None, dt=10):
    """
    Report, per iteration, the observed point maximizing the variance-penalized
    LCB (plain LCB minus the estimated risk term) of the model reconstructed at
    that iteration (RAHBO-US reporting rule).

    Duplication fix: the loop and single-t branches were identical copy-pasted
    bodies; the shared GP-reconstruction/scoring logic now lives in one helper.

    :param resdf: pd.DataFrame with columns 'ymean', 'yvar', 'inputs', 'gps',
        'gps_var_state_dict' and a per-row 'config' dict
    :param t: single iteration to evaluate; if None, sweep every dt-th iteration
    :param dt: stride of the sweep (each result is repeated dt times)
    :return: (idx_all, lcb_f) — reported indices per iteration and the
        corresponding best penalized-LCB values
    """
    def _best_lcb(t):
        # Rebuild the mean and variance GPs stored at iteration t, then score
        # every point observed so far with LCB(x) - rho_mean(x).
        # NOTE(review): data sliced [:t-1] vs state dicts at [t] — confirm the
        # off-by-one matches how gps were dumped.
        train_y = torch.Tensor(resdf['ymean'].values[:t - 1]).reshape((-1, 1))
        train_x = transform_x_to_tensor(resdf['inputs'].values[:t - 1])
        train_yvar = torch.Tensor(resdf['yvar'].values[:t - 1]).reshape((-1, 1))
        gp = FixedNoiseGP(train_x, train_y, train_yvar)
        gp_var = SingleTaskGP(train_x, train_yvar)
        gp.load_state_dict(resdf['gps'][t])
        gp_var.load_state_dict(resdf['gps_var_state_dict'][t])
        beta = resdf.iloc[0].config['bo_method']['beta']
        gamma = resdf.iloc[0].config['bo_method']['gamma']
        lcb_function = LowerConfidenceBound(gp, beta, maximize=False)
        x = train_x.reshape((len(train_x), 1, -1))
        lcbs = lcb_function.forward(x) - evaluate_rho_mean(x, gp_var, gamma, 1)
        return int(lcbs.argmax()), lcbs.max()

    lcb_f = []
    idxs = []
    n_initial = resdf.iloc[0].config['n_initial']
    # rahbo_us always spends an extra variance-estimation budget up front.
    n_initial += resdf.iloc[0].config['bo_method']['n_budget_var']
    if t is None:
        for t in range(n_initial, len(resdf), dt):
            if t % 10 == 0:
                print(f'report_idx_max_last_lcb_rahbo, t={t}')
            idx, best = _best_lcb(t)
            idxs.extend([idx] * dt)
            lcb_f.extend([best] * dt)
    else:
        idx, best = _best_lcb(t)
        idxs.append(idx)
        lcb_f.append(best)
    # Initial design points simply report themselves.
    idx_all = list(range(n_initial))
    idx_all.extend(idxs)
    return idx_all, lcb_f
def report_idx_max_last_lcb_ucb(resdf, t=None, dt=10):
    """
    Report, per iteration, the observed point maximizing the plain LCB of the
    mean GP reconstructed at that iteration (GP-UCB baseline reporting rule).

    Duplication fix: the loop and single-t branches were identical copy-pasted
    bodies; the shared GP-reconstruction/scoring logic now lives in one helper.

    :param resdf: pd.DataFrame with columns 'ymean', 'yvar', 'inputs', 'gps'
        and a per-row 'config' dict
    :param t: single iteration to evaluate; if None, sweep every dt-th iteration
    :param dt: stride of the sweep (each result is repeated dt times)
    :return: (idx_all, lcb_f) — reported indices per iteration and the
        corresponding best LCB values
    """
    def _best_lcb(t):
        # Rebuild the mean GP stored at iteration t and score every point
        # observed so far with the lower confidence bound.
        # NOTE(review): data sliced [:t-1] vs state dict at [t] — confirm the
        # off-by-one matches how gps were dumped.
        train_y = torch.Tensor(resdf['ymean'].values[:t - 1]).reshape((-1, 1))
        train_x = transform_x_to_tensor(resdf['inputs'].values[:t - 1])
        train_yvar = torch.Tensor(resdf['yvar'].values[:t - 1]).reshape((-1, 1))
        gp = FixedNoiseGP(train_x, train_y, train_yvar)
        gp.load_state_dict(resdf['gps'][t])
        beta = resdf.iloc[0].config['bo_method']['beta']
        lcb_function = LowerConfidenceBound(gp, beta, maximize=False)
        lcbs = lcb_function.forward(train_x.reshape((len(train_x), 1, -1)))
        return int(lcbs.argmax()), lcbs.max()

    lcb_f = []
    idxs = []
    n_initial = resdf.iloc[0].config['n_initial']
    if t is None:
        for t in range(n_initial, len(resdf), dt):
            if t % 10 == 0:
                print(f'report_idx_max_last_lcb_ucb, t={t}')
            idx, best = _best_lcb(t)
            idxs.extend([idx] * dt)
            lcb_f.extend([best] * dt)
    else:
        idx, best = _best_lcb(t)
        idxs.append(idx)
        lcb_f.append(best)
    # Initial design points simply report themselves.
    idx_all = list(range(n_initial))
    idx_all.extend(idxs)
    return idx_all, lcb_f
| 41.098039
| 118
| 0.628459
| 1,228
| 8,384
| 4.051303
| 0.095277
| 0.061508
| 0.028945
| 0.051457
| 0.892261
| 0.889447
| 0.881005
| 0.881005
| 0.881005
| 0.881005
| 0
| 0.014093
| 0.229843
| 8,384
| 204
| 119
| 41.098039
| 0.756388
| 0.097448
| 0
| 0.877698
| 0
| 0
| 0.071736
| 0.011822
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043165
| false
| 0
| 0.035971
| 0
| 0.122302
| 0.021583
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4f19ccccdbea2fa5ce58e81e776284291960d35e
| 102
|
py
|
Python
|
app/routes.py
|
mzaglia/flask-snippets
|
e54c9ad15cc287b68eef89502fde861bf164f167
|
[
"Unlicense"
] | null | null | null |
app/routes.py
|
mzaglia/flask-snippets
|
e54c9ad15cc287b68eef89502fde861bf164f167
|
[
"Unlicense"
] | null | null | null |
app/routes.py
|
mzaglia/flask-snippets
|
e54c9ad15cc287b68eef89502fde861bf164f167
|
[
"Unlicense"
] | null | null | null |
from flask import current_app
@current_app.route('/')
def index():
    """Root endpoint; Flask serializes the returned dict into a JSON response."""
    return {'msg': 'Hello World'}
| 17
| 33
| 0.676471
| 14
| 102
| 4.785714
| 0.857143
| 0.298507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156863
| 102
| 5
| 34
| 20.4
| 0.77907
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
4f219c9566a3457c7cab5888bd2ba366a0bd2f37
| 11,516
|
py
|
Python
|
huxley/core/migrations/0038_auto_20200202_0032.py
|
srisainachuri/huxley
|
7166a1423e49b506d6d5f142c748eac4e5d2314c
|
[
"BSD-3-Clause"
] | null | null | null |
huxley/core/migrations/0038_auto_20200202_0032.py
|
srisainachuri/huxley
|
7166a1423e49b506d6d5f142c748eac4e5d2314c
|
[
"BSD-3-Clause"
] | null | null | null |
huxley/core/migrations/0038_auto_20200202_0032.py
|
srisainachuri/huxley
|
7166a1423e49b506d6d5f142c748eac4e5d2314c
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 2.2.6 on 2020-02-02 00:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.2.6): adds Room/RoomComment models,
    delegate seating, and normalizes blank/null options on feedback, rubric,
    school and registration fields. Do not hand-edit generated operations."""
    dependencies = [
        ('core', '0037_positionpaper_graded_file'),
    ]
    operations = [
        migrations.CreateModel(
            name='Room',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('building_name', models.CharField(max_length=64)),
                ('room_number', models.IntegerField()),
                ('number_of_seats', models.IntegerField(default=20)),
            ],
            options={
                'db_table': 'room',
                'ordering': ['building_name', 'room_number'],
                'unique_together': {('building_name', 'room_number')},
            },
        ),
        migrations.AddField(
            model_name='delegate',
            name='seat',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='assignment',
            name='paper',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.PositionPaper'),
        ),
        migrations.AlterField(
            model_name='committee',
            name='rubric',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.Rubric'),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_10_comment',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_10_name',
            field=models.CharField(blank=True, default='', max_length=100),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_1_comment',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_1_name',
            field=models.CharField(blank=True, default='', max_length=100),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_2_comment',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_2_name',
            field=models.CharField(blank=True, default='', max_length=100),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_3_comment',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_3_name',
            field=models.CharField(blank=True, default='', max_length=100),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_4_comment',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_4_name',
            field=models.CharField(blank=True, default='', max_length=100),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_5_comment',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_5_name',
            field=models.CharField(blank=True, default='', max_length=100),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_6_comment',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_6_name',
            field=models.CharField(blank=True, default='', max_length=100),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_7_comment',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_7_name',
            field=models.CharField(blank=True, default='', max_length=100),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_8_comment',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_8_name',
            field=models.CharField(blank=True, default='', max_length=100),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_9_comment',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='chair_9_name',
            field=models.CharField(blank=True, default='', max_length=100),
        ),
        migrations.AlterField(
            model_name='committeefeedback',
            name='comment',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='countrypreference',
            name='country',
            field=models.ForeignKey(limit_choices_to={'special': False}, on_delete=django.db.models.deletion.CASCADE, to='core.Country'),
        ),
        migrations.AlterField(
            model_name='delegate',
            name='published_summary',
            field=models.TextField(blank=True, default='', null=True),
        ),
        migrations.AlterField(
            model_name='delegate',
            name='summary',
            field=models.TextField(blank=True, default='', null=True),
        ),
        migrations.AlterField(
            model_name='positionpaper',
            name='file',
            field=models.FileField(null=True, upload_to='position_papers/'),
        ),
        migrations.AlterField(
            model_name='positionpaper',
            name='graded_file',
            field=models.FileField(blank=True, null=True, upload_to='graded_papers/'),
        ),
        migrations.AlterField(
            model_name='registration',
            name='country_preferences',
            field=models.ManyToManyField(blank=True, through='core.CountryPreference', to='core.Country'),
        ),
        migrations.AlterField(
            model_name='registration',
            name='num_chinese_speaking_delegates',
            field=models.PositiveSmallIntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='registration',
            name='registration_comments',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='rubric',
            name='grade_category_1',
            field=models.CharField(blank=True, default='', max_length=128),
        ),
        migrations.AlterField(
            model_name='rubric',
            name='grade_category_2',
            field=models.CharField(blank=True, default='', max_length=128),
        ),
        migrations.AlterField(
            model_name='rubric',
            name='grade_category_3',
            field=models.CharField(blank=True, default='', max_length=128),
        ),
        migrations.AlterField(
            model_name='rubric',
            name='grade_category_4',
            field=models.CharField(blank=True, default='', max_length=128),
        ),
        migrations.AlterField(
            model_name='rubric',
            name='grade_category_5',
            field=models.CharField(blank=True, default='', max_length=128),
        ),
        migrations.AlterField(
            model_name='rubric',
            name='grade_t2_category_1',
            field=models.CharField(blank=True, default='', max_length=128),
        ),
        migrations.AlterField(
            model_name='rubric',
            name='grade_t2_category_2',
            field=models.CharField(blank=True, default='', max_length=128),
        ),
        migrations.AlterField(
            model_name='rubric',
            name='grade_t2_category_3',
            field=models.CharField(blank=True, default='', max_length=128),
        ),
        migrations.AlterField(
            model_name='rubric',
            name='grade_t2_category_4',
            field=models.CharField(blank=True, default='', max_length=128),
        ),
        migrations.AlterField(
            model_name='rubric',
            name='grade_t2_category_5',
            field=models.CharField(blank=True, default='', max_length=128),
        ),
        migrations.AlterField(
            model_name='rubric',
            name='topic_one',
            field=models.CharField(blank=True, default='', max_length=64),
        ),
        migrations.AlterField(
            model_name='rubric',
            name='topic_two',
            field=models.CharField(blank=True, default='', max_length=64),
        ),
        migrations.AlterField(
            model_name='school',
            name='primary_gender',
            field=models.PositiveSmallIntegerField(choices=[(1, 'Male'), (2, 'Female'), (3, 'Other'), (4, 'Unspecified')], default=4),
        ),
        migrations.AlterField(
            model_name='school',
            name='primary_type',
            field=models.PositiveSmallIntegerField(choices=[(2, 'Faculty'), (1, 'Student')], default=2),
        ),
        migrations.AlterField(
            model_name='school',
            name='program_type',
            field=models.PositiveSmallIntegerField(choices=[(1, 'Club'), (2, 'Class')], default=1),
        ),
        migrations.AlterField(
            model_name='school',
            name='secondary_gender',
            field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'Male'), (2, 'Female'), (3, 'Other'), (4, 'Unspecified')], default=4),
        ),
        migrations.AlterField(
            model_name='school',
            name='secondary_type',
            field=models.PositiveSmallIntegerField(blank=True, choices=[(2, 'Faculty'), (1, 'Student')], default=2),
        ),
        migrations.AlterField(
            model_name='secretariatmember',
            name='name',
            field=models.CharField(default='', max_length=100),
        ),
        migrations.AlterUniqueTogether(
            name='delegate',
            unique_together={('seat',)},
        ),
        migrations.CreateModel(
            name='RoomComment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.CharField(max_length=50000)),
                ('rating', models.IntegerField()),
                ('room', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.Room')),
            ],
            options={
                'db_table': 'room_comment',
            },
        ),
        migrations.AddField(
            model_name='committee',
            name='room',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.Room'),
        ),
    ]
| 38.386667
| 146
| 0.564345
| 1,044
| 11,516
| 6.041188
| 0.126437
| 0.072776
| 0.194229
| 0.225305
| 0.804661
| 0.7785
| 0.720152
| 0.69082
| 0.676233
| 0.676233
| 0
| 0.019101
| 0.304446
| 11,516
| 299
| 147
| 38.51505
| 0.76829
| 0.003908
| 0
| 0.679181
| 1
| 0
| 0.151103
| 0.008981
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.006826
| 0
| 0.017065
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4fa62813b0f96ccecac37935edfe3ddbd051e3af
| 52,380
|
py
|
Python
|
cable.py
|
mdbartos/RIPS
|
ab654138ccdcd8cb7c4ab53092132e0156812e95
|
[
"MIT"
] | 1
|
2021-04-02T03:05:55.000Z
|
2021-04-02T03:05:55.000Z
|
cable.py
|
mdbartos/RIPS
|
ab654138ccdcd8cb7c4ab53092132e0156812e95
|
[
"MIT"
] | 2
|
2015-05-13T23:35:43.000Z
|
2015-05-22T00:51:23.000Z
|
cable.py
|
mdbartos/RIPS
|
ab654138ccdcd8cb7c4ab53092132e0156812e95
|
[
"MIT"
] | 2
|
2015-05-13T23:29:03.000Z
|
2015-05-21T22:50:15.000Z
|
import math
import numpy as np
import pandas as pd
class cable():
"""
%% TO BE ASSIGNED
T_line % line temperature (C)
T_surf % line surface temperature (C)
T_film % line film temperature (C)
L % line length (m)
%% INDEPENDENT VARIABLES
T_amb % ambient temperature (C)
I % electrical current (A)
D % line diameter (m)
V % wind velocity (m / s)
%% CONSTANTS AND PARAMETERS
R % line resistance (ohm)
R_0 % line resistivity at temperature T_0 (ohm)
a_T % temperature coefficient of resistance (K^-1)
T_0 % reference temperature (C)
k % air thermal conductivity (W / mK)
a_s % absorptivity of the line surface (unitless)
e_s % emissivity of the line surface (unitless)
v % dynamic viscosity of air (m^2 / s)
Pr % Prandtl number (unitless)
sigma % Stefan-Boltzmann constant
%% DEPENDENT VARIABLES
q_gen % heat generated in the line by electrical resistive losses (W)
q_cond % conductive heat transfer within the line (W)
q_conv % convective heat transfer from the line (W)
q_rad_in % radiative heat added to the line from the sun (W)
q_rad_out % radiative heat lost from the line to the surroundings (W)
I_sun % incident solar radiation (W / m^2)
A_s % line surface area (m^2)
A_c % line cross-sectional area (m^2)
Nu % Nusselt number
Re % Reynolds number
"""
def __init__(self, code_name=None, model=None, n_conductors=1, **kwargs):
if code_name is None:
for k, v in kwargs.items():
setattr(self, k, v)
self.code_name = code_name.title()
self.model = model.lower()
self.init_models()
self.n_conductors = n_conductors
for k, v in self.meta[self.model].items():
setattr(self, k, v)
def reynolds(self, V, D, v, Mair=1.103):
r = V * D / v
return r
def nusselt(self, Re, Pr):
a = 0.62 * ( (Re) ** (1.0/2.0) ) * ( Pr ** (1.0/3.0) )
b = (1 + (0.4/(Pr**(2.0/3.0) ) ) ) ** (1.0/4.0)
c = (Re / 282000) ** (5.0/8.0)
n = 0.3 + (a/b) * ( (1 + c) ** (4.0/5.0) )
return n
def air_prop(self, T_amb):
# temp v k Pr
air_prop = np.array([[200, 7.59e-6, 18.1e-3, 0.737],
[250, 11.44e-6, 22.3e-3, 0.720],
[300, 15.89e-6, 26.3e-3, 0.707],
[350, 20.92e-6, 30.0e-3, 0.700],
[400, 26.41e-6, 33.8e-3, 0.690],
[450, 32.39e-6, 37.3e-3, 0.686],
[500, 38.79e-6, 40.7e-3, 0.684],
[550, 45.57e-6, 43.9e-3, 0.683],
[600, 52.69e-6, 46.9e-3, 0.685]])
v, k, Pr = np.apply_along_axis(lambda x: np.interp(T_amb, air_prop[:,0], x),
0, air_prop[:,1:])
return v, k, Pr
def R_T(self, R_lo, R_mid, R_hi, T_line, N_cond, T_range=None):
if T_range is None:
T_range = self.T_range
if 273 <= T_line <= 323:
R = ((R_lo +
((R_lo - R_mid)/(T_range[0] - T_range[1]))
*(T_line - T_range[0]))/N_cond)
elif T_line > 323:
R = ((R_mid +
((R_mid - R_hi)/(T_range[1] - T_range[2]))
*(T_line - T_range[1]))/N_cond)
else:
R = R_lo
print('Temperature out of bounds')
return R
def Q_joule(self, I, R):
w = (I**2) * R
return w
def Q_rad_in(self, I_sun, D, a_s):
w = I_sun * D * a_s
return w
def Q_conv(self, htcoeff, D, T_line, T_amb):
w = math.pi * htcoeff * D * (T_line - T_amb)
return w
def Q_rad_out(self, e_s, D, sigma, T_line, T_amb):
w = math.pi * e_s * D * sigma * (T_line**4 - T_amb**4)
return w
def I(self, T_line, T_amb, V, a_s=0.5, e_s=0.5, I_sun=900.0, temp_factor=1, wind_factor=1, n_iter=10):
sigma = 5.6703e-8 # Stefan-Boltzmann constant
D = self.D
R_lo, R_mid, R_hi = self.R[0], self.R[1], self.R[2]
T_amb = T_amb*temp_factor
V = V*wind_factor
v, k, Pr = self.air_prop(T_amb)
Re = self.reynolds(V, D, v)
htcoeff = self.nusselt(Re, Pr) * k / D
R = self.R_T(R_lo, R_mid, R_hi, T_line, self.n_conductors)
I = ((-self.Q_rad_in(I_sun, D, a_s) + self.Q_rad_out(e_s, D, sigma, T_line, T_amb) + self.Q_conv(htcoeff, D, T_line, T_amb))/R)**0.5
return I
def T_c(self, I, T_amb, V, a_s=0.5, e_s=0.5, I_sun=900.0, temp_factor=1, wind_factor=1, n_iter=10):
sigma = 5.6703e-8 # Stefan-Boltzmann constant
D = self.D
R_lo, R_mid, R_hi = self.R[0], self.R[1], self.R[2]
T_amb = T_amb*temp_factor
V = V*wind_factor
v, k, Pr = self.air_prop(T_amb)
Re = self.reynolds(V, D, v)
htcoeff = self.nusselt(Re, Pr) * k / D
def T_line(T_init):
R = self.R_T(R_lo, R_mid, R_hi, T_init, self.n_conductors)
C4 = e_s * sigma * D * math.pi
C3 = 0.0
C2 = 0.0
C1 = htcoeff * D * math.pi
C0 = - ( I ** 2 * R
+ I_sun * a_s * D
+ htcoeff * D * math.pi * T_amb
+ e_s * D * math.pi * sigma * (T_amb ** 4))
return np.roots([C4, C3, C2, C1, C0])
T_c = T_amb
for i in range(n_iter):
T_arr = T_line(T_c)
T_c = np.real(T_arr[np.where((np.real(T_arr) > 0) & ~(np.iscomplex(T_arr)))]).mean()
print T_c
return T_c
def init_models(self):
self.models = {
'aac': pd.read_json('{"Arbutus":{"size":"795","stranding":37,"ind_d":0.00372364,"cable_d":0.0260604,"r_dc":0.0000711942,"r_ac_25":0.0000744751,"r_ac_50":0.0000813648,"r_ac_75":0.0000882546,"amp_75":878},"Aster":{"size":"2\\/0","stranding":7,"ind_d":0.00350266,"cable_d":0.0105156,"r_dc":0.0004265092,"r_ac_25":0.0004350394,"r_ac_50":0.0004776903,"r_ac_75":0.0005206693,"amp_75":286},"Bluebell":{"size":"1033.5","stranding":37,"ind_d":0.00424434,"cable_d":0.029718,"r_dc":0.00005479,"r_ac_25":0.0000580709,"r_ac_50":0.0000633202,"r_ac_75":0.0000688976,"amp_75":1031},"Bluebonnet":{"size":"3500","stranding":127,"ind_d":0.0042164,"cable_d":0.0548132,"r_dc":0.0000163714,"r_ac_25":0.0000223097,"r_ac_50":0.000023622,"r_ac_75":0.0000249344,"amp_75":2024},"Canna":{"size":"397.5","stranding":19,"ind_d":0.00367284,"cable_d":0.0183642,"r_dc":0.0001427165,"r_ac_25":0.0001459974,"r_ac_50":0.0001604331,"r_ac_75":0.0001751969,"amp_75":570},"Carnation":{"size":"1431","stranding":61,"ind_d":0.00389128,"cable_d":0.0350012,"r_dc":0.0000396982,"r_ac_25":0.0000433071,"r_ac_50":0.000046916,"r_ac_75":0.000050853,"amp_75":1253},"Cockscomb":{"size":"900","stranding":37,"ind_d":0.0039624,"cable_d":0.0277368,"r_dc":0.0000629921,"r_ac_25":0.0000659449,"r_ac_50":0.0000721785,"r_ac_75":0.0000784121,"amp_75":948},"Columbine":{"size":"1351.5","stranding":61,"ind_d":0.00377952,"cable_d":0.034036,"r_dc":0.0000419948,"r_ac_25":0.0000452756,"r_ac_50":0.0000495407,"r_ac_75":0.0000534777,"amp_75":1212},"Coreopsis":{"size":"1590.0","stranding":61,"ind_d":0.00409956,"cable_d":0.0369062,"r_dc":0.0000357612,"r_ac_25":0.0000393701,"r_ac_50":0.0000426509,"r_ac_75":0.0000462598,"amp_75":1333},"Cosmos":{"size":"477","stranding":19,"ind_d":0.00402336,"cable_d":0.0201168,"r_dc":0.0001187664,"r_ac_25":0.0001223753,"r_ac_50":0.0001341864,"r_ac_75":0.0001459974,"amp_75":639},"Cowslip":{"size":"2000.0","stranding":91,"ind_d":0.00376428,"cable_d":0.0414274,"r_dc":0.0000283465,"r_ac_25":0.0000324803,"r_ac_50":0.000035105,"r_a
c_75":0.0000377297,"amp_75":1518},"Daffodil":{"size":"350","stranding":19,"ind_d":0.00344678,"cable_d":0.0172466,"r_dc":0.0001620735,"r_ac_25":0.0001660105,"r_ac_50":0.0001827428,"r_ac_75":0.0001988189,"amp_75":526},"Dahlia":{"size":"556.5","stranding":19,"ind_d":0.00434594,"cable_d":0.0217424,"r_dc":0.0001020341,"r_ac_25":0.0001049869,"r_ac_50":0.0001154856,"r_ac_75":0.0001256562,"amp_75":703},"Daisy":{"size":"266.8","stranding":7,"ind_d":0.00495808,"cable_d":0.0148844,"r_dc":0.0002125984,"r_ac_25":0.0002175197,"r_ac_50":0.0002385171,"r_ac_75":0.0002604987,"amp_75":443},"Goldenrod":{"size":"954.0","stranding":61,"ind_d":0.00317754,"cable_d":0.028575,"r_dc":0.0000593832,"r_ac_25":0.000062664,"r_ac_50":0.0000682415,"r_ac_75":0.0000744751,"amp_75":983},"Goldentuft":{"size":"450","stranding":19,"ind_d":0.00390906,"cable_d":0.0195326,"r_dc":0.0001259843,"r_ac_25":0.0001292651,"r_ac_50":0.0001423885,"r_ac_75":0.0001548556,"amp_75":616},"Hawkweed":{"size":"1000","stranding":37,"ind_d":0.00417576,"cable_d":0.0292354,"r_dc":0.0000567585,"r_ac_25":0.0000597113,"r_ac_50":0.0000652887,"r_ac_75":0.0000708661,"amp_75":1010},"Hawthorn":{"size":"1192.5","stranding":61,"ind_d":0.00355092,"cable_d":0.0319532,"r_dc":0.0000475722,"r_ac_25":0.000050853,"r_ac_50":0.0000554462,"r_ac_75":0.0000600394,"amp_75":1124},"Heuchera":{"size":"650","stranding":37,"ind_d":0.0033655,"cable_d":0.0235712,"r_dc":0.0000872703,"r_ac_25":0.0000902231,"r_ac_50":0.0000987533,"r_ac_75":0.0001062992,"amp_75":775},"Iris":{"size":"2","stranding":7,"ind_d":0.00247396,"cable_d":0.0074168,"r_dc":0.0008530184,"r_ac_25":0.000871063,"r_ac_50":0.000960958,"r_ac_75":0.0010439633,"amp_75":185},"Jessamine":{"size":"1750.0","stranding":61,"ind_d":0.00430276,"cable_d":0.0387096,"r_dc":0.0000324147,"r_ac_25":0.0000364173,"r_ac_50":0.0000393701,"r_ac_75":0.0000423228,"amp_75":1408},"Larkspur":{"size":"1033.5","stranding":61,"ind_d":0.00330708,"cable_d":0.0297434,"r_dc":0.00005479,"r_ac_25":0.0000580709,"r_ac_50":0.0000633202
,"r_ac_75":0.0000688976,"amp_75":1032},"Laurel":{"size":"266.8","stranding":19,"ind_d":0.0030099,"cable_d":0.0150368,"r_dc":0.0002125984,"r_ac_25":0.0002175197,"r_ac_50":0.0002385171,"r_ac_75":0.0002604987,"amp_75":444},"Lilac":{"size":"795.0","stranding":61,"ind_d":0.00290068,"cable_d":0.0260858,"r_dc":0.0000711942,"r_ac_25":0.0000744751,"r_ac_50":0.0000813648,"r_ac_75":0.0000882546,"amp_75":879},"Lupine":{"size":"2500.0","stranding":91,"ind_d":0.00420878,"cable_d":0.0463042,"r_dc":0.0000229003,"r_ac_25":0.0000275591,"r_ac_50":0.0000298556,"r_ac_75":0.0000318241,"amp_75":1706},"Magnolia":{"size":"954.0","stranding":37,"ind_d":0.00407924,"cable_d":0.0285496,"r_dc":0.0000593832,"r_ac_25":0.000062664,"r_ac_50":0.0000682415,"r_ac_75":0.0000744751,"amp_75":982},"Marigold":{"size":"1113.0","stranding":61,"ind_d":0.00343154,"cable_d":0.0308864,"r_dc":0.000050853,"r_ac_25":0.0000541339,"r_ac_50":0.0000590551,"r_ac_75":0.0000639764,"amp_75":1079},"Meadowsweet":{"size":"600","stranding":37,"ind_d":0.00323342,"cable_d":0.0226314,"r_dc":0.0000748032,"r_ac_25":0.0000974409,"r_ac_50":0.0001069554,"r_ac_75":0.0001167979,"amp_75":738},"Mistletoe":{"size":"556.5","stranding":37,"ind_d":0.00311404,"cable_d":0.0217932,"r_dc":0.0001020341,"r_ac_25":0.0001049869,"r_ac_50":0.0001154856,"r_ac_75":0.0001256562,"amp_75":704},"Narcissus":{"size":"1272.0","stranding":61,"ind_d":0.00366776,"cable_d":0.03302,"r_dc":0.0000446194,"r_ac_25":0.0000479003,"r_ac_50":0.0000521654,"r_ac_75":0.0000567585,"amp_75":1169},"Nasturtium":{"size":"715.5","stranding":61,"ind_d":0.00275082,"cable_d":0.024765,"r_dc":0.0000793963,"r_ac_25":0.0000826772,"r_ac_50":0.0000902231,"r_ac_75":0.0000980971,"amp_75":823},"Orchid":{"size":"636.0","stranding":37,"ind_d":0.00332994,"cable_d":0.0233172,"r_dc":0.0000892388,"r_ac_25":0.0000925197,"r_ac_50":0.000101378,"r_ac_75":0.0001099081,"amp_75":765},"Oxlip":{"size":"4\\/0","stranding":7,"ind_d":0.00441706,"cable_d":0.0132588,"r_dc":0.0002680446,"r_ac_25":0.0002739501,"r_ac_
50":0.000300853,"r_ac_75":0.000328084,"amp_75":383},"Pansy":{"size":"1","stranding":7,"ind_d":0.00277622,"cable_d":0.0083312,"r_dc":0.0006791339,"r_ac_25":0.0006922572,"r_ac_50":0.0007604987,"r_ac_75":0.0008290683,"amp_75":214},"Peachbell":{"size":"6","stranding":7,"ind_d":0.00155448,"cable_d":0.0046736,"r_dc":0.0021587927,"r_ac_25":0.0022063649,"r_ac_50":0.0024251969,"r_ac_75":0.002644029,"amp_75":103},"Peony":{"size":"300","stranding":19,"ind_d":0.00319278,"cable_d":0.0159512,"r_dc":0.0001889764,"r_ac_25":0.0001932415,"r_ac_50":0.0002125984,"r_ac_75":0.0002312992,"amp_75":478},"Petunia":{"size":"750.0","stranding":37,"ind_d":0.00361696,"cable_d":0.0253238,"r_dc":0.0000754593,"r_ac_25":0.0000823491,"r_ac_50":0.0000862861,"r_ac_75":0.000093832,"amp_75":847},"Phlox":{"size":"3\\/0","stranding":7,"ind_d":0.00393192,"cable_d":0.0117856,"r_dc":0.0003379265,"r_ac_25":0.0003454725,"r_ac_50":0.0003795932,"r_ac_75":0.0004130578,"amp_75":331},"Poppy":{"size":"1\\/0","stranding":7,"ind_d":0.00311912,"cable_d":0.0093472,"r_dc":0.0005380578,"r_ac_25":0.0005482284,"r_ac_50":0.0006026903,"r_ac_75":0.0006568242,"amp_75":247},"Rose":{"size":"4","stranding":7,"ind_d":0.00196088,"cable_d":0.0058928,"r_dc":0.0013582678,"r_ac_25":0.0013868111,"r_ac_50":0.0015239502,"r_ac_75":0.0016614174,"amp_75":138},"Sneezewort":{"size":"250","stranding":7,"ind_d":0.0048006,"cable_d":0.0144018,"r_dc":0.000226706,"r_ac_25":0.0002316273,"r_ac_50":0.0002549213,"r_ac_75":0.0002778871,"amp_75":425},"Syringa":{"size":"477.0","stranding":37,"ind_d":0.0028829,"cable_d":0.020193,"r_dc":0.0001187664,"r_ac_25":0.0001223753,"r_ac_50":0.0001341864,"r_ac_75":0.0001459974,"amp_75":639},"Trillium":{"size":"3000","stranding":127,"ind_d":0.00390398,"cable_d":0.0507492,"r_dc":0.0000190945,"r_ac_25":0.0000242782,"r_ac_50":0.0000259186,"r_ac_75":0.0000275591,"amp_75":1874},"Tulip":{"size":"336.4","stranding":19,"ind_d":0.00338074,"cable_d":0.016891,"r_dc":0.0001686352,"r_ac_25":0.0001729003,"r_ac_50":0.0001896326,"r_ac_7
5":0.0002063648,"amp_75":513},"Valerian":{"size":"250","stranding":19,"ind_d":0.00291338,"cable_d":0.0145796,"r_dc":0.000226706,"r_ac_25":0.0002316273,"r_ac_50":0.0002549213,"r_ac_75":0.0002778871,"amp_75":426},"Verbena":{"size":"700","stranding":37,"ind_d":0.0034925,"cable_d":0.0244602,"r_dc":0.0000810367,"r_ac_25":0.0000839895,"r_ac_50":0.0000918635,"r_ac_75":0.0001000656,"amp_75":812},"Violet":{"size":"715.5","stranding":37,"ind_d":0.00353314,"cable_d":0.0247142,"r_dc":0.0000793963,"r_ac_25":0.0000826772,"r_ac_50":0.0000902231,"r_ac_75":0.0000980971,"amp_75":823},"Zinnia":{"size":"500","stranding":19,"ind_d":0.00411988,"cable_d":0.0205994,"r_dc":0.0001135171,"r_ac_25":0.0001167979,"r_ac_50":0.0001279528,"r_ac_75":0.0001397638,"amp_75":658}}'),
'acsr': pd.read_json('{"Bittern":{"size":"1272","stranding":"45\\/7","al_d_ind":0.00426974,"st_d_ind":0.00284734,"stl_core_d":0.00853948,"cable_d":0.034163,"r_dc":0.0000442913,"r_ac_25":0.0000472441,"r_ac_50":0.0000515092,"r_ac_75":0.0000557743,"amp_75":1184.0},"Bluebird":{"size":"2156","stranding":"84\\/19","al_d_ind":0.00406908,"st_d_ind":0.00244348,"stl_core_d":0.01221232,"cable_d":0.0447548,"r_dc":0.0000262795,"r_ac_25":0.0000295276,"r_ac_50":0.0000321522,"r_ac_75":0.0000344488,"amp_75":1623.0},"Bluejay":{"size":"1113","stranding":"45\\/7","al_d_ind":0.00399542,"st_d_ind":0.00266192,"stl_core_d":0.0079883,"cable_d":0.0319532,"r_dc":0.000050853,"r_ac_25":0.0000534777,"r_ac_50":0.000058399,"r_ac_75":0.0000633202,"amp_75":1092.0},"Bobolink":{"size":"1431","stranding":"45\\/7","al_d_ind":0.00452882,"st_d_ind":0.00302006,"stl_core_d":0.00905764,"cable_d":0.0362458,"r_dc":0.0000393701,"r_ac_25":0.0000423228,"r_ac_50":0.0000462598,"r_ac_75":0.0000498688,"amp_75":1272.0},"Bunting":{"size":"1192.5","stranding":"45\\/7","al_d_ind":0.00413512,"st_d_ind":0.0027559,"stl_core_d":0.00827024,"cable_d":0.0330708,"r_dc":0.0000472441,"r_ac_25":0.0000498688,"r_ac_50":0.00005479,"r_ac_75":0.0000593832,"amp_75":1139.0},"Canary":{"size":"900","stranding":"54\\/7","al_d_ind":0.00327914,"st_d_ind":0.00327914,"stl_core_d":0.00983742,"cable_d":0.0295148,"r_dc":0.000062336,"r_ac_25":0.0000646325,"r_ac_50":0.0000708661,"r_ac_75":0.0000770997,"amp_75":961.0},"Cardinal":{"size":"954","stranding":"54\\/7","al_d_ind":0.00337566,"st_d_ind":0.00337566,"stl_core_d":0.01012698,"cable_d":0.0303784,"r_dc":0.000058727,"r_ac_25":0.0000610236,"r_ac_50":0.0000672572,"r_ac_75":0.0000728346,"amp_75":996.0},"Chickadee":{"size":"397.5","stranding":"18\\/1","al_d_ind":0.00377444,"st_d_ind":0.00377444,"stl_core_d":0.00377444,"cable_d":0.0188722,"r_dc":0.0001417323,"r_ac_25":0.0001453412,"r_ac_50":0.0001597769,"r_ac_75":0.0001732284,"amp_75":576.0},"Chukar":{"size":"1780","stranding":"84\\/19","al_d_ind":0.0036
9824,"st_d_ind":0.00221996,"stl_core_d":0.01109472,"cable_d":0.0406908,"r_dc":0.0000318241,"r_ac_25":0.0000347769,"r_ac_50":0.0000377297,"r_ac_75":0.0000410105,"amp_75":1453.0},"Cochin":{"size":"211.3","stranding":"12\\/7","al_d_ind":0.00337058,"st_d_ind":0.00337058,"stl_core_d":0.0101092,"cable_d":0.0168402,"r_dc":0.0002503281,"r_ac_25":0.0002598425,"r_ac_50":0.0003690945,"r_ac_75":0.0004301181,"amp_75":340.0},"Condor":{"size":"795","stranding":"54\\/7","al_d_ind":0.00308102,"st_d_ind":0.00308102,"stl_core_d":0.0092456,"cable_d":0.0277368,"r_dc":0.0000705381,"r_ac_25":0.0000728346,"r_ac_50":0.0000800525,"r_ac_75":0.0000869423,"amp_75":889.0},"Coot":{"size":"795","stranding":"36\\/1","al_d_ind":0.00377444,"st_d_ind":0.00377444,"stl_core_d":0.00377444,"cable_d":0.026416,"r_dc":0.0000711942,"r_ac_25":0.0000738189,"r_ac_50":0.0000810367,"r_ac_75":0.0000879265,"amp_75":884.0},"Curlew":{"size":"1033.5","stranding":"54\\/7","al_d_ind":0.00351282,"st_d_ind":0.00351282,"stl_core_d":0.010541,"cable_d":0.031623,"r_dc":0.0000541339,"r_ac_25":0.0000564304,"r_ac_50":0.0000620079,"r_ac_75":0.0000659449,"amp_75":1047.0},"Dipper":{"size":"1351.5","stranding":"45\\/7","al_d_ind":0.00440182,"st_d_ind":0.0029337,"stl_core_d":0.00880364,"cable_d":0.0352044,"r_dc":0.0000416667,"r_ac_25":0.0000446194,"r_ac_50":0.0000485564,"r_ac_75":0.0000528215,"amp_75":1229.0},"Dorking":{"size":"190.8","stranding":"12\\/7","al_d_ind":0.00320294,"st_d_ind":0.00320294,"stl_core_d":0.00960882,"cable_d":0.016002,"r_dc":0.000277231,"r_ac_25":0.0002870735,"r_ac_50":0.0004032152,"r_ac_75":0.0004671916,"amp_75":324.0},"Dotterel":{"size":"176.9","stranding":"12\\/7","al_d_ind":0.00308356,"st_d_ind":0.00308356,"stl_core_d":0.00925068,"cable_d":0.0154178,"r_dc":0.0002988845,"r_ac_25":0.0003100394,"r_ac_50":0.0004268373,"r_ac_75":0.0004963911,"amp_75":312.0},"Dove":{"size":"556.5","stranding":"26\\/7","al_d_ind":0.00371602,"st_d_ind":0.00289052,"stl_core_d":0.00866902,"cable_d":0.0235458,"r_dc":0.0001003937,"r_ac_
25":0.0001030184,"r_ac_50":0.000113189,"r_ac_75":0.0001230315,"amp_75":726.0},"Drake":{"size":"795","stranding":"26\\/7","al_d_ind":0.00444246,"st_d_ind":0.0034544,"stl_core_d":0.0103632,"cable_d":0.0281178,"r_dc":0.00007021,"r_ac_25":0.0000728346,"r_ac_50":0.0000793963,"r_ac_75":0.0000862861,"amp_75":907.0},"Eagle":{"size":"556.5","stranding":"30\\/7","al_d_ind":0.00345948,"st_d_ind":0.00345948,"stl_core_d":0.01037844,"cable_d":0.0242062,"r_dc":0.0000994095,"r_ac_25":0.0001020341,"r_ac_50":0.0001118766,"r_ac_75":0.0001217192,"amp_75":734.0},"Egret":{"size":"636","stranding":"30\\/19","al_d_ind":0.00369824,"st_d_ind":0.00221996,"stl_core_d":0.01109472,"cable_d":0.0258826,"r_dc":0.0000872703,"r_ac_25":0.0000895669,"r_ac_50":0.0000980971,"r_ac_75":0.0001069554,"amp_75":798.0},"Falcon":{"size":"1590","stranding":"54\\/19","al_d_ind":0.00435864,"st_d_ind":0.0026162,"stl_core_d":0.01307592,"cable_d":0.0392176,"r_dc":0.0000354331,"r_ac_25":0.0000380577,"r_ac_50":0.0000413386,"r_ac_75":0.0000449475,"amp_75":1359.0},"Finch":{"size":"1113","stranding":"54\\/19","al_d_ind":0.00364744,"st_d_ind":0.00218694,"stl_core_d":0.01093978,"cable_d":0.0328168,"r_dc":0.0000505249,"r_ac_25":0.0000528215,"r_ac_50":0.0000577428,"r_ac_75":0.000062664,"amp_75":1093.0},"Flamingo":{"size":"666.6","stranding":"24\\/7","al_d_ind":0.00423418,"st_d_ind":0.00282194,"stl_core_d":0.00846582,"cable_d":0.0254,"r_dc":0.0000839895,"r_ac_25":0.0000862861,"r_ac_50":0.0000951444,"r_ac_75":0.0001030184,"amp_75":807.0},"Flicker":{"size":"477","stranding":"24\\/7","al_d_ind":0.0035814,"st_d_ind":0.0023876,"stl_core_d":0.00716026,"cable_d":0.0214884,"r_dc":0.0001174541,"r_ac_25":0.0001204068,"r_ac_50":0.0001322179,"r_ac_75":0.0001440289,"amp_75":655.0},"Grackle":{"size":"1192.5","stranding":"54\\/19","al_d_ind":0.00377444,"st_d_ind":0.00226568,"stl_core_d":0.01132332,"cable_d":0.0339598,"r_dc":0.0000472441,"r_ac_25":0.0000495407,"r_ac_50":0.0000541339,"r_ac_75":0.000058727,"amp_75":1140.0},"Grosbeak":{"size":"63
6","stranding":"26\\/7","al_d_ind":0.00397256,"st_d_ind":0.00308864,"stl_core_d":0.00926846,"cable_d":0.0251714,"r_dc":0.0000875984,"r_ac_25":0.0000902231,"r_ac_50":0.0000987533,"r_ac_75":0.0001076116,"amp_75":789.0},"Grouse":{"size":"80","stranding":"8\\/1","al_d_ind":0.00254,"st_d_ind":0.00423418,"stl_core_d":0.00423418,"cable_d":0.0093218,"r_dc":0.0006791339,"r_ac_25":0.0006922572,"r_ac_50":0.0007749344,"r_ac_75":0.0008569554,"amp_75":204.0},"Guinea":{"size":"159.0","stranding":"12\\/7","al_d_ind":0.00292354,"st_d_ind":0.00292354,"stl_core_d":0.00877062,"cable_d":0.0146304,"r_dc":0.0003313648,"r_ac_25":0.0003428478,"r_ac_50":0.0004678478,"r_ac_75":0.0005423229,"amp_75":297.0},"Hawk":{"size":"477","stranding":"26\\/7","al_d_ind":0.00343916,"st_d_ind":0.00267462,"stl_core_d":0.0080264,"cable_d":0.0217932,"r_dc":0.0001167979,"r_ac_25":0.0001200787,"r_ac_50":0.0001318898,"r_ac_75":0.0001437008,"amp_75":659.0},"Hen":{"size":"477","stranding":"30\\/7","al_d_ind":0.00320294,"st_d_ind":0.00320294,"stl_core_d":0.00960882,"cable_d":0.0224282,"r_dc":0.0001161417,"r_ac_25":0.0001187664,"r_ac_50":0.0001276247,"r_ac_75":0.0001423885,"amp_75":666.0},"Ibis":{"size":"397.5","stranding":"26\\/7","al_d_ind":0.00313944,"st_d_ind":0.00244348,"stl_core_d":0.0073279,"cable_d":0.0198882,"r_dc":0.00014042,"r_ac_25":0.0001437008,"r_ac_50":0.0001578084,"r_ac_75":0.0001722441,"amp_75":587.0},"Kingbird":{"size":"636","stranding":"18\\/1","al_d_ind":0.0047752,"st_d_ind":0.0047752,"stl_core_d":0.0047752,"cable_d":0.023876,"r_dc":0.0000885827,"r_ac_25":0.0000912074,"r_ac_50":0.0001003937,"r_ac_75":0.0001089239,"amp_75":773.0},"Kiwi":{"size":"2167","stranding":"72\\/7","al_d_ind":0.0044069,"st_d_ind":0.00293878,"stl_core_d":0.0088138,"cable_d":0.044069,"r_dc":0.0000262795,"r_ac_25":0.0000301837,"r_ac_50":0.0000324803,"r_ac_75":0.0000347769,"amp_75":1607.0},"Lapwing":{"size":"1590","stranding":"45\\/7","al_d_ind":0.0047752,"st_d_ind":0.00318262,"stl_core_d":0.00954786,"cable_d":0.0382016,"r_dc":0
.0000354331,"r_ac_25":0.0000383858,"r_ac_50":0.0000416667,"r_ac_75":0.0000452756,"amp_75":1354.0},"Lark":{"size":"397.5","stranding":"30\\/7","al_d_ind":0.00292354,"st_d_ind":0.00292354,"stl_core_d":0.00877062,"cable_d":0.0204724,"r_dc":0.0001394357,"r_ac_25":0.0001423885,"r_ac_50":0.0001564961,"r_ac_75":0.0001702756,"amp_75":594.0},"Leghorn":{"size":"134.6","stranding":"12\\/7","al_d_ind":0.00268986,"st_d_ind":0.00268986,"stl_core_d":0.00806958,"cable_d":0.013462,"r_dc":0.0003937008,"r_ac_25":0.0004045276,"r_ac_50":0.0005374016,"r_ac_75":0.0006213911,"amp_75":273.0},"Linnet":{"size":"336.4","stranding":"26\\/7","al_d_ind":0.00288798,"st_d_ind":0.0022479,"stl_core_d":0.00674116,"cable_d":0.018288,"r_dc":0.0001656824,"r_ac_25":0.0001696194,"r_ac_50":0.0001863517,"r_ac_75":0.000203084,"amp_75":529.0},"Mallard":{"size":"795","stranding":"30\\/19","al_d_ind":0.00413512,"st_d_ind":0.00248158,"stl_core_d":0.01240536,"cable_d":0.028956,"r_dc":0.0000698819,"r_ac_25":0.0000721785,"r_ac_50":0.0000790682,"r_ac_75":0.0000856299,"amp_75":918.0},"Martin":{"size":"1351.5","stranding":"54\\/19","al_d_ind":0.00401828,"st_d_ind":0.00241046,"stl_core_d":0.01205484,"cable_d":0.0361696,"r_dc":0.0000416667,"r_ac_25":0.0000439633,"r_ac_50":0.0000482283,"r_ac_75":0.0000521654,"amp_75":1232.0},"Merlin":{"size":"336.4","stranding":"18\\/1","al_d_ind":0.00347218,"st_d_ind":0.00347218,"stl_core_d":0.00347218,"cable_d":0.0173736,"r_dc":0.0001673228,"r_ac_25":0.0001715879,"r_ac_50":0.0001883202,"r_ac_75":0.0002050525,"amp_75":519.0},"Minorca":{"size":"110.8","stranding":"12\\/7","al_d_ind":0.00244094,"st_d_ind":0.00244348,"stl_core_d":0.0073279,"cable_d":0.0122174,"r_dc":0.0004757218,"r_ac_25":0.0004891732,"r_ac_50":0.0006338583,"r_ac_75":0.0007326116,"amp_75":248.0},"Oriole":{"size":"336.4","stranding":"30\\/7","al_d_ind":0.00268986,"st_d_ind":0.00268986,"stl_core_d":0.00806958,"cable_d":0.0188214,"r_dc":0.0001646982,"r_ac_25":0.0001683071,"r_ac_50":0.0001847113,"r_ac_75":0.0002014436,"amp_75":
535.0},"Ortolan":{"size":"1033.5","stranding":"45\\/7","al_d_ind":0.0038481,"st_d_ind":0.0025654,"stl_core_d":0.00769874,"cable_d":0.0307848,"r_dc":0.00005479,"r_ac_25":0.0000574147,"r_ac_50":0.000062664,"r_ac_75":0.0000682415,"amp_75":1043.0},"Osprey":{"size":"556.5","stranding":"18\\/1","al_d_ind":0.00446532,"st_d_ind":0.00446532,"stl_core_d":0.00446532,"cable_d":0.0223266,"r_dc":0.0001010499,"r_ac_25":0.0001043307,"r_ac_50":0.0001141732,"r_ac_75":0.0001243438,"amp_75":711.0},"Parakeet":{"size":"556.5","stranding":"24\\/7","al_d_ind":0.00386842,"st_d_ind":0.0025781,"stl_core_d":0.0077343,"cable_d":0.0232156,"r_dc":0.0001007218,"r_ac_25":0.0001030184,"r_ac_50":0.0001138451,"r_ac_75":0.0001236877,"amp_75":721.0},"Partridge":{"size":"266.8","stranding":"26\\/7","al_d_ind":0.00257302,"st_d_ind":0.00200152,"stl_core_d":0.00600202,"cable_d":0.0163068,"r_dc":0.0002089895,"r_ac_25":0.0002139108,"r_ac_50":0.000234252,"r_ac_75":0.0002552494,"amp_75":475.0},"Peacock":{"size":"605","stranding":"24\\/7","al_d_ind":0.00403352,"st_d_ind":0.00268986,"stl_core_d":0.00806958,"cable_d":0.0242062,"r_dc":0.0000925197,"r_ac_25":0.0000951444,"r_ac_50":0.0001240158,"r_ac_75":0.0001138451,"amp_75":760.0},"Pelican":{"size":"477","stranding":"18\\/1","al_d_ind":0.00413512,"st_d_ind":0.00413512,"stl_core_d":0.00413512,"cable_d":0.0206756,"r_dc":0.0001181102,"r_ac_25":0.000121063,"r_ac_50":0.000132874,"r_ac_75":0.000144685,"amp_75":646.0},"Penguin":{"size":"4\\/0","stranding":"6\\/1","al_d_ind":0.00477012,"st_d_ind":0.00477012,"stl_core_d":0.00477012,"cable_d":0.0143002,"r_dc":0.0002608268,"r_ac_25":0.000269685,"r_ac_50":0.0003497375,"r_ac_75":0.0003795932,"amp_75":357.0},"Petrel":{"size":"101.8","stranding":"12\\/7","al_d_ind":0.00233934,"st_d_ind":0.00233934,"stl_core_d":0.00701802,"cable_d":0.0117094,"r_dc":0.0005183727,"r_ac_25":0.0005331365,"r_ac_50":0.00067979,"r_ac_75":0.0007854331,"amp_75":237.0},"Pheasant":{"size":"1272","stranding":"54\\/19","al_d_ind":0.0038989,"st_d_ind":0.0023393
4,"stl_core_d":0.0116967,"cable_d":0.0350774,"r_dc":0.0000442913,"r_ac_25":0.0000465879,"r_ac_50":0.000050853,"r_ac_75":0.0000554462,"amp_75":1187.0},"Pigeon":{"size":"3\\/0","stranding":"6\\/1","al_d_ind":0.00424688,"st_d_ind":0.00424688,"stl_core_d":0.00424688,"cable_d":0.0127508,"r_dc":0.000328084,"r_ac_25":0.0003392389,"r_ac_50":0.0003963255,"r_ac_75":0.0004740814,"amp_75":315.0},"Quail":{"size":"2\\/0","stranding":"6\\/1","al_d_ind":0.00378206,"st_d_ind":0.00378206,"stl_core_d":0.00378206,"cable_d":0.0113538,"r_dc":0.0004133858,"r_ac_25":0.0004268373,"r_ac_50":0.0005301837,"r_ac_75":0.0005774278,"amp_75":276.0},"Rail":{"size":"954","stranding":"45\\/7","al_d_ind":0.00369824,"st_d_ind":0.00246634,"stl_core_d":0.00739648,"cable_d":0.029591,"r_dc":0.0000590551,"r_ac_25":0.0000616798,"r_ac_50":0.0000675853,"r_ac_75":0.0000731627,"amp_75":993.0},"Raven":{"size":"1\\/0","stranding":"6\\/1","al_d_ind":0.00337058,"st_d_ind":0.00337058,"stl_core_d":0.00337058,"cable_d":0.0101092,"r_dc":0.0005216536,"r_ac_25":0.0005357612,"r_ac_50":0.0006469816,"r_ac_75":0.0007089895,"amp_75":242.0},"Redwing":{"size":"715.5","stranding":"30\\/19","al_d_ind":0.00392176,"st_d_ind":0.00235458,"stl_core_d":0.01176782,"cable_d":0.0274574,"r_dc":0.0000774278,"r_ac_25":0.0000793963,"r_ac_50":0.0000875984,"r_ac_75":0.0000951444,"amp_75":859.0},"Robin":{"size":"1","stranding":"6\\/1","al_d_ind":0.00299974,"st_d_ind":0.00299974,"stl_core_d":0.00299974,"cable_d":0.0089916,"r_dc":0.0006594488,"r_ac_25":0.000675525,"r_ac_50":0.0008116798,"r_ac_75":0.0008868111,"amp_75":212.0},"Rook":{"size":"636","stranding":"24\\/7","al_d_ind":0.00413512,"st_d_ind":0.0027559,"stl_core_d":0.00827024,"cable_d":0.0248158,"r_dc":0.0000879265,"r_ac_25":0.0000908793,"r_ac_50":0.0000984252,"r_ac_75":0.0001082677,"amp_75":784.0},"Ruddy":{"size":"900","stranding":"45\\/7","al_d_ind":0.00359156,"st_d_ind":0.00239522,"stl_core_d":0.00718312,"cable_d":0.0287274,"r_dc":0.000062664,"r_ac_25":0.0000656168,"r_ac_50":0.0000715223,"r
_ac_75":0.0000777559,"amp_75":958.0},"Sparate":{"size":"2","stranding":"7\\/1","al_d_ind":0.00247396,"st_d_ind":0.00329692,"stl_core_d":0.00329692,"cable_d":0.008255,"r_dc":0.0008234908,"r_ac_25":0.0008408793,"r_ac_50":0.0009730971,"r_ac_75":0.0010816929,"amp_75":184.0},"Sparrow":{"size":"2","stranding":"6\\/1","al_d_ind":0.00267208,"st_d_ind":0.00267208,"stl_core_d":0.00267208,"cable_d":0.0080264,"r_dc":0.0008333334,"r_ac_25":0.0008500656,"r_ac_50":0.0010104987,"r_ac_75":0.0011023622,"amp_75":184.0},"Starling":{"size":"715.5","stranding":"26\\/7","al_d_ind":0.00421386,"st_d_ind":0.0032766,"stl_core_d":0.00983234,"cable_d":0.0266954,"r_dc":0.000078084,"r_ac_25":0.0000800525,"r_ac_50":0.0000882546,"r_ac_75":0.0000958005,"amp_75":849.0},"Swan":{"size":"4","stranding":"6\\/1","al_d_ind":0.00211836,"st_d_ind":0.00211836,"stl_core_d":0.00211836,"cable_d":0.00635,"r_dc":0.0013221785,"r_ac_25":0.001351378,"r_ac_50":0.0015728347,"r_ac_75":0.0017119423,"amp_75":140.0},"Swanate":{"size":"4","stranding":"7\\/1","al_d_ind":0.00196088,"st_d_ind":0.0026162,"stl_core_d":0.0026162,"cable_d":0.0065278,"r_dc":0.0013090552,"r_ac_25":0.001335958,"r_ac_50":0.0015200132,"r_ac_75":0.0016945539,"amp_75":140.0},"Swift":{"size":"636.0","stranding":"36\\/1","al_d_ind":0.00337566,"st_d_ind":0.00337566,"stl_core_d":0.00337566,"cable_d":0.023622,"r_dc":0.0000889108,"r_ac_25":0.0000921916,"r_ac_50":0.0001007218,"r_ac_75":0.0001095801,"amp_75":769.0},"Tern":{"size":"795","stranding":"45\\/7","al_d_ind":0.00337566,"st_d_ind":0.00225044,"stl_core_d":0.00675132,"cable_d":0.0270002,"r_dc":0.0000708661,"r_ac_25":0.0000738189,"r_ac_50":0.0000807087,"r_ac_75":0.0000875984,"amp_75":887.0},"Turkey":{"size":"6","stranding":"6\\/1","al_d_ind":0.00167894,"st_d_ind":0.00167894,"stl_core_d":0.00167894,"cable_d":0.0050292,"r_dc":0.0021030184,"r_ac_25":0.0021499345,"r_ac_50":0.00246063,"r_ac_75":0.0026768374,"amp_75":105.0},"Waxwing":{"size":"266.8","stranding":"18\\/1","al_d_ind":0.00309118,"st_d_ind":0.00309118
,"stl_core_d":0.00309118,"cable_d":0.0154686,"r_dc":0.000210958,"r_ac_25":0.0002155512,"r_ac_50":0.0002372047,"r_ac_75":0.0002585302,"amp_75":449.0}}'),
'acss': pd.read_json('{"Spoonbill":{"size":266.8,"stranding":"22\\/7","al_d_ind":0.00279654,"st_d_ind":0.00155448,"cable_d":0.0158496,"r_dc":0.0002047244,"r_ac_50":0.0002096457,"r_ac_75":0.0002519685,"r_ac_200":0.0003579396,"amp_75":455},"Scaup":{"size":266.8,"stranding":"24\\/7","al_d_ind":0.00267716,"st_d_ind":0.00178562,"cable_d":0.0160782,"r_dc":0.0002040682,"r_ac_50":0.0002086614,"r_ac_75":0.0002509843,"r_ac_200":0.0003562992,"amp_75":460},"Partridge":{"size":266.8,"stranding":"26\\/7","al_d_ind":0.00257302,"st_d_ind":0.00200152,"cable_d":0.0163068,"r_dc":0.000203084,"r_ac_50":0.0002076772,"r_ac_75":0.0002496719,"r_ac_200":0.0003546588,"amp_75":460},"Junco":{"size":266.8,"stranding":"30\\/7","al_d_ind":0.00239522,"st_d_ind":0.00239522,"cable_d":0.016764,"r_dc":0.0002017717,"r_ac_50":0.0002063648,"r_ac_75":0.0002480315,"r_ac_200":0.0003520341,"amp_75":465},"Ostrich":{"size":300.0,"stranding":"26\\/7","al_d_ind":0.00272796,"st_d_ind":0.0021209,"cable_d":0.017272,"r_dc":0.0001807743,"r_ac_50":0.0001847113,"r_ac_75":0.0002221129,"r_ac_200":0.0003156168,"amp_75":500},"Trogon":{"size":336.4,"stranding":"20\\/7","al_d_ind":0.00329438,"st_d_ind":0.00146304,"cable_d":0.0175768,"r_dc":0.0001630577,"r_ac_50":0.0001669948,"r_ac_75":0.0002007874,"r_ac_200":0.000285105,"amp_75":525},"Woodcock":{"size":336.4,"stranding":"22\\/7","al_d_ind":0.00314198,"st_d_ind":0.00174498,"cable_d":0.0178054,"r_dc":0.0001624016,"r_ac_50":0.0001663386,"r_ac_75":0.0001998032,"r_ac_200":0.0002841207,"amp_75":530},"Widgeon":{"size":336.4,"stranding":"24\\/7","al_d_ind":0.00300736,"st_d_ind":0.00200406,"cable_d":0.018034,"r_dc":0.0001617454,"r_ac_50":0.0001656824,"r_ac_75":0.000199147,"r_ac_200":0.0002828084,"amp_75":530},"Linnet":{"size":336.4,"stranding":"26\\/7","al_d_ind":0.00288798,"st_d_ind":0.00224536,"cable_d":0.018288,"r_dc":0.0001610892,"r_ac_50":0.0001650263,"r_ac_75":0.0001981627,"r_ac_200":0.0002814961,"amp_75":535},"Oriole":{"size":336.4,"stranding":"30\\/7","al_d_ind":0.00268986,"st
_d_ind":0.00268986,"cable_d":0.0188214,"r_dc":0.000160105,"r_ac_50":0.0001637139,"r_ac_75":0.0001968504,"r_ac_200":0.0002791995,"amp_75":540},"Ptarmigan":{"size":397.5,"stranding":"20\\/7","al_d_ind":0.0035814,"st_d_ind":0.00159258,"cable_d":0.0191008,"r_dc":0.0001381234,"r_ac_50":0.0001414042,"r_ac_75":0.0001699475,"r_ac_200":0.0002414698,"amp_75":585},"Stork":{"size":397.5,"stranding":"22\\/7","al_d_ind":0.00341376,"st_d_ind":0.00189738,"cable_d":0.0193548,"r_dc":0.0001374672,"r_ac_50":0.0001410761,"r_ac_75":0.0001692913,"r_ac_200":0.0002404856,"amp_75":585},"Brant":{"size":397.5,"stranding":"24\\/7","al_d_ind":0.00326898,"st_d_ind":0.00217932,"cable_d":0.0196088,"r_dc":0.000136811,"r_ac_50":0.00014042,"r_ac_75":0.0001686352,"r_ac_200":0.0002395013,"amp_75":590},"Ibis":{"size":397.5,"stranding":"26\\/7","al_d_ind":0.00313944,"st_d_ind":0.00244094,"cable_d":0.0198882,"r_dc":0.0001364829,"r_ac_50":0.0001397638,"r_ac_75":0.000167979,"r_ac_200":0.000238189,"amp_75":595},"Lark":{"size":397.5,"stranding":"30\\/7","al_d_ind":0.00292354,"st_d_ind":0.00292354,"cable_d":0.0204724,"r_dc":0.0001354987,"r_ac_50":0.0001387795,"r_ac_75":0.0001666667,"r_ac_200":0.0002365486,"amp_75":600},"Tailorbird":{"size":477.0,"stranding":"20\\/7","al_d_ind":0.00392176,"st_d_ind":0.00174244,"cable_d":0.0209296,"r_dc":0.0001151575,"r_ac_50":0.0001181102,"r_ac_75":0.0001420604,"r_ac_200":0.0002014436,"amp_75":655},"Toucan":{"size":477.0,"stranding":"22\\/7","al_d_ind":0.00373888,"st_d_ind":0.00207772,"cable_d":0.0211836,"r_dc":0.0001145013,"r_ac_50":0.0001177822,"r_ac_75":0.0001414042,"r_ac_200":0.0002004593,"amp_75":660},"Flicker":{"size":477.0,"stranding":"24\\/7","al_d_ind":0.0035814,"st_d_ind":0.0023876,"cable_d":0.0214884,"r_dc":0.0001141732,"r_ac_50":0.000117126,"r_ac_75":0.000140748,"r_ac_200":0.0001994751,"amp_75":660},"Hawk":{"size":477.0,"stranding":"26\\/7","al_d_ind":0.00343916,"st_d_ind":0.00267462,"cable_d":0.0217932,"r_dc":0.0001135171,"r_ac_50":0.0001164698,"r_ac_75":0.000140091
9,"r_ac_200":0.0001984908,"amp_75":665},"Hen":{"size":477.0,"stranding":"30\\/7","al_d_ind":0.00320294,"st_d_ind":0.00320294,"cable_d":0.0224282,"r_dc":0.0001128609,"r_ac_50":0.0001158137,"r_ac_75":0.0001387795,"r_ac_200":0.0001971785,"amp_75":675},"Heron":{"size":500.0,"stranding":"30\\/7","al_d_ind":0.00327914,"st_d_ind":0.00327914,"cable_d":0.0229616,"r_dc":0.0001076116,"r_ac_50":0.0001105643,"r_ac_75":0.0001325459,"r_ac_200":0.0001879921,"amp_75":695},"Tody":{"size":556.5,"stranding":"20\\/7","al_d_ind":0.00423672,"st_d_ind":0.00188214,"cable_d":0.022606,"r_dc":0.0000987533,"r_ac_50":0.000101378,"r_ac_75":0.0001217192,"r_ac_200":0.0001729003,"amp_75":720},"Sapsucker":{"size":556.5,"stranding":"22\\/7","al_d_ind":0.0040386,"st_d_ind":0.00224282,"cable_d":0.0228854,"r_dc":0.0000980971,"r_ac_50":0.0001010499,"r_ac_75":0.0001213911,"r_ac_200":0.000171916,"amp_75":725},"Parakeet":{"size":556.5,"stranding":"24\\/7","al_d_ind":0.00386842,"st_d_ind":0.0025781,"cable_d":0.0232156,"r_dc":0.000097769,"r_ac_50":0.0001003937,"r_ac_75":0.0001207349,"r_ac_200":0.0001712598,"amp_75":730},"Dove":{"size":556.5,"stranding":"26\\/7","al_d_ind":0.00371602,"st_d_ind":0.00289052,"cable_d":0.0235458,"r_dc":0.0000974409,"r_ac_50":0.0001000656,"r_ac_75":0.0001200787,"r_ac_200":0.0001702756,"amp_75":735},"Eagle":{"size":556.5,"stranding":"30\\/7","al_d_ind":0.00345948,"st_d_ind":0.00345948,"cable_d":0.0242062,"r_dc":0.0000967848,"r_ac_50":0.0000994095,"r_ac_75":0.0001190945,"r_ac_200":0.0001689633,"amp_75":740},"Peacock":{"size":605.0,"stranding":"24\\/7","al_d_ind":0.00403352,"st_d_ind":0.00268986,"cable_d":0.0242062,"r_dc":0.000089895,"r_ac_50":0.0000925197,"r_ac_75":0.0001112205,"r_ac_200":0.0001574803,"amp_75":770},"Squab":{"size":605.0,"stranding":"26\\/7","al_d_ind":0.0038735,"st_d_ind":0.00301244,"cable_d":0.0245364,"r_dc":0.0000895669,"r_ac_50":0.0000921916,"r_ac_75":0.0001105643,"r_ac_200":0.0001568242,"amp_75":775},"WoodDuck":{"size":605.0,"stranding":"30\\/7","al_d_ind":0.00360
68,"st_d_ind":0.0036068,"cable_d":0.0252476,"r_dc":0.0000889108,"r_ac_50":0.0000915354,"r_ac_75":0.0001095801,"r_ac_200":0.0001555118,"amp_75":780},"Teal":{"size":605.0,"stranding":"30\\/19","al_d_ind":0.0036068,"st_d_ind":0.00216408,"cable_d":0.0252476,"r_dc":0.0000892388,"r_ac_50":0.0000915354,"r_ac_75":0.0001099081,"r_ac_200":0.0001555118,"amp_75":780},"Turacos":{"size":636.0,"stranding":"20\\/7","al_d_ind":0.00452882,"st_d_ind":0.00201168,"cable_d":0.0241554,"r_dc":0.0000862861,"r_ac_50":0.0000889108,"r_ac_75":0.0001066273,"r_ac_200":0.0001512467,"amp_75":785},"Goldfinch":{"size":636.0,"stranding":"22\\/7","al_d_ind":0.004318,"st_d_ind":0.00239776,"cable_d":0.0244602,"r_dc":0.000085958,"r_ac_50":0.0000885827,"r_ac_75":0.0001062992,"r_ac_200":0.0001505906,"amp_75":790},"Rook":{"size":636.0,"stranding":"24\\/7","al_d_ind":0.00413512,"st_d_ind":0.0027559,"cable_d":0.0248158,"r_dc":0.0000856299,"r_ac_50":0.0000882546,"r_ac_75":0.000105643,"r_ac_200":0.0001499344,"amp_75":790},"Grosbeak":{"size":636.0,"stranding":"26\\/7","al_d_ind":0.00397256,"st_d_ind":0.00308864,"cable_d":0.025146,"r_dc":0.0000853018,"r_ac_50":0.0000875984,"r_ac_75":0.000105315,"r_ac_200":0.0001492782,"amp_75":795},"Scoter":{"size":636.0,"stranding":"30\\/7","al_d_ind":0.00369824,"st_d_ind":0.00369824,"cable_d":0.0258826,"r_dc":0.0000846457,"r_ac_50":0.0000869423,"r_ac_75":0.0001043307,"r_ac_200":0.0001479659,"amp_75":805},"Egret":{"size":636.0,"stranding":"30\\/19","al_d_ind":0.00369824,"st_d_ind":0.00221996,"cable_d":0.0258826,"r_dc":0.0000846457,"r_ac_50":0.0000869423,"r_ac_75":0.0001043307,"r_ac_200":0.0001479659,"amp_75":805},"Flamingo":{"size":666.6,"stranding":"24\\/7","al_d_ind":0.00423418,"st_d_ind":0.00282194,"cable_d":0.0254,"r_dc":0.0000816929,"r_ac_50":0.0000843176,"r_ac_75":0.0001010499,"r_ac_200":0.0001430446,"amp_75":815},"Gannet":{"size":666.6,"stranding":"26\\/7","al_d_ind":0.00406654,"st_d_ind":0.0031623,"cable_d":0.0257556,"r_dc":0.0000813648,"r_ac_50":0.0000836614,"r_ac_75":0.
0001003937,"r_ac_200":0.0001423885,"amp_75":820},"Stilt":{"size":715.5,"stranding":"24\\/7","al_d_ind":0.00438658,"st_d_ind":0.00292354,"cable_d":0.0263144,"r_dc":0.0000761155,"r_ac_50":0.0000784121,"r_ac_75":0.0000941601,"r_ac_200":0.0001332021,"amp_75":855},"Starling":{"size":715.5,"stranding":"26\\/7","al_d_ind":0.00421386,"st_d_ind":0.0032766,"cable_d":0.0266954,"r_dc":0.0000757874,"r_ac_50":0.000078084,"r_ac_75":0.000093832,"r_ac_200":0.0001325459,"amp_75":860},"Redwing":{"size":715.5,"stranding":"30\\/19","al_d_ind":0.00392176,"st_d_ind":0.00235204,"cable_d":0.0274574,"r_dc":0.0000754593,"r_ac_50":0.0000774278,"r_ac_75":0.0000928478,"r_ac_200":0.0001315617,"amp_75":870},"Macaw":{"size":795.0,"stranding":"42\\/7","al_d_ind":0.00349504,"st_d_ind":0.00194056,"cable_d":0.026797,"r_dc":0.0000692257,"r_ac_50":0.0000725066,"r_ac_75":0.0000895669,"r_ac_200":0.0001299213,"amp_75":880},"Turbit":{"size":795.0,"stranding":"20\\/7","al_d_ind":0.00506476,"st_d_ind":0.00225044,"cable_d":0.0270002,"r_dc":0.0000688976,"r_ac_50":0.0000715223,"r_ac_75":0.0000856299,"r_ac_200":0.0001213911,"amp_75":900},"Tern":{"size":795.0,"stranding":"45\\/7","al_d_ind":0.00337566,"st_d_ind":0.00225044,"cable_d":0.0270002,"r_dc":0.0000688976,"r_ac_50":0.0000721785,"r_ac_75":0.0000892388,"r_ac_200":0.0001295932,"amp_75":880},"Puffin":{"size":795.0,"stranding":"22\\/7","al_d_ind":0.00482854,"st_d_ind":0.00268224,"cable_d":0.0273558,"r_dc":0.0000688976,"r_ac_50":0.0000711942,"r_ac_75":0.0000853018,"r_ac_200":0.0001207349,"amp_75":905},"Cuckoo":{"size":795.0,"stranding":"24\\/7","al_d_ind":0.0046228,"st_d_ind":0.00308102,"cable_d":0.0277368,"r_dc":0.0000685696,"r_ac_50":0.0000708661,"r_ac_75":0.0000849738,"r_ac_200":0.0001200787,"amp_75":910},"Condor":{"size":795.0,"stranding":"54\\/7","al_d_ind":0.00308102,"st_d_ind":0.00308102,"cable_d":0.0277368,"r_dc":0.0000685696,"r_ac_50":0.0000711942,"r_ac_75":0.0000885827,"r_ac_200":0.0001286089,"amp_75":890},"Drake":{"size":795.0,"stranding":"26\\/7","al_d
_ind":0.00444246,"st_d_ind":0.0034544,"cable_d":0.0281178,"r_dc":0.0000682415,"r_ac_50":0.0000705381,"r_ac_75":0.0000843176,"r_ac_200":0.0001194226,"amp_75":915},"Mallard":{"size":795.0,"stranding":"30\\/19","al_d_ind":0.00413512,"st_d_ind":0.00248158,"cable_d":0.028956,"r_dc":0.0000679134,"r_ac_50":0.0000698819,"r_ac_75":0.0000836614,"r_ac_200":0.0001187664,"amp_75":925},"Ruddy":{"size":900.0,"stranding":"45\\/7","al_d_ind":0.00359156,"st_d_ind":0.00239522,"cable_d":0.0287274,"r_dc":0.0000610236,"r_ac_50":0.0000639764,"r_ac_75":0.0000790682,"r_ac_200":0.0001145013,"amp_75":955},"Canary":{"size":900.0,"stranding":"54\\/7","al_d_ind":0.00327914,"st_d_ind":0.00327914,"cable_d":0.0295148,"r_dc":0.0000603675,"r_ac_50":0.0000633202,"r_ac_75":0.0000784121,"r_ac_200":0.0001135171,"amp_75":965},"Phoenix":{"size":954.0,"stranding":"42\\/7","al_d_ind":0.00382778,"st_d_ind":0.00212598,"cable_d":0.029337,"r_dc":0.0000577428,"r_ac_50":0.0000606955,"r_ac_75":0.0000748032,"r_ac_200":0.0001082677,"amp_75":985},"Corncrake":{"size":954.0,"stranding":"20\\/7","al_d_ind":0.00554736,"st_d_ind":0.00246634,"cable_d":0.029591,"r_dc":0.0000574147,"r_ac_50":0.0000600394,"r_ac_75":0.0000718504,"r_ac_200":0.000101378,"amp_75":1005},"Rail":{"size":954.0,"stranding":"45\\/7","al_d_ind":0.00369824,"st_d_ind":0.00246634,"cable_d":0.029591,"r_dc":0.0000574147,"r_ac_50":0.0000606955,"r_ac_75":0.0000748032,"r_ac_200":0.0001079396,"amp_75":990},"Towhee":{"size":954.0,"stranding":"48\\/7","al_d_ind":0.0035814,"st_d_ind":0.00278638,"cable_d":0.029845,"r_dc":0.0000574147,"r_ac_50":0.0000603675,"r_ac_75":0.0000744751,"r_ac_200":0.0001076116,"amp_75":990},"Redbird":{"size":954.0,"stranding":"24\\/7","al_d_ind":0.00506476,"st_d_ind":0.00337566,"cable_d":0.0303784,"r_dc":0.0000570866,"r_ac_50":0.0000593832,"r_ac_75":0.0000711942,"r_ac_200":0.0001003937,"amp_75":1020},"Cardinal":{"size":954.0,"stranding":"54\\/7","al_d_ind":0.00337566,"st_d_ind":0.00337566,"cable_d":0.0303784,"r_dc":0.0000570866,"r_ac_50":0.0
000597113,"r_ac_75":0.0000738189,"r_ac_200":0.0001069554,"amp_75":1000},"Canvasback":{"size":954.0,"stranding":"30\\/19","al_d_ind":0.00452882,"st_d_ind":0.0027178,"cable_d":0.0316992,"r_dc":0.0000564304,"r_ac_50":0.000058399,"r_ac_75":0.00007021,"r_ac_200":0.0000990814,"amp_75":1040},"Snowbird":{"size":1033.5,"stranding":"42\\/7","al_d_ind":0.00398526,"st_d_ind":0.00221488,"cable_d":0.0305562,"r_dc":0.0000531496,"r_ac_50":0.0000564304,"r_ac_75":0.0000692257,"r_ac_200":0.0001000656,"amp_75":1035},"Ortolan":{"size":1033.5,"stranding":"45\\/7","al_d_ind":0.0038481,"st_d_ind":0.0025654,"cable_d":0.0307848,"r_dc":0.0000531496,"r_ac_50":0.0000561024,"r_ac_75":0.0000692257,"r_ac_200":0.0000997375,"amp_75":1040},"Whooper":{"size":1033.5,"stranding":"48\\/7","al_d_ind":0.00372618,"st_d_ind":0.00289814,"cable_d":0.0310642,"r_dc":0.0000528215,"r_ac_50":0.0000557743,"r_ac_75":0.0000688976,"r_ac_200":0.0000994095,"amp_75":1040},"Curlew":{"size":1033.5,"stranding":"54\\/7","al_d_ind":0.00351282,"st_d_ind":0.00351282,"cable_d":0.031623,"r_dc":0.0000528215,"r_ac_50":0.0000554462,"r_ac_75":0.0000682415,"r_ac_200":0.0000987533,"amp_75":1050},"Avocet":{"size":1113.0,"stranding":"42\\/7","al_d_ind":0.00413512,"st_d_ind":0.00229616,"cable_d":0.0316992,"r_dc":0.0000495407,"r_ac_50":0.0000524934,"r_ac_75":0.0000646325,"r_ac_200":0.0000928478,"amp_75":1080},"Bluejay":{"size":1113.0,"stranding":"45\\/7","al_d_ind":0.00399542,"st_d_ind":0.00266446,"cable_d":0.0319786,"r_dc":0.0000492126,"r_ac_50":0.0000521654,"r_ac_75":0.0000643045,"r_ac_200":0.0000925197,"amp_75":1085},"Bullfinch":{"size":1113.0,"stranding":"48\\/7","al_d_ind":0.00386842,"st_d_ind":0.00300736,"cable_d":0.0322326,"r_dc":0.0000492126,"r_ac_50":0.0000521654,"r_ac_75":0.0000639764,"r_ac_200":0.0000921916,"amp_75":1090},"Finch":{"size":1113.0,"stranding":"54\\/19","al_d_ind":0.00364744,"st_d_ind":0.00218694,"cable_d":0.0328422,"r_dc":0.0000492126,"r_ac_50":0.0000518373,"r_ac_75":0.0000639764,"r_ac_200":0.0000921916,"amp_75":110
0},"Oxbird":{"size":1192.5,"stranding":"42\\/7","al_d_ind":0.0042799,"st_d_ind":0.00237744,"cable_d":0.0328168,"r_dc":0.0000462598,"r_ac_50":0.0000492126,"r_ac_75":0.0000603675,"r_ac_200":0.0000866142,"amp_75":1130},"Bunting":{"size":1192.5,"stranding":"45\\/7","al_d_ind":0.00413512,"st_d_ind":0.0027559,"cable_d":0.0330708,"r_dc":0.0000459318,"r_ac_50":0.0000492126,"r_ac_75":0.0000600394,"r_ac_200":0.0000862861,"amp_75":1135},"Cormorant":{"size":1192.5,"stranding":"48\\/7","al_d_ind":0.00400304,"st_d_ind":0.00311404,"cable_d":0.0333502,"r_dc":0.0000459318,"r_ac_50":0.0000488845,"r_ac_75":0.0000600394,"r_ac_200":0.0000862861,"amp_75":1140},"Grackle":{"size":1192.5,"stranding":"54\\/19","al_d_ind":0.00377444,"st_d_ind":0.00226568,"cable_d":0.0339852,"r_dc":0.0000459318,"r_ac_50":0.0000485564,"r_ac_75":0.0000597113,"r_ac_200":0.000085958,"amp_75":1145},"Scissortail":{"size":1272.0,"stranding":"42\\/7","al_d_ind":0.0044196,"st_d_ind":0.00245618,"cable_d":0.0338836,"r_dc":0.0000433071,"r_ac_50":0.0000465879,"r_ac_75":0.0000567585,"r_ac_200":0.0000813648,"amp_75":1175},"Bittern":{"size":1272.0,"stranding":"45\\/7","al_d_ind":0.00426974,"st_d_ind":0.00284734,"cable_d":0.034163,"r_dc":0.000042979,"r_ac_50":0.0000462598,"r_ac_75":0.0000564304,"r_ac_200":0.0000810367,"amp_75":1180},"Diver":{"size":1272.0,"stranding":"48\\/7","al_d_ind":0.00413512,"st_d_ind":0.00321564,"cable_d":0.0344678,"r_dc":0.000042979,"r_ac_50":0.0000459318,"r_ac_75":0.0000564304,"r_ac_200":0.0000807087,"amp_75":1185},"Pheasant":{"size":1272.0,"stranding":"54\\/19","al_d_ind":0.0038989,"st_d_ind":0.00233934,"cable_d":0.0350774,"r_dc":0.000042979,"r_ac_50":0.0000456037,"r_ac_75":0.0000561024,"r_ac_200":0.0000807087,"amp_75":1190},"Ringdove":{"size":1351.5,"stranding":"42\\/7","al_d_ind":0.00455676,"st_d_ind":0.00253238,"cable_d":0.0349504,"r_dc":0.0000406824,"r_ac_50":0.0000439633,"r_ac_75":0.0000534777,"r_ac_200":0.0000767717,"amp_75":1220},"Dipper":{"size":1351.5,"stranding":"45\\/7","al_d_ind":0.004401
82,"st_d_ind":0.0029337,"cable_d":0.0352044,"r_dc":0.0000406824,"r_ac_50":0.0000436352,"r_ac_75":0.0000534777,"r_ac_200":0.0000764436,"amp_75":1225},"-none-":{"size":1351.5,"stranding":"48\\/7","al_d_ind":0.00426212,"st_d_ind":0.0033147,"cable_d":0.0355092,"r_dc":0.0000403543,"r_ac_50":0.0000436352,"r_ac_75":0.0000531496,"r_ac_200":0.0000761155,"amp_75":1230},"Martin":{"size":1351.5,"stranding":"54\\/19","al_d_ind":0.00401828,"st_d_ind":0.00241046,"cable_d":0.0361696,"r_dc":0.0000403543,"r_ac_50":0.0000433071,"r_ac_75":0.0000528215,"r_ac_200":0.0000757874,"amp_75":1235},"Popinjay":{"size":1431.0,"stranding":"42\\/7","al_d_ind":0.00468884,"st_d_ind":0.00260604,"cable_d":0.035941,"r_dc":0.0000383858,"r_ac_50":0.0000416667,"r_ac_75":0.000050853,"r_ac_200":0.0000725066,"amp_75":1260},"Bobolink":{"size":1431.0,"stranding":"45\\/7","al_d_ind":0.00452882,"st_d_ind":0.00302006,"cable_d":0.0362204,"r_dc":0.0000383858,"r_ac_50":0.0000416667,"r_ac_75":0.0000505249,"r_ac_200":0.0000721785,"amp_75":1265},"Wagtail":{"size":1431.0,"stranding":"48\\/7","al_d_ind":0.00438658,"st_d_ind":0.00341122,"cable_d":0.0365506,"r_dc":0.0000383858,"r_ac_50":0.0000413386,"r_ac_75":0.0000501969,"r_ac_200":0.0000718504,"amp_75":1270},"Plover":{"size":1431.0,"stranding":"54\\/19","al_d_ind":0.00413512,"st_d_ind":0.00248158,"cable_d":0.037211,"r_dc":0.0000383858,"r_ac_50":0.0000410105,"r_ac_75":0.0000501969,"r_ac_200":0.0000718504,"amp_75":1280},"Nuthatch":{"size":1510.5,"stranding":"45\\/7","al_d_ind":0.00465328,"st_d_ind":0.00310134,"cable_d":0.0372364,"r_dc":0.0000364173,"r_ac_50":0.0000396982,"r_ac_75":0.0000482283,"r_ac_200":0.0000685696,"amp_75":1310},"Parrot":{"size":1510.5,"stranding":"54\\/19","al_d_ind":0.00424688,"st_d_ind":0.00254762,"cable_d":0.038227,"r_dc":0.0000360892,"r_ac_50":0.000039042,"r_ac_75":0.0000475722,"r_ac_200":0.0000679134,"amp_75":1325},"Ratite":{"size":1590.0,"stranding":"42\\/7","al_d_ind":0.00494284,"st_d_ind":0.00274574,"cable_d":0.0378968,"r_dc":0.0000344488,"r_ac_
50":0.0000380577,"r_ac_75":0.0000459318,"r_ac_200":0.0000652887,"amp_75":1340},"Lapwing":{"size":1590.0,"stranding":"45\\/7","al_d_ind":0.0047752,"st_d_ind":0.00318262,"cable_d":0.0382016,"r_dc":0.0000344488,"r_ac_50":0.0000377297,"r_ac_75":0.0000459318,"r_ac_200":0.0000652887,"amp_75":1350},"Hornbill":{"size":1590.0,"stranding":"48\\/7","al_d_ind":0.0046228,"st_d_ind":0.00359664,"cable_d":0.0385318,"r_dc":0.0000344488,"r_ac_50":0.0000374016,"r_ac_75":0.0000456037,"r_ac_200":0.0000649606,"amp_75":1355},"Falcon":{"size":1590.0,"stranding":"54\\/19","al_d_ind":0.00435864,"st_d_ind":0.0026162,"cable_d":0.039243,"r_dc":0.0000344488,"r_ac_50":0.0000374016,"r_ac_75":0.0000452756,"r_ac_200":0.0000646325,"amp_75":1365},"Chukar":{"size":1780.0,"stranding":"84\\/19","al_d_ind":0.00369824,"st_d_ind":0.00221996,"cable_d":0.0406908,"r_dc":0.0000308399,"r_ac_50":0.0000341207,"r_ac_75":0.0000400262,"r_ac_200":0.0000554462,"amp_75":1465},"Seahawk":{"size":1869.0,"stranding":"68\\/7","al_d_ind":0.00421132,"st_d_ind":0.00233934,"cable_d":0.0407162,"r_dc":0.0000295276,"r_ac_50":0.0000331365,"r_ac_75":0.000039042,"r_ac_200":0.0000534777,"amp_75":1490},"Mockingbird":{"size":2034.5,"stranding":"72\\/7","al_d_ind":0.00426974,"st_d_ind":0.00284734,"cable_d":0.0426974,"r_dc":0.000027231,"r_ac_50":0.0000308399,"r_ac_75":0.0000360892,"r_ac_200":0.0000495407,"amp_75":1565},"Roadrunner":{"size":2057.5,"stranding":"76\\/19","al_d_ind":0.0041783,"st_d_ind":0.00195072,"cable_d":0.04318,"r_dc":0.0000269029,"r_ac_50":0.0000305118,"r_ac_75":0.0000354331,"r_ac_200":0.0000488845,"amp_75":1580},"Bluebird":{"size":2156.0,"stranding":"84\\/19","al_d_ind":0.00406908,"st_d_ind":0.00244094,"cable_d":0.0447548,"r_dc":0.0000255906,"r_ac_50":0.0000288714,"r_ac_75":0.0000337927,"r_ac_200":0.0000462598,"amp_75":1640},"Kiwi":{"size":2167.0,"stranding":"72\\/7","al_d_ind":0.0044069,"st_d_ind":0.00293878,"cable_d":0.044069,"r_dc":0.0000255906,"r_ac_50":0.0000295276,"r_ac_75":0.0000341207,"r_ac_200":0.0000465879,"amp
_75":1620},"Thrasher":{"size":2312.0,"stranding":"76\\/19","al_d_ind":0.00442976,"st_d_ind":0.00206756,"cable_d":0.0457708,"r_dc":0.0000239501,"r_ac_50":0.0000278871,"r_ac_75":0.0000321522,"r_ac_200":0.0000439633,"amp_75":1690},"Joree":{"size":2515.0,"stranding":"76\\/19","al_d_ind":0.00462026,"st_d_ind":0.00215646,"cable_d":0.047752,"r_dc":0.0000219816,"r_ac_50":0.0000259186,"r_ac_75":0.0000301837,"r_ac_200":0.0000406824,"amp_75":1765}}')
}
self.meta = {
'aac' : {
'D' : self.models[self.model][self.code_name]['cable_d'],
'R' : self.models[self.model][self.code_name][['r_ac_25', 'r_ac_50', 'r_ac_75']].values,
'T_range' : [298, 323, 348]
},
'acsr' : {
'D' : self.models[self.model][self.code_name]['cable_d'],
'R' : self.models[self.model][self.code_name][['r_ac_25', 'r_ac_50', 'r_ac_75']].values,
'T_range' : [298, 323, 348]
},
'acss' : {
'D' : self.models[self.model][self.code_name]['cable_d'],
'R' : self.models[self.model][self.code_name][['r_ac_50', 'r_ac_75', 'r_ac_200']].values,
'T_range' : [323, 348, 473]
}
}
| 272.8125
| 20,451
| 0.691123
| 10,206
| 52,380
| 3.258083
| 0.153047
| 0.058192
| 0.04932
| 0.038253
| 0.447522
| 0.416546
| 0.388789
| 0.339679
| 0.284554
| 0.244045
| 0
| 0.385503
| 0.053169
| 52,380
| 191
| 20,452
| 274.240838
| 0.284968
| 0.001527
| 0
| 0.233871
| 0
| 0.024194
| 0.899102
| 0.895955
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.024194
| null | null | 0.016129
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
96c6adbbebadc9888e867e3b32768d9db27697a0
| 30,730
|
py
|
Python
|
pybind/slxos/v16r_1_00b/brocade_mpls_rpc/show_mpls_rsvp_neighbor_detail/output/mpls_rsvp_neighbor_detail/show_mpls_neighbor_brief/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/brocade_mpls_rpc/show_mpls_rsvp_neighbor_detail/output/mpls_rsvp_neighbor_detail/show_mpls_neighbor_brief/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/brocade_mpls_rpc/show_mpls_rsvp_neighbor_detail/output/mpls_rsvp_neighbor_detail/show_mpls_neighbor_brief/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class show_mpls_neighbor_brief(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls - based on the path /brocade_mpls_rpc/show-mpls-rsvp-neighbor-detail/output/mpls-rsvp-neighbor-detail/show-mpls-neighbor-brief. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__mpls_rsvp_neighbor_ip_addr','__mpls_rsvp_neighbor_interface','__mpls_rsvp_neighbor_status','__mpls_rsvp_neighbor_last_status_change','__mpls_rsvp_neighbor_hello_tx','__mpls_rsvp_neighbor_hello_rx','__mpls_rsvp_neighbor_refresh_reduction_support','__mpls_rsvp_neighbor_msg_id_support',)
_yang_name = 'show-mpls-neighbor-brief'
_rest_name = 'show-mpls-neighbor-brief'
_pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    # Build this YANG container: resolve the path helper and extension
    # methods (explicit kwarg wins, then the parent's value, then False),
    # create one YANGDynClass wrapper per leaf, and optionally copy changed
    # leaves from a single positional source object.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      # Inherit the parent's helper when none was supplied explicitly.
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      # Inherit the parent's extmethods when none was supplied explicitly.
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    # One YANGDynClass wrapper per leaf of this container (string, boolean
    # and uint32 leaves, plus one restricted-pattern IPv4 address leaf).
    self.__mpls_rsvp_neighbor_interface = YANGDynClass(base=unicode, is_leaf=True, yang_name="mpls-rsvp-neighbor-interface", rest_name="mpls-rsvp-neighbor-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
    self.__mpls_rsvp_neighbor_status = YANGDynClass(base=unicode, is_leaf=True, yang_name="mpls-rsvp-neighbor-status", rest_name="mpls-rsvp-neighbor-status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
    self.__mpls_rsvp_neighbor_msg_id_support = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mpls-rsvp-neighbor-msg-id-support", rest_name="mpls-rsvp-neighbor-msg-id-support", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__mpls_rsvp_neighbor_last_status_change = YANGDynClass(base=unicode, is_leaf=True, yang_name="mpls-rsvp-neighbor-last-status-change", rest_name="mpls-rsvp-neighbor-last-status-change", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
    self.__mpls_rsvp_neighbor_hello_rx = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-rsvp-neighbor-hello-rx", rest_name="mpls-rsvp-neighbor-hello-rx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
    # NOTE(review): this leaf's yang_name/rest_name use underscores, unlike
    # every sibling leaf (hyphenated) — generated this way; confirm against
    # the brocade-mpls YANG module before "fixing".
    self.__mpls_rsvp_neighbor_ip_addr = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mpls_rsvp_neighbor_ip_addr", rest_name="mpls_rsvp_neighbor_ip_addr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='inet:ipv4-address', is_config=True)
    self.__mpls_rsvp_neighbor_hello_tx = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-rsvp-neighbor-hello-tx", rest_name="mpls-rsvp-neighbor-hello-tx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
    self.__mpls_rsvp_neighbor_refresh_reduction_support = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mpls-rsvp-neighbor-refresh-reduction-support", rest_name="mpls-rsvp-neighbor-refresh-reduction-support", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    load = kwargs.pop("load", None)
    if args:
      # At most one positional source object is accepted; it must expose
      # every pyangbind element of this container.
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      # Copy only the leaves the source object has actually changed.
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_mpls_rpc', u'show-mpls-rsvp-neighbor-detail', u'output', u'mpls-rsvp-neighbor-detail', u'show-mpls-neighbor-brief']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'show-mpls-rsvp-neighbor-detail', u'output', u'mpls-rsvp-neighbor-detail', u'show-mpls-neighbor-brief']
def _get_mpls_rsvp_neighbor_ip_addr(self):
"""
Getter method for mpls_rsvp_neighbor_ip_addr, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp_neighbor_detail/output/mpls_rsvp_neighbor_detail/show_mpls_neighbor_brief/mpls_rsvp_neighbor_ip_addr (inet:ipv4-address)
YANG Description: MPLS RSVP Neighbor IP address
"""
return self.__mpls_rsvp_neighbor_ip_addr
  def _set_mpls_rsvp_neighbor_ip_addr(self, v, load=False):
    """
    Setter method for mpls_rsvp_neighbor_ip_addr, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp_neighbor_detail/output/mpls_rsvp_neighbor_detail/show_mpls_neighbor_brief/mpls_rsvp_neighbor_ip_addr (inet:ipv4-address)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mpls_rsvp_neighbor_ip_addr is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mpls_rsvp_neighbor_ip_addr() directly.
    YANG Description: MPLS RSVP Neighbor IP address
    """
    # If a YANGDynClass-wrapped value was handed in, unwrap it to its
    # underlying type before re-wrapping below.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # Validation (IPv4 dotted-quad pattern) happens inside the
    # RestrictedClassType/YANGDynClass constructor; a failure is surfaced
    # as a ValueError describing the expected generated type.
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mpls_rsvp_neighbor_ip_addr", rest_name="mpls_rsvp_neighbor_ip_addr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='inet:ipv4-address', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """mpls_rsvp_neighbor_ip_addr must be of a type compatible with inet:ipv4-address""",
          'defined-type': "inet:ipv4-address",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mpls_rsvp_neighbor_ip_addr", rest_name="mpls_rsvp_neighbor_ip_addr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='inet:ipv4-address', is_config=True)""",
        })
    self.__mpls_rsvp_neighbor_ip_addr = t
    # Notify a registered change hook, if any.
    if hasattr(self, '_set'):
      self._set()
  def _unset_mpls_rsvp_neighbor_ip_addr(self):
    # Reset the leaf to a fresh, unset YANGDynClass wrapper (same arguments
    # as the one built in __init__).
    self.__mpls_rsvp_neighbor_ip_addr = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mpls_rsvp_neighbor_ip_addr", rest_name="mpls_rsvp_neighbor_ip_addr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='inet:ipv4-address', is_config=True)
def _get_mpls_rsvp_neighbor_interface(self):
"""
Getter method for mpls_rsvp_neighbor_interface, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp_neighbor_detail/output/mpls_rsvp_neighbor_detail/show_mpls_neighbor_brief/mpls_rsvp_neighbor_interface (string)
YANG Description: RSVP neighbor interface
"""
return self.__mpls_rsvp_neighbor_interface
  def _set_mpls_rsvp_neighbor_interface(self, v, load=False):
    """
    Setter method for mpls_rsvp_neighbor_interface, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp_neighbor_detail/output/mpls_rsvp_neighbor_detail/show_mpls_neighbor_brief/mpls_rsvp_neighbor_interface (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mpls_rsvp_neighbor_interface is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mpls_rsvp_neighbor_interface() directly.
    YANG Description: RSVP neighbor interface
    """
    # If a YANGDynClass-wrapped value was handed in, unwrap it to its
    # underlying type before re-wrapping below.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # Type validation happens inside the YANGDynClass constructor; a failure
    # is surfaced as a ValueError describing the expected generated type.
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="mpls-rsvp-neighbor-interface", rest_name="mpls-rsvp-neighbor-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """mpls_rsvp_neighbor_interface must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="mpls-rsvp-neighbor-interface", rest_name="mpls-rsvp-neighbor-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)""",
        })
    self.__mpls_rsvp_neighbor_interface = t
    # Notify a registered change hook, if any.
    if hasattr(self, '_set'):
      self._set()
  def _unset_mpls_rsvp_neighbor_interface(self):
    # Reset the leaf to a fresh, unset YANGDynClass wrapper (same arguments
    # as the one built in __init__).
    self.__mpls_rsvp_neighbor_interface = YANGDynClass(base=unicode, is_leaf=True, yang_name="mpls-rsvp-neighbor-interface", rest_name="mpls-rsvp-neighbor-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
def _get_mpls_rsvp_neighbor_status(self):
"""
Getter method for mpls_rsvp_neighbor_status, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp_neighbor_detail/output/mpls_rsvp_neighbor_detail/show_mpls_neighbor_brief/mpls_rsvp_neighbor_status (string)
YANG Description: Status of MPLS RSVP neighbor
"""
return self.__mpls_rsvp_neighbor_status
  def _set_mpls_rsvp_neighbor_status(self, v, load=False):
    """
    Setter method for mpls_rsvp_neighbor_status, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp_neighbor_detail/output/mpls_rsvp_neighbor_detail/show_mpls_neighbor_brief/mpls_rsvp_neighbor_status (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mpls_rsvp_neighbor_status is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mpls_rsvp_neighbor_status() directly.
    YANG Description: Status of MPLS RSVP neighbor
    """
    # If a YANGDynClass-wrapped value was handed in, unwrap it to its
    # underlying type before re-wrapping below.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # Type validation happens inside the YANGDynClass constructor; a failure
    # is surfaced as a ValueError describing the expected generated type.
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="mpls-rsvp-neighbor-status", rest_name="mpls-rsvp-neighbor-status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """mpls_rsvp_neighbor_status must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="mpls-rsvp-neighbor-status", rest_name="mpls-rsvp-neighbor-status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)""",
        })
    self.__mpls_rsvp_neighbor_status = t
    # Notify a registered change hook, if any.
    if hasattr(self, '_set'):
      self._set()
  def _unset_mpls_rsvp_neighbor_status(self):
    # Reset the leaf to a fresh, unset YANGDynClass wrapper (same arguments
    # as the one built in __init__).
    self.__mpls_rsvp_neighbor_status = YANGDynClass(base=unicode, is_leaf=True, yang_name="mpls-rsvp-neighbor-status", rest_name="mpls-rsvp-neighbor-status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
def _get_mpls_rsvp_neighbor_last_status_change(self):
"""
Getter method for mpls_rsvp_neighbor_last_status_change, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp_neighbor_detail/output/mpls_rsvp_neighbor_detail/show_mpls_neighbor_brief/mpls_rsvp_neighbor_last_status_change (string)
YANG Description: Time since the status of RSVP neighbor last changed
"""
return self.__mpls_rsvp_neighbor_last_status_change
  def _set_mpls_rsvp_neighbor_last_status_change(self, v, load=False):
    """
    Setter method for mpls_rsvp_neighbor_last_status_change, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp_neighbor_detail/output/mpls_rsvp_neighbor_detail/show_mpls_neighbor_brief/mpls_rsvp_neighbor_last_status_change (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mpls_rsvp_neighbor_last_status_change is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mpls_rsvp_neighbor_last_status_change() directly.
    YANG Description: Time since the status of RSVP neighbor last changed
    """
    # If a YANGDynClass-wrapped value was handed in, unwrap it to its
    # underlying type before re-wrapping below.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # Type validation happens inside the YANGDynClass constructor; a failure
    # is surfaced as a ValueError describing the expected generated type.
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="mpls-rsvp-neighbor-last-status-change", rest_name="mpls-rsvp-neighbor-last-status-change", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """mpls_rsvp_neighbor_last_status_change must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="mpls-rsvp-neighbor-last-status-change", rest_name="mpls-rsvp-neighbor-last-status-change", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)""",
        })
    self.__mpls_rsvp_neighbor_last_status_change = t
    # Notify a registered change hook, if any.
    if hasattr(self, '_set'):
      self._set()
  def _unset_mpls_rsvp_neighbor_last_status_change(self):
    # Reset the leaf to a fresh, unset YANGDynClass wrapper (same arguments
    # as the one built in __init__).
    self.__mpls_rsvp_neighbor_last_status_change = YANGDynClass(base=unicode, is_leaf=True, yang_name="mpls-rsvp-neighbor-last-status-change", rest_name="mpls-rsvp-neighbor-last-status-change", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
def _get_mpls_rsvp_neighbor_hello_tx(self):
"""
Getter method for mpls_rsvp_neighbor_hello_tx, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp_neighbor_detail/output/mpls_rsvp_neighbor_detail/show_mpls_neighbor_brief/mpls_rsvp_neighbor_hello_tx (uint32)
YANG Description: Number of RSVP Hello messages transmitted for the neighbor
"""
return self.__mpls_rsvp_neighbor_hello_tx
def _set_mpls_rsvp_neighbor_hello_tx(self, v, load=False):
    """
    Setter method for mpls_rsvp_neighbor_hello_tx, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp_neighbor_detail/output/mpls_rsvp_neighbor_detail/show_mpls_neighbor_brief/mpls_rsvp_neighbor_hello_tx (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mpls_rsvp_neighbor_hello_tx is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mpls_rsvp_neighbor_hello_tx() directly.

    YANG Description: Number of RSVP Hello messages transmitted for the neighbor
    """
    # Values wrapped by a typed container expose a '_utype' hook; unwrap
    # before re-validating through YANGDynClass below.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce v into the uint32 leaf type (range 0..4294967295).
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-rsvp-neighbor-hello-tx", rest_name="mpls-rsvp-neighbor-hello-tx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
        # Translate type-machinery failures into a structured ValueError
        # describing the leaf and the generator call that rejected the value.
        raise ValueError({
            'error-string': """mpls_rsvp_neighbor_hello_tx must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-rsvp-neighbor-hello-tx", rest_name="mpls-rsvp-neighbor-hello-tx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""",
        })
    self.__mpls_rsvp_neighbor_hello_tx = t
    # Notify the containing object, when it supports change notification.
    if hasattr(self, '_set'):
        self._set()
def _unset_mpls_rsvp_neighbor_hello_tx(self):
    # Reset the 'mpls-rsvp-neighbor-hello-tx' uint32 leaf to a fresh default
    # YANGDynClass instance with the same YANG metadata as the setter.
    self.__mpls_rsvp_neighbor_hello_tx = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-rsvp-neighbor-hello-tx", rest_name="mpls-rsvp-neighbor-hello-tx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
def _get_mpls_rsvp_neighbor_hello_rx(self):
    """
    Getter method for mpls_rsvp_neighbor_hello_rx, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp_neighbor_detail/output/mpls_rsvp_neighbor_detail/show_mpls_neighbor_brief/mpls_rsvp_neighbor_hello_rx (uint32)

    YANG Description: Number of RSVP Hello messages received for the neighbor
    """
    # Plain accessor; the stored value is the YANGDynClass wrapper, not a raw int.
    return self.__mpls_rsvp_neighbor_hello_rx
def _set_mpls_rsvp_neighbor_hello_rx(self, v, load=False):
    """
    Setter method for mpls_rsvp_neighbor_hello_rx, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp_neighbor_detail/output/mpls_rsvp_neighbor_detail/show_mpls_neighbor_brief/mpls_rsvp_neighbor_hello_rx (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mpls_rsvp_neighbor_hello_rx is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mpls_rsvp_neighbor_hello_rx() directly.

    YANG Description: Number of RSVP Hello messages received for the neighbor
    """
    # Values wrapped by a typed container expose a '_utype' hook; unwrap
    # before re-validating through YANGDynClass below.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce v into the uint32 leaf type (range 0..4294967295).
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-rsvp-neighbor-hello-rx", rest_name="mpls-rsvp-neighbor-hello-rx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
        # Translate type-machinery failures into a structured ValueError
        # describing the leaf and the generator call that rejected the value.
        raise ValueError({
            'error-string': """mpls_rsvp_neighbor_hello_rx must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-rsvp-neighbor-hello-rx", rest_name="mpls-rsvp-neighbor-hello-rx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""",
        })
    self.__mpls_rsvp_neighbor_hello_rx = t
    # Notify the containing object, when it supports change notification.
    if hasattr(self, '_set'):
        self._set()
def _unset_mpls_rsvp_neighbor_hello_rx(self):
    # Reset the 'mpls-rsvp-neighbor-hello-rx' uint32 leaf to a fresh default
    # YANGDynClass instance with the same YANG metadata as the setter.
    self.__mpls_rsvp_neighbor_hello_rx = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-rsvp-neighbor-hello-rx", rest_name="mpls-rsvp-neighbor-hello-rx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
def _get_mpls_rsvp_neighbor_refresh_reduction_support(self):
    """
    Getter method for mpls_rsvp_neighbor_refresh_reduction_support, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp_neighbor_detail/output/mpls_rsvp_neighbor_detail/show_mpls_neighbor_brief/mpls_rsvp_neighbor_refresh_reduction_support (boolean)

    YANG Description: Status of Refresh Reduction support for the RSVP neighbor
    """
    # Plain accessor; the stored value is the YANGDynClass boolean wrapper.
    return self.__mpls_rsvp_neighbor_refresh_reduction_support
def _set_mpls_rsvp_neighbor_refresh_reduction_support(self, v, load=False):
    """
    Setter method for mpls_rsvp_neighbor_refresh_reduction_support, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp_neighbor_detail/output/mpls_rsvp_neighbor_detail/show_mpls_neighbor_brief/mpls_rsvp_neighbor_refresh_reduction_support (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mpls_rsvp_neighbor_refresh_reduction_support is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mpls_rsvp_neighbor_refresh_reduction_support() directly.

    YANG Description: Status of Refresh Reduction support for the RSVP neighbor
    """
    # Values wrapped by a typed container expose a '_utype' hook; unwrap
    # before re-validating through YANGDynClass below.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce v into the YANG boolean leaf type.
        t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="mpls-rsvp-neighbor-refresh-reduction-support", rest_name="mpls-rsvp-neighbor-refresh-reduction-support", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    except (TypeError, ValueError):
        # Translate type-machinery failures into a structured ValueError
        # describing the leaf and the generator call that rejected the value.
        raise ValueError({
            'error-string': """mpls_rsvp_neighbor_refresh_reduction_support must be of a type compatible with boolean""",
            'defined-type': "boolean",
            'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mpls-rsvp-neighbor-refresh-reduction-support", rest_name="mpls-rsvp-neighbor-refresh-reduction-support", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
        })
    self.__mpls_rsvp_neighbor_refresh_reduction_support = t
    # Notify the containing object, when it supports change notification.
    if hasattr(self, '_set'):
        self._set()
def _unset_mpls_rsvp_neighbor_refresh_reduction_support(self):
    # Reset the boolean leaf to a fresh default YANGDynClass instance
    # with the same YANG metadata as the setter.
    self.__mpls_rsvp_neighbor_refresh_reduction_support = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mpls-rsvp-neighbor-refresh-reduction-support", rest_name="mpls-rsvp-neighbor-refresh-reduction-support", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_mpls_rsvp_neighbor_msg_id_support(self):
    """
    Getter method for mpls_rsvp_neighbor_msg_id_support, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp_neighbor_detail/output/mpls_rsvp_neighbor_detail/show_mpls_neighbor_brief/mpls_rsvp_neighbor_msg_id_support (boolean)

    YANG Description: Status of Message ID support for the RSVP neighbor
    """
    # Plain accessor; the stored value is the YANGDynClass boolean wrapper.
    return self.__mpls_rsvp_neighbor_msg_id_support
def _set_mpls_rsvp_neighbor_msg_id_support(self, v, load=False):
    """
    Setter method for mpls_rsvp_neighbor_msg_id_support, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp_neighbor_detail/output/mpls_rsvp_neighbor_detail/show_mpls_neighbor_brief/mpls_rsvp_neighbor_msg_id_support (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mpls_rsvp_neighbor_msg_id_support is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mpls_rsvp_neighbor_msg_id_support() directly.

    YANG Description: Status of Message ID support for the RSVP neighbor
    """
    # Values wrapped by a typed container expose a '_utype' hook; unwrap
    # before re-validating through YANGDynClass below.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce v into the YANG boolean leaf type.
        t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="mpls-rsvp-neighbor-msg-id-support", rest_name="mpls-rsvp-neighbor-msg-id-support", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    except (TypeError, ValueError):
        # Translate type-machinery failures into a structured ValueError
        # describing the leaf and the generator call that rejected the value.
        raise ValueError({
            'error-string': """mpls_rsvp_neighbor_msg_id_support must be of a type compatible with boolean""",
            'defined-type': "boolean",
            'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mpls-rsvp-neighbor-msg-id-support", rest_name="mpls-rsvp-neighbor-msg-id-support", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
        })
    self.__mpls_rsvp_neighbor_msg_id_support = t
    # Notify the containing object, when it supports change notification.
    if hasattr(self, '_set'):
        self._set()
def _unset_mpls_rsvp_neighbor_msg_id_support(self):
    # Reset the boolean leaf to a fresh default YANGDynClass instance
    # with the same YANG metadata as the setter.
    self.__mpls_rsvp_neighbor_msg_id_support = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mpls-rsvp-neighbor-msg-id-support", rest_name="mpls-rsvp-neighbor-msg-id-support", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
# Expose each private getter/setter pair as a normal attribute.  The
# property builtin is referenced through the Python 2 '__builtin__' module.
mpls_rsvp_neighbor_ip_addr = __builtin__.property(_get_mpls_rsvp_neighbor_ip_addr, _set_mpls_rsvp_neighbor_ip_addr)
mpls_rsvp_neighbor_interface = __builtin__.property(_get_mpls_rsvp_neighbor_interface, _set_mpls_rsvp_neighbor_interface)
mpls_rsvp_neighbor_status = __builtin__.property(_get_mpls_rsvp_neighbor_status, _set_mpls_rsvp_neighbor_status)
mpls_rsvp_neighbor_last_status_change = __builtin__.property(_get_mpls_rsvp_neighbor_last_status_change, _set_mpls_rsvp_neighbor_last_status_change)
mpls_rsvp_neighbor_hello_tx = __builtin__.property(_get_mpls_rsvp_neighbor_hello_tx, _set_mpls_rsvp_neighbor_hello_tx)
mpls_rsvp_neighbor_hello_rx = __builtin__.property(_get_mpls_rsvp_neighbor_hello_rx, _set_mpls_rsvp_neighbor_hello_rx)
mpls_rsvp_neighbor_refresh_reduction_support = __builtin__.property(_get_mpls_rsvp_neighbor_refresh_reduction_support, _set_mpls_rsvp_neighbor_refresh_reduction_support)
mpls_rsvp_neighbor_msg_id_support = __builtin__.property(_get_mpls_rsvp_neighbor_msg_id_support, _set_mpls_rsvp_neighbor_msg_id_support)
# Name -> property registry of the YANG leaves defined on this class.
_pyangbind_elements = {'mpls_rsvp_neighbor_ip_addr': mpls_rsvp_neighbor_ip_addr, 'mpls_rsvp_neighbor_interface': mpls_rsvp_neighbor_interface, 'mpls_rsvp_neighbor_status': mpls_rsvp_neighbor_status, 'mpls_rsvp_neighbor_last_status_change': mpls_rsvp_neighbor_last_status_change, 'mpls_rsvp_neighbor_hello_tx': mpls_rsvp_neighbor_hello_tx, 'mpls_rsvp_neighbor_hello_rx': mpls_rsvp_neighbor_hello_rx, 'mpls_rsvp_neighbor_refresh_reduction_support': mpls_rsvp_neighbor_refresh_reduction_support, 'mpls_rsvp_neighbor_msg_id_support': mpls_rsvp_neighbor_msg_id_support, }
| 76.825
| 574
| 0.77478
| 4,443
| 30,730
| 4.993023
| 0.045915
| 0.148215
| 0.19185
| 0.057699
| 0.930581
| 0.902452
| 0.880499
| 0.840561
| 0.832312
| 0.826497
| 0
| 0.01149
| 0.110674
| 30,730
| 399
| 575
| 77.017544
| 0.800249
| 0.232672
| 0
| 0.469298
| 0
| 0.048246
| 0.36265
| 0.263977
| 0
| 0
| 0
| 0
| 0
| 1
| 0.118421
| false
| 0
| 0.035088
| 0
| 0.27193
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8c27fc9500326f0e58f92c0f317904d02cf62470
| 135
|
py
|
Python
|
pywolf/views/pywolf/create_user/__init__.py
|
tevawolf/pywolf
|
94e3c26d8c3b279990624f23658e22ab00eead46
|
[
"BSD-3-Clause"
] | null | null | null |
pywolf/views/pywolf/create_user/__init__.py
|
tevawolf/pywolf
|
94e3c26d8c3b279990624f23658e22ab00eead46
|
[
"BSD-3-Clause"
] | null | null | null |
pywolf/views/pywolf/create_user/__init__.py
|
tevawolf/pywolf
|
94e3c26d8c3b279990624f23658e22ab00eead46
|
[
"BSD-3-Clause"
] | null | null | null |
from .create_user import create_user
from .confirm_create_user import confirm_create_user
from .exe_create_user import exe_create_user
| 33.75
| 52
| 0.888889
| 22
| 135
| 5
| 0.272727
| 0.545455
| 0.436364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 135
| 3
| 53
| 45
| 0.894309
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8c5e00e84b9d412ae2a7a3cd0bfca02a8eb550c3
| 135
|
py
|
Python
|
ma-poca/mapoca/mapoca/trainers/trainer/__init__.py
|
Unity-Technologies/paper-ml-agents
|
885144ee25e86b929c5acee90b9b8dc059bcb9af
|
[
"Apache-2.0"
] | 1
|
2022-03-25T13:54:29.000Z
|
2022-03-25T13:54:29.000Z
|
ma-poca/mapoca/mapoca/trainers/trainer/__init__.py
|
Unity-Technologies/paper-ml-agents
|
885144ee25e86b929c5acee90b9b8dc059bcb9af
|
[
"Apache-2.0"
] | null | null | null |
ma-poca/mapoca/mapoca/trainers/trainer/__init__.py
|
Unity-Technologies/paper-ml-agents
|
885144ee25e86b929c5acee90b9b8dc059bcb9af
|
[
"Apache-2.0"
] | 1
|
2022-03-25T14:42:32.000Z
|
2022-03-25T14:42:32.000Z
|
from mapoca.trainers.trainer.trainer import Trainer # noqa
from mapoca.trainers.trainer.trainer_factory import TrainerFactory # noqa
| 45
| 74
| 0.837037
| 17
| 135
| 6.588235
| 0.470588
| 0.178571
| 0.321429
| 0.446429
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103704
| 135
| 2
| 75
| 67.5
| 0.92562
| 0.066667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4fc7e8e511b45f7efdc5f89c4c683919eeca3836
| 24,652
|
py
|
Python
|
server/data/email_classes/email_config.py
|
MikeSmvl/travelingstrategy
|
3d38c64f00bafdf2ca1079d14f9b618bce8307b0
|
[
"MIT"
] | null | null | null |
server/data/email_classes/email_config.py
|
MikeSmvl/travelingstrategy
|
3d38c64f00bafdf2ca1079d14f9b618bce8307b0
|
[
"MIT"
] | 2
|
2021-05-08T23:09:17.000Z
|
2021-09-02T11:27:08.000Z
|
server/data/email_classes/email_config.py
|
MikeSmvl/travelingstrategy
|
3d38c64f00bafdf2ca1079d14f9b618bce8307b0
|
[
"MIT"
] | 2
|
2020-10-14T01:18:32.000Z
|
2020-11-09T16:54:16.000Z
|
# CSS block for the newsletter HTML: on hover over .container, the .image
# dims (brightness 50%) and the centered .middle overlay fades in.
style = """<style>
.container {
position: relative;
width: 50%;
}
.image {
opacity: 1;
display: block;
width: 100%;
height: auto;
transition: .5s ease;
backface-visibility: hidden;
}
.middle {
transition: .5s ease;
opacity: 0;
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
-ms-transform: translate(-50%, -50%);
text-align: center;
color:white;
}
.container:hover .image {
filter: brightness(50%);
transition: .2s;
}
.container:hover .middle {
opacity: 1;
}
</style>
"""
# Static HTML body of the newsletter e-mail: header card with the logo,
# the "TRAVEL OUTSIDE THE BOX" headline with a quote, and a "Visit
# website" call-to-action button.  Table-based layout with inline styles
# (required by most e-mail clients); do not reformat the string content.
message_body = """
<div style="font-size: 12px; font-family: Helvetica, serif, EmojiFont; font-weight: normal; font-style: normal; text-transform: none; text-indent: 0px; background-color: rgb(237, 240, 242); text-decoration: none; white-space: normal; word-spacing: 0px; letter-spacing: normal; font-variant-caps: normal;">
<div style="max-width:600px;margin:0 auto;">
<table align="center" border="0" cellspacing="0" cellpadding="0" role="presentation" style="width:600px;border-collapse:collapse;">
<tbody><tr>
<td align="center" valign="top" style="font-size:0;direction:ltr;border-collapse:collapse;padding:0;">
<div align="left" style="font-size:13px;vertical-align:top;display:inline-block;direction:ltr;width:600px;max-width:100%;">
<table width="100%" border="0" cellspacing="0" cellpadding="0" role="presentation" style="border-collapse:collapse;">
<tbody><tr>
<td valign="top" style="border-collapse:collapse;padding:0;">
<table width="100%" border="0" cellspacing="0" cellpadding="0" role="presentation" style="border-collapse:collapse;">
<tbody><tr>
<td align="left" style="font-size:0;border-collapse:collapse;padding:0;word-break:break-word;">
<table border="0" cellspacing="0" cellpadding="0" style="font-size:13px;font-family:Helvetica,Arial,sans-serif;width:600px;border-collapse:collapse;table-layout:auto;line-height:22px;">
<tbody><tr>
<td style="border-collapse:collapse;">
<table cellspacing="0" cellpadding="0" style="display:block;border-collapse:collapse;">
<tbody><tr>
<td height="16" style="border-collapse:collapse;line-height:16px;"> </td>
</tr>
</tbody></table>
<table bgcolor="white" cellspacing="0" cellpadding="0" style="width:600px;border-collapse:collapse;border-bottom-right-radius:0px;border-bottom-left-radius:0px;border-top-left-radius:8px;border-top-right-radius:8px;">
<tbody><tr>
<td height="46" style="border-collapse:collapse;"> </td>
</tr>
</tbody></table>
</td>
</tr>
</tbody></table>
</td>
</tr>
</tbody></table>
</td>
</tr>
</tbody></table>
</div>
</td>
</tr>
</tbody></table>
</div>
<div style="background-color:white;max-width:600px;margin:0 auto;">
<table align="center" bgcolor="white" border="0" cellspacing="0" cellpadding="0" role="presentation" style="width:600px;border-collapse:collapse;">
<tbody><tr>
<td align="center" valign="top" style="font-size:0;direction:ltr;border-collapse:collapse;padding:0;">
<div align="left" style="font-size:13px;vertical-align:top;display:inline-block;direction:ltr;width:600px;max-width:100%;">
<table width="100%" border="0" cellspacing="0" cellpadding="0" role="presentation" style="border-collapse:collapse;">
<tbody><tr>
<td valign="top" style="border-collapse:collapse;padding:0;">
<table width="100%" border="0" cellspacing="0" cellpadding="0" role="presentation" style="border-collapse:collapse;">
<tbody><tr>
<td align="left" style="font-size:0;border-collapse:collapse;padding:0;word-break:break-word;">
<table border="0" cellspacing="0" cellpadding="0" style="font-size:13px;font-family:Helvetica,Arial,sans-serif;width:600px;border-collapse:collapse;table-layout:auto;line-height:22px;">
<tbody><tr>
<th style="width:48px;"> </th>
<th>
<table border="0" cellspacing="0" cellpadding="0" style="font-size:13px;font-family:Helvetica,Arial,sans-serif;width:125.91px;border-collapse:collapse;table-layout:auto;line-height:22px;">
<tbody><tr>
<th width="56" height="56">
<table border="0" cellspacing="0" cellpadding="0" role="presentation" style="border-spacing:0;border-collapse:collapse;">
<tbody><tr>
<td style="width:56px;border-collapse:collapse;"><img data-imagetype="External" src="https://i.imgur.com/Z5A90af.png" alt="traveling strategy logo" style="font-size:13px;display:block;width:56px;text-decoration:none;border-width:0;border-style:none;outline:none;line-height:13px;"></td>
</tr>
</tbody></table>
</th>
<th width="16"> </th>
<th>
<div align="left" style="color: rgb(14, 19, 24); font-size: 14px; font-family: "Open Sans", Helvetica, Arial, sans-serif, serif, EmojiFont; font-weight: bold; line-height: 1.6;">
</div>
</th>
</tr>
</tbody></table>
</th>
<th align="right" valign="middle"><span style="color:#0E1318;font-size:14px;font-weight:bold;padding:0 0 4px 0;"></span><span>Traveling Newsletter<span> </span></span><span style="color:#6E7174;font-size:14px;font-weight:normal;">- Traveling Guide</span></th>
<th style="width:48px;"> </th>
</tr>
<tr>
<th colspan="4" style="height:36px;"> </th>
</tr>
</tbody></table>
</td>
</tr>
</tbody></table>
</td>
</tr>
</tbody></table>
</div>
</td>
</tr>
</tbody></table>
</div>
<div style="background-color:white;max-width:600px;margin:0 auto;">
<table align="center" bgcolor="white" border="0" cellspacing="0" cellpadding="0" role="presentation" style="width:600px;border-collapse:collapse;">
<tbody><tr>
<td align="center" valign="top" style="font-size:0;direction:ltr;border-collapse:collapse;padding:12px 0;">
<div align="left" style="font-size:13px;vertical-align:top;display:inline-block;direction:ltr;width:600px;max-width:100%;">
<table width="100%" border="0" cellspacing="0" cellpadding="0" role="presentation" style="border-collapse:collapse;">
<tbody><tr>
<td valign="top" style="border-collapse:collapse;padding:0;">
<table width="100%" border="0" cellspacing="0" cellpadding="0" role="presentation" style="border-collapse:collapse;">
<tbody><tr>
<td align="left" style="font-size:0;border-collapse:collapse;padding:0 48px 8px 48px;word-break:break-word;">
<div align="left" style="color: rgb(14, 19, 24); font-size: 32px; font-family: "Open Sans", Helvetica, Arial, "sans serif", serif, EmojiFont; font-weight: 700; line-height: 41.6px; letter-spacing: -0.5px;">
TRAVEL OUTSIDE THE BOX</div>
</td>
</tr>
<tr>
<td align="left" style="font-size:0;border-collapse:collapse;padding:8px 48px 0 48px;word-break:break-word;">
<div align="left" style="color: rgb(86, 90, 93); font-size: 14px; font-family: "Open Sans", Helvetica, Arial, "sans serif", serif, EmojiFont; line-height: 22.4px;">
“Travel isn’t always pretty. It isn’t always comfortable. Sometimes it hurts, it even breaks your heart. But that’s okay. The journey changes you; it should change you. It leaves marks on your memory,
on your consciousness, on your heart, and on your body. You take something with you. Hopefully, you leave something good behind.” – Anthony Bourdain<br>
<br>
Through traveling, you learn that the only perfect time is now. Life is short and the world is wide.</div>
</td>
</tr>
</tbody></table>
</td>
</tr>
</tbody></table>
</div>
</td>
</tr>
</tbody></table>
</div>
<div style="background-color:white;max-width:600px;margin:0 auto;">
<table align="center" bgcolor="white" border="0" cellspacing="0" cellpadding="0" role="presentation" style="width:600px;border-collapse:collapse;">
<tbody><tr>
<td align="center" valign="top" style="font-size:0;direction:ltr;border-collapse:collapse;padding:0;">
<div align="left" style="font-size:13px;vertical-align:top;display:inline-block;direction:ltr;width:600px;max-width:100%;">
<table width="100%" border="0" cellspacing="0" cellpadding="0" role="presentation" style="border-collapse:collapse;">
<tbody><tr>
<td valign="top" style="border-collapse:collapse;padding:0;">
<table width="100%" border="0" cellspacing="0" cellpadding="0" role="presentation" style="border-collapse:collapse;">
<tbody><tr>
<td align="left" style="font-size:0;border-collapse:collapse;padding:12px 48px;word-break:break-word;">
<table border="0" cellspacing="0" cellpadding="0" role="presentation" style="border-collapse:separate;line-height:0px;">
<tbody><tr>
<td align="center" valign="middle" bgcolor="#FF8080" role="presentation" style="border-collapse:collapse;border-style:none;border-bottom-right-radius:4px;border-bottom-left-radius:4px;border-top-left-radius:4px;border-top-right-radius:4px;">
<a href="https://www.travelingstrategy.com" target="_blank" rel="noopener noreferrer" data-auth="NotApplicable" style="color:white;font-size:14px;font-family:Open Sans,Helvetica,Arial,sans serif;font-weight:600;text-transform:none;background-color:#FF8080;display:inline-block;text-decoration:none;margin:0;padding:9px 16px;border-bottom-left-radius:4px;line-height:22.399999618530273px;border-bottom-right-radius:4px;border-top-left-radius:4px;border-top-right-radius:4px;">Visit
website</a></td>
</tr>
</tbody></table>
</td>
</tr>
</tbody></table>
</td>
</tr>
</tbody></table>
</div>
</td>
</tr>
</tbody></table>
</div>
"""
# Opening markup for the two-column image row (left-hand image cell);
# concatenated with per-image content when the e-mail body is assembled.
image_left_table_top_tags = """
<div style="background-color:white;max-width:600px;margin:0 auto;">
<table align="center" bgcolor="white" border="0" cellspacing="0" cellpadding="0" role="presentation" style="width:600px;border-collapse:collapse;">
<tbody><tr>
<td align="center" valign="top" style="font-size:0;direction:ltr;border-collapse:collapse;padding:12px 0;">
<div align="left" style="font-size:13px;vertical-align:top;display:inline-block;direction:ltr;width:600px;max-width:100%;">
<table width="100%" border="0" cellspacing="0" cellpadding="0" role="presentation" style="border-collapse:collapse;">
<tbody>
<tr>
<td valign="top" style="border-collapse:collapse;padding:0;">
<table width="100%" border="0" cellspacing="0" cellpadding="0" role="presentation" style="border-collapse:collapse;">
<tbody>
<tr>
<td align="left" style="font-size:0;border-collapse:collapse;padding:0;word-break:break-word;">
<table border="0" cellspacing="0" cellpadding="0" style="font-size:13px;font-family:Helvetica,Arial,sans-serif;width:600px;border-collapse:collapse;table-layout:auto;line-height:22px;">
<tbody>
<tr>
<th style="width:48px;"> </th>
<th>
<table cellspacing="0" cellpadding="0" style="border-collapse:collapse;">
<tbody><tr>
<th>
<table cellspacing="0" cellpadding="0" style="border-collapse:collapse;">
<tbody>
"""
# Closing markup for the left image cell plus the 16px spacer column that
# separates it from the right-hand image cell.
image_left_table_bottom_tags = """
</tbody></table>
</th>
<th width="16"> </th>
"""
# Opening markup for the right-hand image cell of the two-column row.
image_right_table_top_tags = """
<th>
<table cellspacing="0" cellpadding="0" style="border-collapse:collapse;">
<tbody>
"""
# Closing markup for the whole image row: closes the right cell and all
# the enclosing tables/divs opened by image_left_table_top_tags.
image_bottom_tags = """
</tbody></table>
</th>
</tr>
</tbody></table>
</th>
<th style="width:48px;"> </th>
</tr>
</tbody></table>
</td>
</tr>
</tbody></table>
</td>
</tr>
</tbody></table>
</div>
</td>
</tr>
</tbody></table>
</div>
"""
footer = """<div style="max-width:600px;margin:0 auto;">
<table align="center" border="0" cellspacing="0" cellpadding="0" role="presentation" style="width:600px;border-collapse:collapse;">
<tbody><tr>
<td align="center" valign="top" style="font-size:0;direction:ltr;border-collapse:collapse;padding:0;">
<div align="left" style="font-size:13px;vertical-align:top;display:inline-block;direction:ltr;width:600px;max-width:100%;">
<table width="100%" border="0" cellspacing="0" cellpadding="0" role="presentation" style="border-collapse:collapse;">
<tbody><tr>
<td valign="top" style="border-collapse:collapse;padding:0;">
<table width="100%" border="0" cellspacing="0" cellpadding="0" role="presentation" style="border-collapse:collapse;">
<tbody><tr>
<td align="left" style="font-size:0;border-collapse:collapse;padding:0;word-break:break-word;">
<table border="0" cellspacing="0" cellpadding="0" style="font-size:13px;font-family:Helvetica,Arial,sans-serif;width:600px;border-collapse:collapse;table-layout:auto;line-height:22px;">
<tbody><tr>
<td colspan="3" style="border-collapse:collapse;">
<table bgcolor="white" cellspacing="0" cellpadding="0" style="width:600px;border-collapse:collapse;border-bottom-right-radius:8px;border-bottom-left-radius:8px;border-top-left-radius:0px;border-top-right-radius:0px;">
<tbody><tr>
<td height="36" style="border-collapse:collapse;"> </td>
</tr>
</tbody></table>
</td>
</tr>
<tr>
<td height="24" style="border-collapse:collapse;"> </td>
</tr>
<tr>
<td width="24" style="border-collapse:collapse;"> </td>
<td style="border-collapse:collapse;">
<div align="center" style="color: rgb(86, 90, 93); font-size: 14px; font-family: "Open Sans", Helvetica, Arial, "sans serif", serif, EmojiFont; line-height: 22.4px;">
You are receiving this email because you signed up to Traveling Strategy.</div>
</td>
<td width="24" style="border-collapse:collapse;"> </td>
</tr>
<tr>
<td height="24" colspan="3" style="border-collapse:collapse;border-bottom:1px solid #DDE1E3;">
</td>
</tr>
<tr>
<td height="24" style="border-collapse:collapse;"> </td>
</tr>
<tr>
<td width="24" style="border-collapse:collapse;"> </td>
<td style="border-collapse:collapse;">
<div align="center" style="color: rgb(86, 90, 93); font-size: 14px; font-family: "Open Sans", Helvetica, Arial, "sans serif", serif, EmojiFont; line-height: 22.4px;">
<span>Travel smarter and safer from Traveling Strategy</span><span> </span><img data-imagetype="External" src="https://appboy-images.com/appboy/communication/assets/image_assets/images/5e0190dc6a8d637c8dd20cff/original.png?1577160924" alt="love" style="text-decoration:none;border-width:0;outline:none;line-height:14px;"><span> </span><br>
<span>TravelingStrategy®</span>,<span> </span><span>Concordia university, Montreal, Canada</span></div>
</td>
<td width="24" style="border-collapse:collapse;"> </td>
</tr>
<tr>
<td colspan="3" style="border-collapse:collapse;">
<table align="center" cellspacing="0" cellpadding="0" style="border-collapse:collapse;">
<tbody><tr>
<th><a href="https://www.travelingstrategy.com">Visit
travelingstrategy.com</a></th>
<th width="16"></th>
<th><a href="">Unsubscribe</a></th>
</tr>
</tbody></table>
</td>
</tr>
<tr>
<td height="16" style="border-collapse:collapse;"></td>
</tr>
<tr>
<td align="center" colspan="3" style="border-collapse:collapse;">
<table border="0" cellspacing="0" cellpadding="0" style="font-size:13px;font-family:Helvetica,Arial,sans-serif;width:144px;border-collapse:collapse;table-layout:auto;line-height:22px;">
<tbody><tr>
<th width="24"><a href="" target="_blank" rel="noopener noreferrer" data-auth="NotApplicable" style="color:#0E1318;display:block;text-decoration:underline;line-height:0;"><img data-imagetype="External" src="https://appboy-images.com/appboy/communication/assets/image_assets/images/5e01866c167e920348c59e1f/original.png?1577158252" alt="facebook" style="text-decoration:none;border-width:0;outline:none;line-height:13px;"></a></th>
<th width="16"></th>
<th width="24"><a href="" target="_blank" rel="noopener noreferrer" data-auth="NotApplicable" style="color:#0E1318;display:block;text-decoration:underline;line-height:0;"><img data-imagetype="External" src="https://appboy-images.com/appboy/communication/assets/image_assets/images/5e01866ccda48838c1473107/original.png?1577158252" alt="twitter" style="text-decoration:none;border-width:0;outline:none;line-height:13px;"></a></th>
<th width="16"></th>
<th width="24"><a href="" target="_blank" rel="noopener noreferrer" data-auth="NotApplicable" style="color:#0E1318;display:block;text-decoration:underline;line-height:0;"><img data-imagetype="External" src="https://appboy-images.com/appboy/communication/assets/image_assets/images/5e01866daf874e03968b2978/original.png?1577158253" alt="pinterest" style="text-decoration:none;border-width:0;outline:none;line-height:13px;"></a></th>
<th width="16"></th>
<th width="24"><a href="" target="_blank" rel="noopener noreferrer" data-auth="NotApplicable" style="color:#0E1318;display:block;text-decoration:underline;line-height:0;"><img data-imagetype="External" src="https://appboy-images.com/appboy/communication/assets/image_assets/images/5e01866d5b99ac6f6aaa44d0/original.png?1577158253" alt="instagram" style="text-decoration:none;border-width:0;outline:none;line-height:13px;"></a></th>
<th width="16"></th>
</tr>
</tbody></table>
</td>
</tr>
</tbody></table>
</td>
</tr>
</tbody></table>
</td>
</tr>
</tbody></table>
</div>
</td>
</tr>
</tbody></table>
</div>
</div>
"""
| 68.860335
| 516
| 0.481786
| 2,400
| 24,652
| 4.939167
| 0.109167
| 0.083854
| 0.129914
| 0.091108
| 0.801755
| 0.775181
| 0.740847
| 0.735532
| 0.719335
| 0.713008
| 0
| 0.047712
| 0.375953
| 24,652
| 358
| 517
| 68.860335
| 0.722699
| 0
| 0
| 0.764179
| 0
| 0.220896
| 0.992171
| 0.296029
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
8b12f2aaa32c4d2f04a2ff4a73814c6073f45264
| 43,455
|
py
|
Python
|
handlers/kbeServer/Editor/Interface/interface_res.py
|
iamjing66/tornaodo_sdk
|
242a852d3231e5798aa357a17e5638c7dc0567b9
|
[
"MIT"
] | null | null | null |
handlers/kbeServer/Editor/Interface/interface_res.py
|
iamjing66/tornaodo_sdk
|
242a852d3231e5798aa357a17e5638c7dc0567b9
|
[
"MIT"
] | null | null | null |
handlers/kbeServer/Editor/Interface/interface_res.py
|
iamjing66/tornaodo_sdk
|
242a852d3231e5798aa357a17e5638c7dc0567b9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
import json
import logging
import Global
def GetUpdateVersion(DB, UID, pam):
    """Return the resource-version diff for a client.

    Compares the comma-separated version list the client sent
    (``pam["pam"]``) against the VERSION/OPENCODE rows of
    ``tb_res_version`` and builds an ``"opencodes*client_versions"``
    string where entries whose versions already match are "-".

    :param DB:  database helper exposing ``fetchall(sql, params)``.
    :param UID: caller's user id (unused here; kept for interface parity).
    :param pam: dict with key "pam" holding "v1,v2,...", or "" for a
                malformed request.
    :return: dict ``{"code": ..., "msg": ...}``; code "0"/msg "false" on
             bad input, code "1"/diff string otherwise (legacy payload
             uses string-typed codes).
    """
    json_data = {"code": 0, "msg": ""}
    if pam == "":
        # 0,0,0,0,... — malformed request; legacy string-typed failure.
        json_data["code"] = "0"
        json_data["msg"] = "false"
        return json_data

    versions = pam["pam"].split(',')
    table_project = "tb_res_version"
    # The count of client versions identifies which legacy subset of
    # open-codes the client understands; otherwise send every row.
    if len(versions) == 4:
        sql_str = "select VERSION,OPENCODE from " + table_project + " where OPENCODE in (105,106,107,125)"
    elif len(versions) == 5:
        sql_str = "select VERSION,OPENCODE from " + table_project + " where OPENCODE in (105,106,107,112,125)"
    elif len(versions) == 8:
        sql_str = "select VERSION,OPENCODE from " + table_project + " where OPENCODE in (105,106,107,111,112,116,117,125)"
    else:
        sql_str = "select VERSION,OPENCODE from " + table_project

    data = DB.fetchall(sql_str, None)
    codes = []    # open-codes whose version differs ("-" when identical)
    client = []   # the matching client-side versions ("-" when identical)
    if data:
        for index, minfo in enumerate(data):
            # BUG FIX: the original indexed versions[index] unchecked and
            # raised IndexError when the DB returned more rows than the
            # client supplied versions for.
            if index >= len(versions):
                break
            if versions[index] != str(minfo[0]):
                codes.append(str(minfo[1]))
                client.append(versions[index])
            else:
                codes.append("-")
                client.append("-")
    json_data["code"] = "1"
    json_data["msg"] = ",".join(codes) + "*" + ",".join(client)
    return json_data
def new_get_update_version(DB, pam):
    """Return the VERSION column of ``tb_res_version`` as "v1,v2,...".

    :param DB:  database helper exposing ``fetchall(sql, params)``.
    :param pam: "" for a malformed request, else a dict whose "pam" value
                is a comma-separated open-code list used to filter rows
                (empty string means "all rows").
    :return: dict ``{"code": 0|1, "msg": ...}``.
    """
    json_data = {"code": 0, "msg": ""}
    if pam == "":
        # 101,102,103,104,....
        json_data["msg"] = Global.LanguageInst.GetMsg("SMSGID_0_1", "ch")
        return json_data
    sql_else = ";"
    if pam["pam"]:
        # NOTE(review): pam["pam"] is concatenated into the SQL verbatim;
        # if it can come from an untrusted client this is an injection
        # vector — validate it as a comma-separated integer list upstream.
        sql_else = " where OPENCODE in (" + pam["pam"] + ");"
    sql_str = "select VERSION from tb_res_version" + sql_else
    data = DB.fetchall(sql_str, None)
    _cback = ""
    if data:
        _cback = ",".join(str(row[0]) for row in data)
    json_data["code"] = 1
    json_data["msg"] = _cback
    return json_data
def AnlyzeCode(DB, UID, Pam):
    """Validate the request packet and delegate to DoCode.

    :param DB:  database helper passed straight through.
    :param UID: caller's user id (unused here; kept for interface parity).
    :param Pam: "" for a malformed request, else a dict with "code",
                "page" and "version" keys.
    :return: the DoCode result dict, or the legacy string-typed failure
             payload on bad input.
    """
    if Pam == "":
        return {"code": "0", "msg": "false"}
    return DoCode(DB, Pam["code"], Pam["page"], Pam["version"])
def new_anlyze_code(DB, Pam):
    """Validate the request packet and delegate to new_do_code.

    :param Pam: "" for a malformed request, else a dict with "code",
                "page" and "version" keys.
    """
    if Pam == "":
        return {"code": 0, "msg": "false"}
    return new_do_code(DB, Pam["code"], Pam["page"], Pam["version"])
def DoCode(DB, OpenCode, page, version):
    """Build and run the config-table query for *OpenCode*.

    :param DB:       database helper exposing ``fetchall(sql, params)``.
    :param OpenCode: string code selecting the table/query.
    :param page:     1-based page number (500 rows per page).
    :param version:  client-held version, used for incremental fetches.
    :return: ``{"code": 1, "msg": "<code>*<rows>*<version>*<page>*<all>"}``
             where ``<all>`` is "1" for a full dump, "0" for incremental.
    """
    json_data = {"code": 0, "msg": ""}
    _getresAll = "1"  # "1" = full table dump, "0" = incremental update

    # Tables fetched with a plain "select *" plus paging.
    plain_tables = {
        "108": "tb_config_vip",
        "109": "tb_config_coursedata",
        "110": "tb_config_left_res",
        "113": "tb_crclass_res",
        "114": "tb_star_level",
        "115": "tb_framer_level",
        "116": "tb_channel",
        "117": "tb_discount",
        "118": "tb_new_vip",
        "119": "tb_new_vipdiscount",
        "120": "tb_new_suffix",
        "121": "tb_new_config",
        "122": "tb_config_subject",
        "123": "tb_config_class",
        "124": "tb_config_section",
        "125": "tb_config_skybox",
    }
    # Tables supporting incremental fetch keyed by the client version
    # (2xx codes alias their 1xx resource counterparts).
    incremental_tables = {
        "105": "tb_config_res", "205": "tb_config_res",
        "106": "tb_config_scene", "206": "tb_config_scene",
    }

    sql_str = ""
    if OpenCode == "101":
        sql_str = "select FID,FNAME,KSClass from tb_ft_res" + GetLimit(page)
    elif OpenCode == "102":
        sql_str = "select SID,SNAME,PID from tb_st_res" + GetLimit(page)
    elif OpenCode == "103":
        sql_str = "select FID,FNAME from tb_ft_scene" + GetLimit(page)
    elif OpenCode == "104":
        sql_str = "select FID,FNAME from tb_ft_audio" + GetLimit(page)
    elif OpenCode in incremental_tables:
        base_code = "105" if OpenCode in ("105", "205") else "106"
        minVersion = GetUpdateResMinVersion(DB, base_code, version)
        table_project = incremental_tables[OpenCode]
        if minVersion != "0" and int(version) >= int(minVersion):
            # Client is recent enough: only send rows changed after its
            # version, identified by GetUpdateResData.
            _getresAll = "0"
            ResID = GetUpdateResData(DB, base_code, version)
            sql_str = "select * from " + table_project + " where RID in (" + ResID + ")" + GetLimit(page)
        else:
            sql_str = "select * from " + table_project + GetLimit(page)
    elif OpenCode == "107" or OpenCode == "207":
        sql_str = "select * from tb_config_audio" + GetLimit(page)
    elif OpenCode == "111":
        sql_str = "select * from tb_course_type order by Sort" + GetLimit(page)
    elif OpenCode == "112":
        # Work types are small; no paging, just the sort order.
        sql_str = "select * from tb_work_type order by Sort"
    elif OpenCode in plain_tables:
        sql_str = "select * from " + plain_tables[OpenCode] + GetLimit(page)

    data = DB.fetchall(sql_str, None)
    _cback = GetSqlData(OpenCode, data)
    version = GetCodeVersion(DB, str(OpenCode))
    json_data["code"] = 1
    json_data["msg"] = "*".join([str(OpenCode), str(_cback), str(version), str(page), str(_getresAll)])
    return json_data
def new_do_code(DB, OpenCode, page, version):
    """Build and run the config-table query for *OpenCode* (new protocol).

    :param DB:       database helper exposing ``fetchall(sql, params)``.
    :param OpenCode: string code selecting the table/query.
    :param page:     page number, only used by the incremental codes.
    :param version:  client-held version; 0 forces a full dump for the
                     incremental codes.
    :return: ``{"code": 1, "msg": "<code>*<rows>*<version>*<page>*<all>"}``.
    """
    json_data = {"code": 0, "msg": ""}
    _getresAll = "1"  # "1" = full dump, "0" = incremental update

    # Codes with a fixed, fully spelled-out query.
    fixed_queries = {
        "101": "select FID,FNAME,KSClass from tb_ft_res",
        "102": "select SID,SNAME,PID from tb_st_res",
        "103": "select FID,FNAME from tb_ft_scene",
        "104": "select FID,FNAME from tb_ft_audio",
        "111": "select CID, TypeName, cIconPath, sort from tb_course_type where state = 1 order by Sort;",
        "112": "select * from tb_work_type order by Sort",
        "127": "select CID,CNAME,EName,state from tb_xr_workclassify order by `sort`",
        "128": "select tid,tname,tenname from tb_xr_tags order by `sort`",
        "129": "select TID,TNAME from tb_xr_platform order by `sort`",
        # BUG FIX: the original query ended at "from " with no table name,
        # producing invalid SQL for code 130.
        "130": "select MID,RID,resType,resPath,resData,resversion from tb_xr_fileversion_data",
    }
    # Tables fetched with a plain unpaged "select *".
    plain_tables = {
        "108": "tb_config_vip",
        "109": "tb_config_coursedata",
        "110": "tb_config_left_res",
        "113": "tb_crclass_res",
        "114": "tb_star_level",
        "115": "tb_framer_level",
        "116": "tb_channel",
        "117": "tb_discount",
        "118": "tb_new_vip",
        "119": "tb_new_vipdiscount",
        "120": "tb_new_suffix",
        "121": "tb_new_config",
        "122": "tb_config_subject",
        "123": "tb_config_class",
        "124": "tb_config_section",
        "125": "tb_config_skybox",
        "126": "tb_config_ui",
    }
    # Incremental codes: (base open-code for the version table, table name).
    incremental = {
        "105": ("105", "tb_config_res"), "205": ("105", "tb_config_res"),
        "106": ("106", "tb_config_scene"), "206": ("106", "tb_config_scene"),
        "107": ("107", "tb_config_audio"), "207": ("107", "tb_config_audio"),
    }

    sql_str = ""
    if OpenCode in fixed_queries:
        sql_str = fixed_queries[OpenCode]
    elif OpenCode in incremental:
        base_code, table_project = incremental[OpenCode]
        if int(version) == 0:
            # version 0 means the client has nothing: full paged dump.
            sql_str = "select * from " + table_project + GetLimit(page)
        else:
            _getresAll = "0"
            ResID = GetUpdateResData(DB, base_code, version)
            sql_str = "select * from " + table_project + " where RID in (" + ResID + ")" + GetLimit(page)
    elif OpenCode in plain_tables:
        sql_str = "select * from " + plain_tables[OpenCode]

    data = DB.fetchall(sql_str, None)
    _cback = new_get_sql_data(OpenCode, data)
    version = GetCodeVersion(DB, str(OpenCode))
    _cback = "*".join([str(OpenCode), str(_cback), str(version), str(page), _getresAll])
    json_data["code"] = 1
    json_data["msg"] = _cback
    return json_data
def new_get_sql_data(OpenCode, data):
    """Serialize query rows for the client (new protocol).

    Fields are joined with "`", rows with "^", and the result carries a
    trailing "*1" (all rows sent) or "*0" (truncated at 500 rows).

    :param OpenCode: string code selecting which columns to emit.
    :param data:     iterable of DB row tuples (or None/empty).
    """
    # Codes whose rows are emitted column-for-column in table order.
    # In the original these were ~23 byte-identical elif branches.
    FULL_ROW = {
        "101", "102", "103", "104", "108", "111", "114", "115", "116",
        "117", "118", "119", "120", "121", "122", "123", "124", "125",
        "126", "127", "128", "129", "130",
    }

    def _fields(minfo):
        """Pick and order the columns of one row for this OpenCode."""
        if OpenCode in FULL_ROW:
            return list(minfo)
        if OpenCode in ("105", "106"):
            # Column 1 is relocated to sit after column 29.
            return list(minfo[2:30]) + [minfo[1]] + list(minfo[30:])
        if OpenCode == "107":
            return list(minfo[2:15]) + [minfo[1], minfo[15]]
        if OpenCode == "109":
            return list(minfo[1:8])
        if OpenCode in ("110", "113"):
            return list(minfo[1:])
        if OpenCode == "112":
            return list(minfo[1:5])
        if OpenCode == "205":
            return [minfo[2], minfo[14], minfo[15], minfo[32], minfo[33], minfo[34], minfo[30]]
        if OpenCode == "206":
            return [minfo[2], minfo[15], minfo[14], minfo[31], minfo[32], minfo[33], minfo[13]]
        if OpenCode == "207":
            return [minfo[2], minfo[14], minfo[15]]
        return None  # unrecognized code: the row contributes nothing

    _cback = ""
    _count = 0
    if data:
        rows = []
        for minfo in data:
            if _count < 500:
                picked = _fields(minfo)
                if picked is not None:
                    rows.append("`".join(str(f) for f in picked))
                _count += 1
        _cback = "^".join(rows)
    # Trailing flag: "*1" when everything fit in one batch, "*0" otherwise.
    _cback += "*1" if _count < 500 else "*0"
    return _cback
def GetSqlData(OpenCode, data):
    """Serialize query rows for the client (legacy protocol).

    Fields are joined with "`", rows with "^", and the result carries a
    trailing "*1" (all rows sent) or "*0" (truncated at 500 rows).

    BUG FIX: the original OpenCode "126" first-row branch read
    ``_cback = + str(minfo[1]) + ...`` — a unary plus applied to a string,
    which raised TypeError on the first row. The table below emits the
    same columns without the broken concatenation.

    :param OpenCode: string code selecting which columns to emit.
    :param data:     iterable of DB row tuples (or None/empty).
    """
    # Column indexes emitted for each open-code, in output order.
    FIELD_ORDER = {
        "101": [0, 1, 2],
        "102": [0, 1, 2],
        "103": [0, 1],
        "104": [0, 1],
        # 105/106/107 relocate column 1 into the middle of the row.
        "105": list(range(2, 30)) + [1] + list(range(30, 37)),
        "106": list(range(2, 30)) + [1] + list(range(30, 34)),
        "107": list(range(2, 15)) + [1, 15],
        "108": list(range(16)),
        "109": list(range(1, 8)),
        "110": list(range(1, 5)),
        "111": list(range(1, 5)),
        "112": list(range(1, 5)),
        "113": [1, 2],
        "114": list(range(4)),
        "115": list(range(4)),
        "116": list(range(6)),
        "117": list(range(5)),
        "118": list(range(14)),
        "119": list(range(4)),
        "120": list(range(6)),
        "121": list(range(8)),
        "122": list(range(3)),
        "123": list(range(4)),
        "124": list(range(4)),
        "125": list(range(4)),
        "126": list(range(1, 8)),
        "205": [2, 14, 15, 32, 33, 34, 30],
        "206": [2, 15, 14, 31, 32, 33, 13],
        "207": [2, 14, 15],
    }
    order = FIELD_ORDER.get(OpenCode)
    rows = []
    _count = 0
    if data:
        for minfo in data:
            if _count < 500:
                if order is not None:
                    rows.append("`".join(str(minfo[i]) for i in order))
                _count += 1
    _cback = "^".join(rows)
    # Trailing flag: "*1" when everything fit in one batch, "*0" otherwise.
    return _cback + ("*1" if _count < 500 else "*0")
def GetLimit(page):
    """Return a SQL LIMIT clause for 1-based *page*, 500 rows per page."""
    offset = (int(page) - 1) * 500
    return " limit " + str(offset) + ",500"
def GetCodeVersion(DB, code):
    """Look up the current VERSION for *code* in tb_res_version.

    The 2xx codes share version counters with their 1xx resource
    counterparts, so those are aliased before the query.

    :return: the VERSION value, or "" when the code has no row.
    """
    aliases = {"205": "105", "206": "106", "207": "107"}
    code = aliases.get(code, code)
    sql_str = "select VERSION from tb_res_version where OPENCODE = " + str(code)
    row = DB.fetchone(sql_str, None)
    return row[0] if row else ""
def GetUpdateResData(DB, code, version):
    """Collect the distinct resids changed after *version* for *code*.

    :return: "0" followed by the sorted distinct resids, comma-separated.
             The leading "0" is always present (it seeds the accumulator
             in the legacy implementation), so the result is always a
             non-empty IN-list.
    """
    sql_str = ("select resids from tb_config_vdetails where OPENCODE = "
               + str(code) + " and version > " + str(version))
    # &ID, &Res_Id, &Res, &BuyRes, &SceneID
    data = DB.fetchall(sql_str, None)
    ids = sorted({row[0] for row in data}) if data else []
    return ",".join(["0"] + ids)
# Fetch the smallest recorded version number for an open-code.
def GetUpdateResMinVersion(DB, code, version):
    """Return the minimum version in tb_config_vdetails for *code*.

    :param version: unused; kept for interface parity with callers.
    :return: the version value from the row's third column, or "0" when
             the table has no rows for this code.
    """
    sql_str = ("select * from tb_config_vdetails where OPENCODE = "
               + str(code) + " ORDER BY version limit 1")
    # &ID, &Res_Id, &Res, &BuyRes, &SceneID
    row = DB.fetchone(sql_str, None)
    return row[2] if row else "0"
def Server_ConfigGet(DB, params):
    """Fetch a price/config table chosen by ``params["OpenCode"]``.

    The numeric open-codes all serialize identically — every selected
    column of a row joined with "*", rows joined with "^" (the original
    repeated that loop eight times). The two "xr..." codes return JSON
    payloads instead.

    :param DB:     database helper exposing ``fetchall(sql, params)``.
    :param params: "" for a malformed request, else a dict with "OpenCode"
                   and "Pam" (an SQL tail, e.g. a LIMIT clause).
    :return: ``{"code": ..., "Data": ..., "OpenCode": ...}``.
    """
    json_data = {"code": 0, "Data": "", "OpenCode": ""}
    if params == "":
        json_data["code"] = "0"
        json_data["Data"] = ""
        return json_data

    OpenCode = params["OpenCode"]
    limit = params["Pam"]
    json_data["OpenCode"] = OpenCode

    # NOTE(review): `limit` is appended to the SQL verbatim; if it can come
    # from an untrusted client this is an injection vector — validate it
    # upstream.
    queries = {
        "202": "select Rank,PrivilegePrice from tb_config_vip",
        "203": "select RID,PriceYear,PriceForever from tb_config_res where IsNeedBuy = 1",
        "204": "select RID,PriceYear,PriceForever from tb_config_scene where IsNeedBuy = 1",
        "205": "select CourseId,CID,LID,UID,Active from tb_config_coursedata",
        "206": "select Res_Id,BuyRes,SceneID from tb_config_left_res",
        "207": "select CID,WTYPE,WID,PRICE from tb_channel",
        "208": "select CID,DAYS,Discount from tb_discount",
        "xrvipconfig": "select * from tb_xr_vip_score",
        "xrworkconfig": "select `import`,`vipimport`,`template`,`viptemplate`,`shoucang`,`vipshoucang` from tb_xr_user_config where id = 1 ",
    }
    _sql = ""
    if OpenCode in queries:
        _sql = queries[OpenCode] + limit

    data = DB.fetchall(_sql, None)
    _cback = ""
    if data:
        minfo = list(data)
        if OpenCode == "xrvipconfig":
            vipdata = {}
            for info in minfo:
                vipdata[int(info[1])] = {
                    "id": int(info[1]),
                    "cost": int(info[2]),
                    "desc": info[3],
                }
            _cback = json.dumps(vipdata)
        elif OpenCode == "xrworkconfig":
            workdata = {}
            for info in minfo:
                # Only one row is expected (id = 1); a later row would win.
                workdata["import"] = int(info[0])
                workdata["vipimport"] = int(info[1])
                workdata["template"] = int(info[2])
                workdata["viptemplate"] = int(info[3])
                workdata["shoucang"] = int(info[4])
                workdata["vipshoucang"] = int(info[5])
            _cback = json.dumps(workdata)
        elif OpenCode in queries:
            # All numeric codes: every selected column, "*"-joined per row.
            _cback = "^".join("*".join(str(f) for f in info) for info in minfo)
    json_data["code"] = "1"
    json_data["Data"] = _cback
    return json_data
| 47.285092
| 225
| 0.425475
| 4,445
| 43,455
| 3.994151
| 0.058268
| 0.209981
| 0.046637
| 0.033795
| 0.833502
| 0.814521
| 0.782866
| 0.763039
| 0.728794
| 0.719838
| 0
| 0.05189
| 0.393764
| 43,455
| 918
| 226
| 47.336601
| 0.622039
| 0.020366
| 0
| 0.754982
| 0
| 0
| 0.102174
| 0.007263
| 0.001172
| 0
| 0
| 0
| 0
| 1
| 0.01524
| false
| 0
| 0.007034
| 0
| 0.043376
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8b2839b9affbb8ee468617ebd2c7fe7fc4bdce21
| 6,125
|
py
|
Python
|
tests/test_regional_stager.py
|
cariad/startifact
|
ef0b855ca2061f9e6982ed28c63b3da4c5d1464d
|
[
"MIT"
] | null | null | null |
tests/test_regional_stager.py
|
cariad/startifact
|
ef0b855ca2061f9e6982ed28c63b3da4c5d1464d
|
[
"MIT"
] | 15
|
2021-11-09T13:22:57.000Z
|
2021-12-17T12:24:27.000Z
|
tests/test_regional_stager.py
|
cariad/startifact
|
ef0b855ca2061f9e6982ed28c63b3da4c5d1464d
|
[
"MIT"
] | null | null | null |
from multiprocessing import Queue
from pathlib import Path
from mock import ANY, Mock, patch
from pytest import raises
from semver import VersionInfo # pyright: reportMissingTypeStubs=false
from startifact.parameters.latest_version import LatestVersionParameter
from startifact.regional_process_result import RegionalProcessResult
from startifact.regional_stager import RegionalStager
def test_assert_not_exists__false(
    regional_stager: RegionalStager,
    session: Mock,
) -> None:
    """When the artifact is not already staged, assert_not_exists() passes."""
    exists_patch = patch(
        "startifact.regional_stager.exists",
        return_value=False,
    )
    with exists_patch as exists_mock:
        regional_stager.assert_not_exists()
    exists_mock.assert_called_once_with("bucket-10", "SugarWater@1.2.3", session)
def test_assert_not_exists__true(
    regional_stager: RegionalStager,
    session: Mock,
) -> None:
    """When the artifact already exists, assert_not_exists() raises."""
    exists_patch = patch(
        "startifact.regional_stager.exists",
        return_value=True,
    )
    with exists_patch as exists_mock, raises(Exception) as ex:
        regional_stager.assert_not_exists()
    exists_mock.assert_called_once_with("bucket-10", "SugarWater@1.2.3", session)
    assert str(ex.value) == "SugarWater@1.2.3 exists in bucket-10 in eu-west-10"
def test_operate(
    latest_version_parameter: LatestVersionParameter,
    queue: "Queue[RegionalProcessResult]",
    session: Mock,
) -> None:
    """operate() checks, uploads, writes metadata, then bumps the version."""
    stager = RegionalStager(
        bucket="buck",
        file_hash="file_hash",
        key="SugarWater@1.2.3",
        latest_version_parameter=latest_version_parameter,
        path=Path("LICENSE"),
        queue=queue,
        read_only=False,
        session=session,
        version=VersionInfo(1, 2, 3),
    )
    with patch.object(stager, "assert_not_exists") as assert_not_exists, patch.object(
        stager, "put_object"
    ) as put_object, patch.object(stager, "put_metadata") as put_metadata:
        stager.operate()
    assert_not_exists.assert_called_once_with()
    put_object.assert_called_once_with()
    put_metadata.assert_called_once_with()
    # operate() records the staged semantic version on the SSM parameter.
    assert latest_version_parameter.value == "1.2.3"
def test_put_metadata(
    latest_version_parameter: LatestVersionParameter,
    queue: "Queue[RegionalProcessResult]",
    session: Mock,
) -> None:
    """put_metadata() uploads the metadata document via a fresh S3 client."""
    s3 = Mock()
    s3.put_object = Mock()
    session.client = Mock(return_value=s3)
    stager = RegionalStager(
        bucket="buck",
        file_hash="file_hash",
        key="SugarWater@1.2.3",
        latest_version_parameter=latest_version_parameter,
        metadata=b"metadata",
        metadata_hash="metadata_hash",
        path=Path("upload.zip"),
        queue=queue,
        read_only=False,
        session=session,
        version=VersionInfo(1, 2, 3),
    )
    stager.put_metadata()
    session.client.assert_called_once_with("s3")
    # Metadata lands beside the artifact under "<key>/metadata".
    s3.put_object.assert_called_once_with(
        Body=b"metadata",
        Bucket="buck",
        ContentMD5="metadata_hash",
        Key="SugarWater@1.2.3/metadata",
    )
def test_put_metadata__no_metadata(
    latest_version_parameter: LatestVersionParameter,
    queue: "Queue[RegionalProcessResult]",
    session: Mock,
) -> None:
    """Without a metadata document, put_metadata() never touches S3."""
    s3 = Mock()
    s3.put_object = Mock()
    session.client = Mock(return_value=s3)
    stager = RegionalStager(
        bucket="buck",
        file_hash="file_hash",
        key="SugarWater@1.2.3",
        latest_version_parameter=latest_version_parameter,
        path=Path("upload.zip"),
        queue=queue,
        read_only=False,
        session=session,
        version=VersionInfo(1, 2, 3),
    )
    stager.put_metadata()
    session.client.assert_not_called()
    s3.put_object.assert_not_called()
def test_put_metadata__read_only(
    latest_version_parameter: LatestVersionParameter,
    queue: "Queue[RegionalProcessResult]",
    session: Mock,
) -> None:
    """In read-only mode, put_metadata() never touches S3."""
    s3 = Mock()
    s3.put_object = Mock()
    session.client = Mock(return_value=s3)
    stager = RegionalStager(
        bucket="buck",
        file_hash="file_hash",
        key="SugarWater@1.2.3",
        latest_version_parameter=latest_version_parameter,
        metadata=b"metadata",
        metadata_hash="metadata_hash",
        path=Path("upload.zip"),
        queue=queue,
        read_only=True,
        session=session,
        version=VersionInfo(1, 2, 3),
    )
    stager.put_metadata()
    session.client.assert_not_called()
    s3.put_object.assert_not_called()
def test_put_object(
    latest_version_parameter: LatestVersionParameter,
    queue: "Queue[RegionalProcessResult]",
    session: Mock,
) -> None:
    """put_object() streams the artifact file to the bucket."""
    s3 = Mock()
    s3.put_object = Mock()
    session.client = Mock(return_value=s3)
    stager = RegionalStager(
        bucket="buck",
        file_hash="file_hash",
        key="SugarWater@1.2.3",
        latest_version_parameter=latest_version_parameter,
        metadata=b"metadata",
        metadata_hash="metadata_hash",
        path=Path("LICENSE"),
        queue=queue,
        read_only=False,
        session=session,
        version=VersionInfo(1, 2, 3),
    )
    stager.put_object()
    session.client.assert_called_once_with("s3")
    # Body is an open file handle, so only assert the other arguments exactly.
    s3.put_object.assert_called_once_with(
        Body=ANY,
        Bucket="buck",
        ContentMD5="file_hash",
        Key="SugarWater@1.2.3",
    )
def test_put_object__read_only(
    latest_version_parameter: LatestVersionParameter,
    queue: "Queue[RegionalProcessResult]",
    session: Mock,
) -> None:
    """In read-only mode, put_object() never touches S3."""
    s3 = Mock()
    s3.put_object = Mock()
    session.client = Mock(return_value=s3)
    stager = RegionalStager(
        bucket="buck",
        file_hash="file_hash",
        key="SugarWater@1.2.3",
        latest_version_parameter=latest_version_parameter,
        metadata=b"metadata",
        metadata_hash="metadata_hash",
        path=Path("LICENSE"),
        queue=queue,
        read_only=True,
        session=session,
        version=VersionInfo(1, 2, 3),
    )
    stager.put_object()
    session.client.assert_not_called()
    s3.put_object.assert_not_called()
| 25.95339
| 80
| 0.668082
| 710
| 6,125
| 5.491549
| 0.105634
| 0.062324
| 0.107207
| 0.036676
| 0.809438
| 0.778918
| 0.760451
| 0.754296
| 0.754296
| 0.745832
| 0
| 0.01711
| 0.227102
| 6,125
| 235
| 81
| 26.06383
| 0.806506
| 0.006041
| 0
| 0.774194
| 0
| 0
| 0.12652
| 0.042557
| 0
| 0
| 0
| 0
| 0.11828
| 1
| 0.043011
| false
| 0
| 0.043011
| 0
| 0.086022
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8b32ded3aec10b6089c88bb42fcf6a2b687d97c8
| 17,327
|
py
|
Python
|
sdk/python/pulumi_aws/budgets/budget.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/budgets/budget.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/budgets/budget.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Budget(pulumi.CustomResource):
    """AWS Budgets budget resource (tfgen-generated wrapper).

    NOTE: this class is generated by the Pulumi Terraform Bridge; do not
    edit by hand — regenerate instead.
    """
    account_id: pulumi.Output[str]
    """
    The ID of the target account for budget. Will use current user's account_id by default if omitted.
    """
    budget_type: pulumi.Output[str]
    """
    Whether this budget tracks monetary cost or usage.
    """
    cost_filters: pulumi.Output[dict]
    """
    Map of CostFilters key/value pairs to apply to the budget.
    """
    cost_types: pulumi.Output[dict]
    """
    Object containing CostTypes The types of cost included in a budget, such as tax and subscriptions..
    * `includeCredit` (`bool`) - A boolean value whether to include credits in the cost budget. Defaults to `true`
    * `includeDiscount` (`bool`) - Specifies whether a budget includes discounts. Defaults to `true`
    * `includeOtherSubscription` (`bool`) - A boolean value whether to include other subscription costs in the cost budget. Defaults to `true`
    * `includeRecurring` (`bool`) - A boolean value whether to include recurring costs in the cost budget. Defaults to `true`
    * `includeRefund` (`bool`) - A boolean value whether to include refunds in the cost budget. Defaults to `true`
    * `includeSubscription` (`bool`) - A boolean value whether to include subscriptions in the cost budget. Defaults to `true`
    * `includeSupport` (`bool`) - A boolean value whether to include support costs in the cost budget. Defaults to `true`
    * `includeTax` (`bool`) - A boolean value whether to include tax in the cost budget. Defaults to `true`
    * `includeUpfront` (`bool`) - A boolean value whether to include upfront costs in the cost budget. Defaults to `true`
    * `useAmortized` (`bool`) - Specifies whether a budget uses the amortized rate. Defaults to `false`
    * `useBlended` (`bool`) - A boolean value whether to use blended costs in the cost budget. Defaults to `false`
    """
    limit_amount: pulumi.Output[str]
    """
    The amount of cost or usage being measured for a budget.
    """
    limit_unit: pulumi.Output[str]
    """
    The unit of measurement used for the budget forecast, actual spend, or budget threshold, such as dollars or GB. See [Spend](http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/data-type-spend.html) documentation.
    """
    name: pulumi.Output[str]
    """
    The name of a budget. Unique within accounts.
    """
    name_prefix: pulumi.Output[str]
    """
    The prefix of the name of a budget. Unique within accounts.
    """
    notifications: pulumi.Output[list]
    """
    Object containing Budget Notifications. Can be used multiple times to define more than one budget notification
    * `comparison_operator` (`str`) - (Required) Comparison operator to use to evaluate the condition. Can be `LESS_THAN`, `EQUAL_TO` or `GREATER_THAN`.
    * `notification_type` (`str`) - (Required) What kind of budget value to notify on. Can be `ACTUAL` or `FORECASTED`
    * `subscriberEmailAddresses` (`list`) - (Optional) E-Mail addresses to notify. Either this or `subscriber_sns_topic_arns` is required.
    * `subscriberSnsTopicArns` (`list`) - (Optional) SNS topics to notify. Either this or `subscriber_email_addresses` is required.
    * `threshold` (`float`) - (Required) Threshold when the notification should be sent.
    * `thresholdType` (`str`) - (Required) What kind of threshold is defined. Can be `PERCENTAGE` OR `ABSOLUTE_VALUE`.
    """
    time_period_end: pulumi.Output[str]
    """
    The end of the time period covered by the budget. There are no restrictions on the end date. Format: `2017-01-01_12:00`.
    """
    time_period_start: pulumi.Output[str]
    """
    The start of the time period covered by the budget. The start date must come before the end date. Format: `2017-01-01_12:00`.
    """
    time_unit: pulumi.Output[str]
    """
    The length of time until a budget resets the actual and forecasted spend. Valid values: `MONTHLY`, `QUARTERLY`, `ANNUALLY`.
    """
    def __init__(__self__, resource_name, opts=None, account_id=None, budget_type=None, cost_filters=None, cost_types=None, limit_amount=None, limit_unit=None, name=None, name_prefix=None, notifications=None, time_period_end=None, time_period_start=None, time_unit=None, __props__=None, __name__=None, __opts__=None):
        """
        Provides a budgets budget resource. Budgets use the cost visualisation provided by Cost Explorer to show you the status of your budgets, to provide forecasts of your estimated costs, and to track your AWS usage, including your free tier usage.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_id: The ID of the target account for budget. Will use current user's account_id by default if omitted.
        :param pulumi.Input[str] budget_type: Whether this budget tracks monetary cost or usage.
        :param pulumi.Input[dict] cost_filters: Map of CostFilters key/value pairs to apply to the budget.
        :param pulumi.Input[dict] cost_types: Object containing CostTypes The types of cost included in a budget, such as tax and subscriptions..
        :param pulumi.Input[str] limit_amount: The amount of cost or usage being measured for a budget.
        :param pulumi.Input[str] limit_unit: The unit of measurement used for the budget forecast, actual spend, or budget threshold, such as dollars or GB. See [Spend](http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/data-type-spend.html) documentation.
        :param pulumi.Input[str] name: The name of a budget. Unique within accounts.
        :param pulumi.Input[str] name_prefix: The prefix of the name of a budget. Unique within accounts.
        :param pulumi.Input[list] notifications: Object containing Budget Notifications. Can be used multiple times to define more than one budget notification
        :param pulumi.Input[str] time_period_end: The end of the time period covered by the budget. There are no restrictions on the end date. Format: `2017-01-01_12:00`.
        :param pulumi.Input[str] time_period_start: The start of the time period covered by the budget. The start date must come before the end date. Format: `2017-01-01_12:00`.
        :param pulumi.Input[str] time_unit: The length of time until a budget resets the actual and forecasted spend. Valid values: `MONTHLY`, `QUARTERLY`, `ANNUALLY`.

        The **cost_types** and **notifications** objects accept the same keys
        as documented on the corresponding class attributes above, with each
        value wrapped in ``pulumi.Input``.
        """
        # Legacy keyword aliases, kept only for backwards compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        # No opts.id means we are creating a new resource, so build the
        # property bag and validate the required inputs here.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['account_id'] = account_id
            if budget_type is None:
                raise TypeError("Missing required property 'budget_type'")
            __props__['budget_type'] = budget_type
            __props__['cost_filters'] = cost_filters
            __props__['cost_types'] = cost_types
            if limit_amount is None:
                raise TypeError("Missing required property 'limit_amount'")
            __props__['limit_amount'] = limit_amount
            if limit_unit is None:
                raise TypeError("Missing required property 'limit_unit'")
            __props__['limit_unit'] = limit_unit
            __props__['name'] = name
            __props__['name_prefix'] = name_prefix
            __props__['notifications'] = notifications
            __props__['time_period_end'] = time_period_end
            if time_period_start is None:
                raise TypeError("Missing required property 'time_period_start'")
            __props__['time_period_start'] = time_period_start
            if time_unit is None:
                raise TypeError("Missing required property 'time_unit'")
            __props__['time_unit'] = time_unit
        super(Budget, __self__).__init__(
            'aws:budgets/budget:Budget',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None, account_id=None, budget_type=None, cost_filters=None, cost_types=None, limit_amount=None, limit_unit=None, name=None, name_prefix=None, notifications=None, time_period_end=None, time_period_start=None, time_unit=None):
        """
        Get an existing Budget resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_id: The ID of the target account for budget. Will use current user's account_id by default if omitted.
        :param pulumi.Input[str] budget_type: Whether this budget tracks monetary cost or usage.
        :param pulumi.Input[dict] cost_filters: Map of CostFilters key/value pairs to apply to the budget.
        :param pulumi.Input[dict] cost_types: Object containing CostTypes The types of cost included in a budget, such as tax and subscriptions..
        :param pulumi.Input[str] limit_amount: The amount of cost or usage being measured for a budget.
        :param pulumi.Input[str] limit_unit: The unit of measurement used for the budget forecast, actual spend, or budget threshold, such as dollars or GB. See [Spend](http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/data-type-spend.html) documentation.
        :param pulumi.Input[str] name: The name of a budget. Unique within accounts.
        :param pulumi.Input[str] name_prefix: The prefix of the name of a budget. Unique within accounts.
        :param pulumi.Input[list] notifications: Object containing Budget Notifications. Can be used multiple times to define more than one budget notification
        :param pulumi.Input[str] time_period_end: The end of the time period covered by the budget. There are no restrictions on the end date. Format: `2017-01-01_12:00`.
        :param pulumi.Input[str] time_period_start: The start of the time period covered by the budget. The start date must come before the end date. Format: `2017-01-01_12:00`.
        :param pulumi.Input[str] time_unit: The length of time until a budget resets the actual and forecasted spend. Valid values: `MONTHLY`, `QUARTERLY`, `ANNUALLY`.

        The **cost_types** and **notifications** objects accept the same keys
        as documented on the corresponding class attributes above, with each
        value wrapped in ``pulumi.Input``.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()
        __props__["account_id"] = account_id
        __props__["budget_type"] = budget_type
        __props__["cost_filters"] = cost_filters
        __props__["cost_types"] = cost_types
        __props__["limit_amount"] = limit_amount
        __props__["limit_unit"] = limit_unit
        __props__["name"] = name
        __props__["name_prefix"] = name_prefix
        __props__["notifications"] = notifications
        __props__["time_period_end"] = time_period_end
        __props__["time_period_start"] = time_period_start
        __props__["time_unit"] = time_unit
        return Budget(resource_name, opts=opts, __props__=__props__)

    def translate_output_property(self, prop):
        # Map provider camelCase property names back to Python snake_case.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Map Python snake_case property names to provider camelCase.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 73.109705
| 317
| 0.699082
| 2,309
| 17,327
| 5.089216
| 0.121265
| 0.054293
| 0.027572
| 0.039061
| 0.850396
| 0.829376
| 0.825632
| 0.810569
| 0.781295
| 0.781295
| 0
| 0.005549
| 0.2095
| 17,327
| 236
| 318
| 73.419492
| 0.852376
| 0.510821
| 0
| 0.02381
| 1
| 0
| 0.167717
| 0.005628
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0.011905
| 0.071429
| 0.02381
| 0.309524
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8c68674548afd4979c31e97e341bedff1ce77fc4
| 2,262
|
py
|
Python
|
ssc32py/launch/sample_client.py
|
kalmes/mk-ros-pkg
|
26c4d011506a7a8adbf866741184b79f5103ae3f
|
[
"BSD-3-Clause"
] | 1
|
2019-02-22T14:52:00.000Z
|
2019-02-22T14:52:00.000Z
|
ssc32py/launch/sample_client.py
|
kalmes/mk-ros-pkg
|
26c4d011506a7a8adbf866741184b79f5103ae3f
|
[
"BSD-3-Clause"
] | null | null | null |
ssc32py/launch/sample_client.py
|
kalmes/mk-ros-pkg
|
26c4d011506a7a8adbf866741184b79f5103ae3f
|
[
"BSD-3-Clause"
] | 1
|
2019-06-04T09:18:33.000Z
|
2019-06-04T09:18:33.000Z
|
#!/usr/bin/python
#
# Copyright 2011, Mark Kalmes
#
# This is just a very simple sample client that shows how to
# make requests against the ROS SSC32 server
import roslib
roslib.load_manifest('ssc32py')
import rospy
import time, math
from ssc32py.ros_ssc32 import ROS_SSC32_Client
if __name__ == '__main__':
    # Attach a client to each named joint served by the ROS SSC32 node.
    print "Contacting server.."
    base = ROS_SSC32_Client('base')
    shoulder = ROS_SSC32_Client('shoulder')
    elbow = ROS_SSC32_Client('elbow')
    wrist = ROS_SSC32_Client('wrist')
    rotate = ROS_SSC32_Client('wrist_rotate')

    # Drive the arm to a home pose.  NOTE(review): endgroup=False presumably
    # queues the commands into one group move that starts on the final call
    # (the one with timesecs set) — confirm against ros_ssc32's API.
    print "Setup.."
    base.move_angle(math.radians(0), endgroup=False)
    shoulder.move_angle(math.radians(50), endgroup=False)
    elbow.move_angle(math.radians(50), endgroup=False)
    wrist.move_angle(math.radians(-55), endgroup=False)
    rotate.move_angle(math.radians(0), timesecs=1)
    # Poll the base joint until the group move reports complete.
    while base.is_moving(): time.sleep(0.1)
    print "Complete!"

    # First waypoint: arm swung left with wrist rotated.
    print "Moving to first position.."
    base.move_angle(math.radians(-55), endgroup=False)
    shoulder.move_angle(math.radians(50), endgroup=False)
    elbow.move_angle(math.radians(50), endgroup=False)
    wrist.move_angle(math.radians(-80), endgroup=False)
    rotate.move_angle(math.radians(-20), timesecs=5)
    print "Waiting for move to complete.."
    while base.is_moving(): time.sleep(0.1)
    print "Complete!"
    time.sleep(2)

    # Second waypoint: arm centred, elbow raised.
    print "Moving to second position.."
    base.move_angle(math.radians(0), endgroup=False)
    shoulder.move_angle(math.radians(50), endgroup=False)
    elbow.move_angle(math.radians(70), endgroup=False)
    wrist.move_angle(math.radians(-55), endgroup=False)
    rotate.move_angle(math.radians(0), timesecs=5)
    print "Waiting for move to complete.."
    while base.is_moving(): time.sleep(0.1)
    print "Complete!"
    time.sleep(2)

    # Third waypoint: mirror of the first, swung right.
    print "Moving to third position.."
    base.move_angle(math.radians(55), endgroup=False)
    shoulder.move_angle(math.radians(50), endgroup=False)
    elbow.move_angle(math.radians(50), endgroup=False)
    wrist.move_angle(math.radians(-80), endgroup=False)
    rotate.move_angle(math.radians(20), timesecs=5)
    print "Waiting for move to complete.."
    while base.is_moving(): time.sleep(0.1)
    print "Complete!"
| 31.859155
| 60
| 0.698497
| 318
| 2,262
| 4.820755
| 0.22327
| 0.117417
| 0.169602
| 0.260926
| 0.756686
| 0.72407
| 0.718852
| 0.718852
| 0.718852
| 0.718852
| 0
| 0.039446
| 0.170645
| 2,262
| 70
| 61
| 32.314286
| 0.777719
| 0.064545
| 0
| 0.530612
| 0
| 0
| 0.132701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.081633
| null | null | 0.244898
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8ced0465561d3ce6956db218390ff81d8e946976
| 4,207
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/ShowLispInstanceIdEthernetServer/cli/equal/golden_output_4_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 204
|
2018-06-27T00:55:27.000Z
|
2022-03-06T21:12:18.000Z
|
src/genie/libs/parser/iosxe/tests/ShowLispInstanceIdEthernetServer/cli/equal/golden_output_4_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 468
|
2018-06-19T00:33:18.000Z
|
2022-03-31T23:23:35.000Z
|
src/genie/libs/parser/iosxe/tests/ShowLispInstanceIdEthernetServer/cli/equal/golden_output_4_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 309
|
2019-01-16T20:21:07.000Z
|
2022-03-30T12:56:41.000Z
|
# Golden expected parse for ShowLispInstanceIdEthernetServer (variant 4).
def _registrations(eids):
    """Expand MAC EIDs into the identical registration record each shares."""
    return {
        eid: {
            "last_register": "2w1d",
            "up": "yes#",
            "who_last_registered": "10.8.130.4:61275",
            "inst_id": 8188,
        }
        for eid in eids
    }


# Site "site_uci" also carries the unregistered wildcard "any-mac" entry.
_site_uci = {
    "any-mac": {
        "last_register": "never",
        "up": "no",
        "who_last_registered": "--",
        "inst_id": 8188,
    },
}
_site_uci.update(
    _registrations(
        [
            "1416.9dff.e928/48",
            "1416.9dff.eae8/48",
            "1416.9dff.eb28/48",
            "1416.9dff.ebc8/48",
            "1416.9dff.1328/48",
            "1416.9dff.13e8/48",
        ]
    )
)

expected_output = {
    "instance_id": {
        4100: {"lisp": 0},
        8188: {
            "lisp": 0,
            "site_name": {
                "site_uci": _site_uci,
                "site_uci2": _registrations(
                    [
                        "1416.9dff.16c8/48",
                        "1416.9dff.2428/48",
                        "1416.9dff.10a9/48",
                        "1416.9dff.01eb/48",
                        "1416.9dff.1bcb/48",
                    ]
                ),
                "site_uci3": _registrations(
                    [
                        "1416.9dff.248b/48",
                        "1416.9dff.254b/48",
                        "1416.9dff.264b/48",
                    ]
                ),
            },
        },
    }
}
| 39.317757
| 66
| 0.292132
| 311
| 4,207
| 3.73955
| 0.151125
| 0.154772
| 0.219261
| 0.216681
| 0.844368
| 0.828891
| 0.828891
| 0.828891
| 0.828891
| 0.828891
| 0
| 0.221977
| 0.569527
| 4,207
| 106
| 67
| 39.688679
| 0.42021
| 0
| 0
| 0.537736
| 0
| 0
| 0.299263
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
506c580e0074b138400fdf69b32335d7d7b1b641
| 4,364
|
py
|
Python
|
DailyProgrammer/20120613B.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | 2
|
2020-12-23T18:59:22.000Z
|
2021-04-14T13:16:09.000Z
|
DailyProgrammer/20120613B.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
DailyProgrammer/20120613B.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
"""
Find the longest palindrome in the following 1169-character string:
Fourscoreandsevenyearsagoourfaathersbroughtforthonthisconta
inentanewnationconceivedinzLibertyanddedicatedtotheproposit
ionthatallmenarecreatedequalNowweareengagedinagreahtcivilwa
rtestingwhetherthatnaptionoranynartionsoconceivedandsodedic
atedcanlongendureWeareqmetonagreatbattlefiemldoftzhatwarWeh
avecometodedicpateaportionofthatfieldasafinalrestingplacefo
rthosewhoheregavetheirlivesthatthatnationmightliveItisaltog
etherfangandproperthatweshoulddothisButinalargersensewecann
otdedicatewecannotconsecratewecannothallowthisgroundThebrav
elmenlivinganddeadwhostruggledherehaveconsecrateditfarabove
ourpoorponwertoaddordetractTgheworldadswfilllittlenotlenorl
ongrememberwhatwesayherebutitcanneverforgetwhattheydidhereI
tisforusthelivingrathertobededicatedheretotheulnfinishedwor
kwhichtheywhofoughtherehavethusfarsonoblyadvancedItisrather
forustobeherededicatedtothegreattdafskremainingbeforeusthat
fromthesehonoreddeadwetakeincreaseddevotiontothatcauseforwh
ichtheygavethelastpfullmeasureofdevotionthatweherehighlyres
olvethatthesedeadshallnothavediedinvainthatthisnationunsder
Godshallhaveanewbirthoffreedomandthatgovernmentofthepeopleb
ythepeopleforthepeopleshallnotperishfromtheearth
Your task is to write a function that finds the longest palindrome in a string and apply it to the string given above.
taken from http://challenge.greplin.com/ :)
It seems the number of users giving challenges have been reduced. Since my final exams are going on and its kinda
difficult to think of all the challenges, I kindly request you all to suggest us interesting challenges at
/r/dailyprogrammer_ideas .. Thank you!
"""
import numpy as np
def longest_common_substring(str1, str2):
    """Return the longest common substring of *str1* and *str2*.

    Classic dynamic-programming table (from challenge 20120602C, based on
    the Wikipedia pseudocode): ``L[i, j]`` holds the length of the common
    suffix of ``str1[:i+1]`` and ``str2[:j+1]``.

    Returns the substring itself when there is a unique longest match, or a
    list of all tied matches otherwise (an empty list when the inputs share
    no characters at all).
    """
    l1 = len(str1)
    l2 = len(str2)
    # Iterate with the shorter string on the outer axis.
    if l2 < l1:
        l1, l2 = l2, l1
        str1, str2 = str2, str1
    # Integer dtype: the table stores lengths, so there is no reason to pay
    # for the default float64 and cast back with int() on every new maximum.
    L = np.zeros((l1, l2), dtype=np.int64)
    z = 0    # length of the best match found so far
    ret = []  # all candidate substrings of length z
    for i in range(l1):
        for j in range(l2):
            if str1[i] == str2[j]:
                if i == 0 or j == 0:
                    L[i, j] = 1
                else:
                    L[i, j] = L[i - 1, j - 1] + 1
                if L[i, j] > z:
                    # Strictly longer match: restart the candidate list.
                    z = int(L[i, j])
                    ret = [str1[i - z + 1:i + 1]]
                elif L[i, j] == z:
                    # Tie with the current best: collect it too.
                    ret.append(str1[i - z + 1:i + 1])
            else:
                L[i, j] = 0
    # Unwrap a unique winner; otherwise hand back the (possibly empty) list.
    if len(ret) == 1:
        return ret[0]
    else:
        return ret
def enforce_palindrom(answer):
    """Map each candidate in *answer* to whether it reads the same reversed.

    ``longest_common_substring`` returns either one string (unique winner)
    or a list of tied candidates.  For a list, each candidate is replaced by
    a bool saying whether it is a true palindrome; any non-list input is
    returned unchanged.
    """
    # isinstance() instead of the type(x) == list anti-pattern; this also
    # accepts list subclasses.
    if isinstance(answer, list):
        return [candidate == candidate[::-1] for candidate in answer]
    return answer
def main():
    """Hunt for the longest palindrome in the Gettysburg Address.

    The (typo-riddled, as given by the challenge) text is lower-cased
    and the longest common substring between the text and its reverse
    is printed — the classic LCS-based palindrome heuristic.
    """
    text = (
        'Fourscoreandsevenyearsagoourfaathersbroughtforthonthisconta'
        'inentanewnationconceivedinzLibertyanddedicatedtotheproposit'
        'ionthatallmenarecreatedequalNowweareengagedinagreahtcivilwa'
        'rtestingwhetherthatnaptionoranynartionsoconceivedandsodedic'
        'atedcanlongendureWeareqmetonagreatbattlefiemldoftzhatwarWeh'
        'avecometodedicpateaportionofthatfieldasafinalrestingplacefo'
        'rthosewhoheregavetheirlivesthatthatnationmightliveItisaltog'
        'etherfangandproperthatweshoulddothisButinalargersensewecann'
        'otdedicatewecannotconsecratewecannothallowthisgroundThebrav'
        'elmenlivinganddeadwhostruggledherehaveconsecrateditfarabove'
        'ourpoorponwertoaddordetractTgheworldadswfilllittlenotlenorl'
        'ongrememberwhatwesayherebutitcanneverforgetwhattheydidhereI'
        'tisforusthelivingrathertobededicatedheretotheulnfinishedwor'
        'kwhichtheywhofoughtherehavethusfarsonoblyadvancedItisrather'
        'forustobeherededicatedtothegreattdafskremainingbeforeusthat'
        'fromthesehonoreddeadwetakeincreaseddevotiontothatcauseforwh'
        'ichtheygavethelastpfullmeasureofdevotionthatweherehighlyres'
        'olvethatthesedeadshallnothavediedinvainthatthisnationunsder'
        'Godshallhaveanewbirthoffreedomandthatgovernmentofthepeopleb'
        'ythepeopleforthepeopleshallnotperishfromtheearth'
    )
    lowered = text.lower()
    result = longest_common_substring(lowered, lowered[::-1])
    print(result)
if __name__ == "__main__":
    main()
| 40.785047
| 118
| 0.740834
| 283
| 4,364
| 11.374558
| 0.44523
| 0.004349
| 0.005592
| 0.013669
| 0.740292
| 0.735632
| 0.73004
| 0.73004
| 0.73004
| 0.73004
| 0
| 0.014917
| 0.201192
| 4,364
| 106
| 119
| 41.169811
| 0.908491
| 0.390238
| 0
| 0.051724
| 0
| 0
| 0.451823
| 0.448752
| 0
| 1
| 0
| 0
| 0
| 1
| 0.051724
| false
| 0
| 0.017241
| 0
| 0.12069
| 0.017241
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5086df8a1e0e7359b4ca134709e1922b4e5113aa
| 12,870
|
py
|
Python
|
tests/test_parsing.py
|
andrewguy/CCARL
|
0afda67bcc58be2b6b6bf426cccaab04453c0590
|
[
"MIT"
] | 2
|
2020-05-13T12:50:44.000Z
|
2020-07-27T08:32:42.000Z
|
tests/test_parsing.py
|
andrewguy/CCARL
|
0afda67bcc58be2b6b6bf426cccaab04453c0590
|
[
"MIT"
] | 1
|
2020-04-30T15:33:45.000Z
|
2021-11-30T01:53:18.000Z
|
tests/test_parsing.py
|
andrewguy/CCARL
|
0afda67bcc58be2b6b6bf426cccaab04453c0590
|
[
"MIT"
] | 2
|
2020-12-05T00:25:43.000Z
|
2022-02-10T13:58:35.000Z
|
from ccarl import ccarl
from ccarl.glycan_parsers import cfg_parser, gsl_parser
from collections import Counter
from ccarl.glycan_graph_methods import generate_digraph_from_glycan_string
class TestCFGParsing:
    """Tests for parsing CFG-format glycan strings into digraphs."""

    @classmethod
    def setup_class(cls):
        # The same short CFG glycan string, with and without a
        # spacer/linker suffix ('-Sp1').
        cls.short_cfg_str_with_linker = 'Galb1-2Mana1-4Galb-Sp1'
        cls.short_cfg_str_without_linker = 'Galb1-2Mana1-4Gal'

    def test_parsing_with_linker(self):
        # Parsing the linker contributes exactly one extra node.
        for parse_linker, expected_nodes in ((True, 4), (False, 3)):
            graph = generate_digraph_from_glycan_string(
                self.short_cfg_str_with_linker, parse_linker=parse_linker)
            assert len(graph._node) == expected_nodes

    def test_parsing_without_linker(self):
        # With no linker in the string, parse_linker changes nothing.
        for parse_linker in (True, False):
            graph = generate_digraph_from_glycan_string(
                self.short_cfg_str_without_linker, parse_linker=parse_linker)
            assert len(graph._node) == 3
class TestGSLParsing:
    """Tests for GSL-format glycan parsing: linear, branched, sulfated
    and otherwise-modified glycans, with and without linker parsing.

    NOTE(review): the multi-line glycan fixtures below encode branch
    structure via '|' connector lines; their exact column layout is
    presumably significant to the parser — do not re-indent the
    triple-quoted string bodies.
    """
    @classmethod
    def setup_class(cls):
        # Linear CFG-style string with a sulfate and two fucose branches.
        cls.test_glycan_string_1 = '(6S)Galb1-4(Fuca1-3)(Fuca1-5)Ara-olb1-6Galb1-4Glc-Sp21'
        # Branched GSL layouts (2D text form).
        cls.test_gsl_string_1 = '''
Fuca-1
|
Galα-3Galß-4Glc-AO
| |
| |
Fucα-2 |
Fucα-3
'''
        cls.test_gsl_string_2 = '''
Galα-3Galß-4Glc-AO
| |
Fucα-2 |
Fucα-3'''
        # Sulfated glycans in the several notations the parser accepts:
        # '(S)-' inline, '(6S)' prefix, and 'SU-' residue.
        cls.test_gsl_sulf_string_1 = 'NeuAcα-(S)-6Galß-4Glcß-(S)-Cer36'
        cls.test_gsl_sulf_string_2 = '(6S)NeuAcα-(S)-6Galß-4Glcß-(S)-Cer36'
        cls.test_gsl_sulf_string_3 = 'SU-3GlcAß-3Galß-4Glcß-C30'
        # Unsaturated uronic acid (ΔUA) residue.
        cls.test_gsl_3 = 'ΔUA-4GlcNS-AO'
        # Parenthesised complex chemical modification.
        cls.test_gsl_comp_mod = '(3-deoxy,3-carboxymethyl)Galß-4Glcß-C30'
        # 'aGal' residue (should not be split into 'a' + 'Gal').
        cls.test_gsl_agal = 'aGalß-4Glcß-C30'
        cls.cfg_parser = cfg_parser.CFGGlycanParser()
        cls.gsl_parser = gsl_parser.GSLGlycanParser()
        # Branched fixtures of increasing size (9, 21 and 12 residues
        # respectively, counting the linker).
        cls.test_small_branched = '''
Galß-4GlcNAcß-6
|
Galß-4GlcNAcß-3Galß-4Glcß-Cer
|
Galß-4GlcNAcß-3'''
        cls.test_large_branched = '''
Galß-4GlcNAcß-6
|
Galß-4GlcNAcß-6 Galß-4GlcNAcß-3Galß-4Glcß-Cer
| |
Galß-4GlcNAcß-6 Galß-4GlcNAcß-3
| |
Galß-4GlcNAcß-6 Galß-4GlcNAcß-3
| |
Galß-4GlcNAcß-3
|
Galß-4GlcNAcß-3'''
        cls.test_med_branched = '''
Galß-4GlcNAcß-6
GlcNAcß-3 |
| Galß-4GlcNAcß-3Galß-4Glcß-Cer
| |
Galß-4GlcNAcß-3
|
Galß-4GlcNAcß-3'''
        # Free-text modifier in parentheses (becomes its own node).
        cls.test_c8_mod = '''(C8 diastereoisomer)NeuAcα-3Galß-4Glcß-Cer36'''
        # Heavily sulfated heparin-like chain; 13 backbone residues plus
        # 15 sulfate nodes and the AO linker = 29 nodes.
        cls.test_complex_su_mod = '''
SU-2
|
ΔUA-4GlcNSα-4IdoAα-4GlcNSα-4IdoAα-4GlcNSα-4IdoAα-4GlcNSα-4IdoAα-4GlcNSα-4IdoAα-4GlcNSα-4IdoAα-4GlcNS-AO
| | | | | | | | | | | | |
SU-6 SU-2 SU-6 SU-2 SU-6 SU-2 SU-6 SU-2 SU-6 SU-2 SU-6 SU-2 SU-6'''

    def test_parsing_glycan_string(self):
        # CFG parser, linker excluded: 7 nodes (S counts as a node).
        parsed_graph, node_labels, vertex_labels = self.cfg_parser.string_to_graph(self.test_glycan_string_1)
        assert isinstance(node_labels, dict)
        assert isinstance(vertex_labels, dict)
        assert len(node_labels) == 7  # Not parsing linker
        assert Counter(node_labels.values()) == Counter(['S', 'Gal', 'Fuc', 'Fuc', 'Ara-ol', 'Gal', 'Glc'])
        # Edge labels are (anomer, carbon_1, carbon_2) triples.
        assert Counter(vertex_labels.values()) == Counter([('', '', '6'), ('b', '1', '4'), ('a', '1', '3'),
                                                           ('a', '1', '5'), ('b', '1', '6'), ('b', '1', '4')])

    def test_parsing_glycan_string_with_linker(self):
        # As above but with the 'Sp21' linker as an extra node/edge.
        parsed_graph, node_labels, vertex_labels = self.cfg_parser.string_to_graph(self.test_glycan_string_1, parse_linker=True)
        assert isinstance(node_labels, dict)
        assert isinstance(vertex_labels, dict)
        assert len(node_labels) == 8  # Parsing linker
        assert Counter(node_labels.values()) == Counter(['S', 'Gal', 'Fuc', 'Fuc', 'Ara-ol', 'Gal', 'Glc', 'Sp21'])
        assert Counter(vertex_labels.values()) == Counter([('', '', '6'), ('b', '1', '4'), ('a', '1', '3'),
                                                           ('a', '1', '5'), ('b', '1', '6'), ('b', '1', '4'),
                                                           ('', '', '')])

    def test_parsing_gsl_string(self):
        # 2D branched GSL layout without the AO linker node.
        parsed_graph, node_labels, vertex_labels = self.gsl_parser.string_to_graph(self.test_gsl_string_1)
        assert isinstance(node_labels, dict)
        assert isinstance(vertex_labels, dict)
        assert len(node_labels) == 6  # Parsing linker
        assert Counter(node_labels.values()) == Counter(['Fuc', 'Gal', 'Gal', 'Glc', 'Fuc', 'Fuc'])
        assert Counter(vertex_labels.values()) == Counter([('a', '', '1'), ('a', '', '3'), ('b', '', '4'),
                                                           ('a', '', '2'), ('a', '', '3')])

    def test_parsing_gsl_string_with_linker(self):
        parsed_graph, node_labels, vertex_labels = self.gsl_parser.string_to_graph(self.test_gsl_string_1, parse_linker=True)
        assert isinstance(node_labels, dict)
        assert isinstance(vertex_labels, dict)
        assert len(node_labels) == 7  # Parsing linker
        assert Counter(node_labels.values()) == Counter(['Fuc', 'Gal', 'Gal', 'Glc', 'Fuc', 'Fuc', 'AO'])
        assert Counter(vertex_labels.values()) == Counter([('a', '', '1'), ('a', '', '3'), ('b', '', '4'),
                                                           ('a', '', '2'), ('a', '', '3'), ('', '', '')])

    def test_parsing_gsl_string_2_with_linker(self):
        # Same structure minus the top Fuc branch: one fewer node/edge.
        parsed_graph, node_labels, vertex_labels = self.gsl_parser.string_to_graph(self.test_gsl_string_2, parse_linker=True)
        assert isinstance(node_labels, dict)
        assert isinstance(vertex_labels, dict)
        assert len(node_labels) == 6  # Parsing linker
        assert Counter(node_labels.values()) == Counter(['Gal', 'Gal', 'Glc', 'Fuc', 'Fuc', 'AO'])
        assert Counter(vertex_labels.values()) == Counter([('a', '', '3'), ('b', '', '4'),
                                                           ('a', '', '2'), ('a', '', '3'), ('', '', '')])

    def test_parsing_gsl_string_1_with_S(self):
        # '(S)-' sulfates become standalone 'S' nodes; NeuAc is
        # normalised to 'Neu5Ac'.
        parsed_graph, node_labels, vertex_labels = self.gsl_parser.string_to_graph(self.test_gsl_sulf_string_1, parse_linker=True)
        print(node_labels)
        assert isinstance(node_labels, dict)
        assert isinstance(vertex_labels, dict)
        assert len(node_labels) == 6  # Parsing linker
        assert Counter(node_labels.values()) == Counter(['Neu5Ac', 'S', 'S', 'Gal', 'Glc', 'Cer36'])
        assert Counter(vertex_labels.values()) == Counter([('a', '', ''), ('', '', '6'),
                                                           ('b', '', '4'), ('b', '', ''), ('', '', '')])

    def test_parsing_gsl_string_2_with_S(self):
        # The '(6S)' prefix adds a third sulfate node.
        parsed_graph, node_labels, vertex_labels = self.gsl_parser.string_to_graph(self.test_gsl_sulf_string_2, parse_linker=True)
        assert isinstance(node_labels, dict)
        assert isinstance(vertex_labels, dict)
        assert len(node_labels) == 7  # Parsing linker
        assert Counter(node_labels.values()) == Counter(['S', 'Neu5Ac', 'S', 'S', 'Gal', 'Glc', 'Cer36'])
        assert Counter(vertex_labels.values()) == Counter([('', '', '6'), ('a', '', ''), ('', '', '6'),
                                                           ('b', '', '4'), ('b', '', ''), ('', '', '')])

    def test_parsing_gsl_string_with_UA(self):
        # Unsaturated uronic acid residue survives as 'ΔUA'.
        parsed_graph, node_labels, vertex_labels = self.gsl_parser.string_to_graph(self.test_gsl_3, parse_linker=True)
        assert isinstance(node_labels, dict)
        assert isinstance(vertex_labels, dict)
        assert len(node_labels) == 3  # Parsing linker
        assert Counter(node_labels.values()) == Counter(['ΔUA', 'GlcNS', 'AO'])
        assert Counter(vertex_labels.values()) == Counter([('', '', ''), ('', '', '4')])

    def test_parsing_gsl_string_with_complex_modification(self):
        # Parenthesised modification text becomes its own node label.
        parsed_graph, node_labels, vertex_labels = self.gsl_parser.string_to_graph(self.test_gsl_comp_mod, parse_linker=True)
        assert isinstance(node_labels, dict)
        assert isinstance(vertex_labels, dict)
        assert len(node_labels) == 4  # Parsing linker
        assert Counter(node_labels.values()) == Counter(['3-deoxy,3-carboxymethyl', 'Gal', 'Glc', 'C30'])
        assert Counter(vertex_labels.values()) == Counter([('', '', ''), ('b', '', '4'), ('b', '', '')])

    def test_parsing_gsl_string_with_SU(self):
        # 'SU-' prefix notation is also mapped to an 'S' node.
        parsed_graph, node_labels, vertex_labels = self.gsl_parser.string_to_graph(self.test_gsl_sulf_string_3, parse_linker=True)
        assert isinstance(node_labels, dict)
        assert isinstance(vertex_labels, dict)
        assert len(node_labels) == 5  # Parsing linker
        assert Counter(node_labels.values()) == Counter(['S', 'GlcA', 'Gal', 'Glc', 'C30'])
        assert Counter(vertex_labels.values()) == Counter([('', '', '3'), ('b', '', '3'), ('b', '', '4'), ('b', '', '')])

    def test_parsing_gsl_string_with_agal(self):
        # 'aGal' kept as a single residue, not anomer 'a' + 'Gal'.
        parsed_graph, node_labels, vertex_labels = self.gsl_parser.string_to_graph(self.test_gsl_agal, parse_linker=True)
        assert isinstance(node_labels, dict)
        assert isinstance(vertex_labels, dict)
        assert len(node_labels) == 3  # Parsing linker
        assert Counter(node_labels.values()) == Counter(['aGal', 'Glc', 'C30'])
        assert Counter(vertex_labels.values()) == Counter([('b', '', '4'), ('b', '', '')])

    def test_parsing_gsl_small_branch(self):
        # Branched fixtures only check node counts/labels; edge labels
        # are left unasserted (see commented-out lines).
        parsed_graph, node_labels, vertex_labels = self.gsl_parser.string_to_graph(self.test_small_branched, parse_linker=True)
        assert isinstance(node_labels, dict)
        assert isinstance(vertex_labels, dict)
        assert len(node_labels) == 9  # Parsing linker
        assert Counter(node_labels.values()) == Counter(['Gal', 'GlcNAc', 'Gal', 'GlcNAc', 'Gal', 'GlcNAc', 'Gal', 'Glc', 'Cer'])
        #assert Counter(vertex_labels.values()) == Counter([('b', '', '4'), ('b', '', '')])

    def test_parsing_gsl_med_branch(self):
        parsed_graph, node_labels, vertex_labels = self.gsl_parser.string_to_graph(self.test_med_branched, parse_linker=True)
        assert isinstance(node_labels, dict)
        assert isinstance(vertex_labels, dict)
        assert len(node_labels) == 12  # Parsing linker
        #assert Counter(node_labels.values()) == Counter(['aGal', 'Glc', 'C30'])
        #assert Counter(vertex_labels.values()) == Counter([('b', '', '4'), ('b', '', '')])

    def test_parsing_gsl_large_branch(self):
        parsed_graph, node_labels, vertex_labels = self.gsl_parser.string_to_graph(self.test_large_branched, parse_linker=True)
        assert isinstance(node_labels, dict)
        assert isinstance(vertex_labels, dict)
        assert len(node_labels) == 21  # Parsing linker
        #assert Counter(node_labels.values()) == Counter(['aGal', 'Glc', 'C30'])
        #assert Counter(vertex_labels.values()) == Counter([('b', '', '4'), ('b', '', '')])

    def test_parsing_gsl_c8(self):
        # Free-text '(C8 diastereoisomer)' modifier: spaces are replaced
        # with underscores in the node label.
        parsed_graph, node_labels, vertex_labels = self.gsl_parser.string_to_graph(self.test_c8_mod, parse_linker=True)
        assert isinstance(node_labels, dict)
        assert isinstance(vertex_labels, dict)
        assert len(node_labels) == 5  # Parsing linker
        assert Counter(node_labels.values()) == Counter(['C8_diastereoisomer', 'Neu5Ac', 'Gal', 'Glc', 'Cer36'])
        assert Counter(vertex_labels.values()) == Counter([('', '', ''), ('a', '', '3'), ('b', '', '4'), ('b', '', '')])

    def test_parsing_gsl_su_mod(self):
        # Heavily sulfated heparin-like chain: only the node count is
        # asserted (29 = residues + sulfates + linker).
        parsed_graph, node_labels, vertex_labels = self.gsl_parser.string_to_graph(self.test_complex_su_mod, parse_linker=True)
        assert isinstance(node_labels, dict)
        assert isinstance(vertex_labels, dict)
        assert len(node_labels) == 29  # Parsing linker
        #assert Counter(node_labels.values()) == Counter(['C8_diastereoisomer', 'Neu5Ac', 'Gal', 'Glc', 'Cer36'])
        #assert Counter(vertex_labels.values()) == Counter([('', '', ''), ('a', '', '3'), ('b', '', '4'), ('b', '', '')])
| 57.455357
| 130
| 0.56519
| 1,496
| 12,870
| 4.581551
| 0.080214
| 0.094835
| 0.074701
| 0.044354
| 0.888824
| 0.859352
| 0.841844
| 0.83557
| 0.801284
| 0.772104
| 0
| 0.026766
| 0.277156
| 12,870
| 223
| 131
| 57.713004
| 0.709986
| 0.065812
| 0
| 0.40625
| 1
| 0.015625
| 0.198116
| 0.038423
| 0
| 0
| 0
| 0
| 0.401042
| 1
| 0.104167
| false
| 0
| 0.020833
| 0
| 0.135417
| 0.005208
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
50a37190b4fee3f1790cdefffdda36b7355f6183
| 208,681
|
py
|
Python
|
pyboto3/route53.py
|
thecraftman/pyboto3
|
653a0db2b00b06708334431da8f169d1f7c7734f
|
[
"MIT"
] | null | null | null |
pyboto3/route53.py
|
thecraftman/pyboto3
|
653a0db2b00b06708334431da8f169d1f7c7734f
|
[
"MIT"
] | null | null | null |
pyboto3/route53.py
|
thecraftman/pyboto3
|
653a0db2b00b06708334431da8f169d1f7c7734f
|
[
"MIT"
] | null | null | null |
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def associate_vpc_with_hosted_zone(HostedZoneId=None, VPC=None, Comment=None):
    """
    Associate an Amazon VPC with a private hosted zone.

    See also: AWS API Documentation

    :example: response = client.associate_vpc_with_hosted_zone(
                  HostedZoneId='string',
                  VPC={
                      'VPCRegion': 'us-east-1'|'us-east-2'|'us-west-1'|'us-west-2'|'eu-west-1'|'eu-west-2'|'eu-central-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-south-1'|'ap-northeast-1'|'ap-northeast-2'|'sa-east-1'|'ca-central-1'|'cn-north-1',
                      'VPCId': 'string'
                  },
                  Comment='string'
              )

    :type HostedZoneId: string
    :param HostedZoneId: [REQUIRED]
        The ID of the private hosted zone that you want to associate an
        Amazon VPC with.  Note that you can't associate a VPC with a
        hosted zone that doesn't have an existing VPC association.

    :type VPC: dict
    :param VPC: [REQUIRED]
        A complex type with information about the VPC to associate with
        the private hosted zone:
        VPCRegion (string) -- (Private hosted zones only) The region in
        which you created an Amazon VPC.
        VPCId (string) -- (Private hosted zones only) The ID of an
        Amazon VPC.

    :type Comment: string
    :param Comment: Optional: A comment about the association request.

    :rtype: dict
    :return: {
                 'ChangeInfo': {
                     'Id': 'string',
                     'Status': 'PENDING'|'INSYNC',
                     'SubmittedAt': datetime(2015, 1, 1),
                     'Comment': 'string'
                 }
             }
    """
    # Documentation stub only -- pyboto3 modules exist for IDE
    # autocompletion; the real call is dispatched by boto3 at runtime.
    pass
def can_paginate(operation_name=None):
    """
    Check whether an operation can be paginated.

    :type operation_name: string
    :param operation_name: The operation name, identical to the method
        name on the client.  For example, if the method name is
        create_foo (normally invoked as client.create_foo(**kwargs))
        and the create_foo operation can be paginated, you can use
        client.get_paginator('create_foo').
    """
    # Documentation stub only -- real pagination support is resolved by
    # boto3 at runtime.
    pass
def change_resource_record_sets(HostedZoneId=None, ChangeBatch=None):
"""
Creates, changes, or deletes a resource record set, which contains authoritative DNS information for a specified domain name or subdomain name. For example, you can use ChangeResourceRecordSets to create a resource record set that routes traffic for test.example.com to a web server that has an IP address of 192.0.2.44.
The request body must include a document with a ChangeResourceRecordSetsRequest element. The request body contains a list of change items, known as a change batch. Change batches are considered transactional changes. When using the Amazon Route 53 API to change resource record sets, Amazon Route 53 either makes all or none of the changes in a change batch request. This ensures that Amazon Route 53 never partially implements the intended changes to the resource record sets in a hosted zone.
For example, a change batch request that deletes the CNAME record for www.example.com and creates an alias resource record set for www.example.com. Amazon Route 53 deletes the first resource record set and creates the second resource record set in a single operation. If either the DELETE or the CREATE action fails, then both changes (plus any other changes in the batch) fail, and the original CNAME record continues to exist.
To create resource record sets for complex routing configurations, use either the traffic flow visual editor in the Amazon Route 53 console or the API actions for traffic policies and traffic policy instances. Save the configuration as a traffic policy, then associate the traffic policy with one or more domain names (such as example.com) or subdomain names (such as www.example.com), in the same hosted zone or in multiple hosted zones. You can roll back the updates if the new configuration isn't performing as expected. For more information, see Using Traffic Flow to Route DNS Traffic in the Amazon Route 53 Developer Guide .
Use ChangeResourceRecordsSetsRequest to perform the following actions:
The syntax for a request depends on the type of resource record set that you want to create, delete, or update, such as weighted, alias, or failover. The XML elements in your request must appear in the order listed in the syntax.
For an example for each type of resource record set, see "Examples."
Don't refer to the syntax in the "Parameter Syntax" section, which includes all of the elements for every kind of resource record set that you can create, delete, or update by using ChangeResourceRecordSets .
When you submit a ChangeResourceRecordSets request, Amazon Route 53 propagates your changes to all of the Amazon Route 53 authoritative DNS servers. While your changes are propagating, GetChange returns a status of PENDING . When propagation is complete, GetChange returns a status of INSYNC . Changes generally propagate to all Amazon Route 53 name servers in a few minutes. In rare circumstances, propagation can take up to 30 minutes. For more information, see GetChange .
For information about the limits on a ChangeResourceRecordSets request, see Limits in the Amazon Route 53 Developer Guide .
See also: AWS API Documentation
:example: response = client.change_resource_record_sets(
HostedZoneId='string',
ChangeBatch={
'Comment': 'string',
'Changes': [
{
'Action': 'CREATE'|'DELETE'|'UPSERT',
'ResourceRecordSet': {
'Name': 'string',
'Type': 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA',
'SetIdentifier': 'string',
'Weight': 123,
'Region': 'us-east-1'|'us-east-2'|'us-west-1'|'us-west-2'|'ca-central-1'|'eu-west-1'|'eu-west-2'|'eu-central-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'|'sa-east-1'|'cn-north-1'|'ap-south-1',
'GeoLocation': {
'ContinentCode': 'string',
'CountryCode': 'string',
'SubdivisionCode': 'string'
},
'Failover': 'PRIMARY'|'SECONDARY',
'TTL': 123,
'ResourceRecords': [
{
'Value': 'string'
},
],
'AliasTarget': {
'HostedZoneId': 'string',
'DNSName': 'string',
'EvaluateTargetHealth': True|False
},
'HealthCheckId': 'string',
'TrafficPolicyInstanceId': 'string'
}
},
]
}
)
:type HostedZoneId: string
:param HostedZoneId: [REQUIRED]
The ID of the hosted zone that contains the resource record sets that you want to change.
:type ChangeBatch: dict
:param ChangeBatch: [REQUIRED]
A complex type that contains an optional comment and the Changes element.
Comment (string) --
Optional: Any comments you want to include about a change batch request.
Changes (list) -- [REQUIRED]Information about the changes to make to the record sets.
(dict) --The information for each resource record set that you want to change.
Action (string) -- [REQUIRED]The action to perform:
CREATE : Creates a resource record set that has the specified values.
DELETE : Deletes a existing resource record set.
Warning
To delete the resource record set that is associated with a traffic policy instance, use `` DeleteTrafficPolicyInstance `` . Amazon Route 53 will delete the resource record set automatically. If you delete the resource record set by using ChangeResourceRecordSets , Amazon Route 53 doesn't automatically delete the traffic policy instance, and you'll continue to be charged for it even though it's no longer in use.
UPSERT : If a resource record set doesn't already exist, Amazon Route 53 creates it. If a resource record set does exist, Amazon Route 53 updates it with the values in the request.
The values that you need to include in the request depend on the type of resource record set that you're creating, deleting, or updating:
Basic resource record sets (excluding alias, failover, geolocation, latency, and weighted resource record sets)
Name
Type
TTL
Failover, geolocation, latency, or weighted resource record sets (excluding alias resource record sets)
Name
Type
TTL
SetIdentifier
Alias resource record sets (including failover alias, geolocation alias, latency alias, and weighted alias resource record sets)
Name
Type
AliasTarget (includes DNSName , EvaluateTargetHealth , and HostedZoneId )
SetIdentifier (for failover, geolocation, latency, and weighted resource record sets)
ResourceRecordSet (dict) -- [REQUIRED]Information about the resource record set to create, delete, or update.
Name (string) -- [REQUIRED]The name of the domain you want to perform the action on.
Enter a fully qualified domain name, for example, www.example.com . You can optionally include a trailing dot. If you omit the trailing dot, Amazon Route 53 still assumes that the domain name that you specify is fully qualified. This means that Amazon Route 53 treats www.example.com (without a trailing dot) and www.example.com. (with a trailing dot) as identical.
For information about how to specify characters other than a-z , 0-9 , and - (hyphen) and how to specify internationalized domain names, see DNS Domain Name Format in the Amazon Route 53 Developer Guide .
You can use the asterisk (*) wildcard to replace the leftmost label in a domain name, for example, *.example.com . Note the following:
The * must replace the entire label. For example, you can't specify *prod.example.com or prod*.example.com .
The * can't replace any of the middle labels, for example, marketing.*.example.com.
If you include * in any position other than the leftmost label in a domain name, DNS treats it as an * character (ASCII 42), not as a wildcard.
Warning
You can't use the * wildcard for resource records sets that have a type of NS.
You can use the * wildcard as the leftmost label in a domain name, for example, *.example.com . You can't use an * for one of the middle labels, for example, marketing.*.example.com . In addition, the * must replace the entire label; for example, you can't specify prod*.example.com .
Type (string) -- [REQUIRED]The DNS record type. For information about different record types and how data is encoded for them, see Supported DNS Resource Record Types in the Amazon Route 53 Developer Guide .
Valid values for basic resource record sets: A | AAAA | CNAME | MX | NAPTR | NS | PTR | SOA | SPF | SRV | TXT
Values for weighted, latency, geolocation, and failover resource record sets: A | AAAA | CNAME | MX | NAPTR | PTR | SPF | SRV | TXT . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.
Note
SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of Type is SPF . RFC 7208, Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1 , has been updated to say, '...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.' In RFC 7208, see section 14.1, The SPF DNS Record Type .
Values for alias resource record sets:
CloudFront distributions: A If IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of A and one with a value of AAAA .
AWS Elastic Beanstalk environment that has a regionalized subdomain : A
ELB load balancers: A | AAAA
Amazon S3 buckets: A
Another resource record set in this hosted zone: Specify the type of the resource record set for which you're creating the alias. Specify any value except NS or SOA .
SetIdentifier (string) --
Weighted, Latency, Geo, and Failover resource record sets only: An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type. The value of SetIdentifier must be unique for each resource record set that has the same combination of DNS name and type. Omit SetIdentifier for any other types of record sets.
Weight (integer) --
Weighted resource record sets only: Among resource record sets that have the same combination of DNS name and type, a value that determines the proportion of DNS queries that Amazon Route 53 responds to using the current resource record set. Amazon Route 53 calculates the sum of the weights for the resource record sets that have the same combination of DNS name and type. Amazon Route 53 then responds to queries based on the ratio of a resource's weight to the total. Note the following:
You must specify a value for the Weight element for every weighted resource record set.
You can only specify one ResourceRecord per weighted resource record set.
You can't create latency, failover, or geolocation resource record sets that have the same values for the Name and Type elements as weighted resource record sets.
You can create a maximum of 100 weighted resource record sets that have the same values for the Name and Type elements.
For weighted (but not weighted alias) resource record sets, if you set Weight to 0 for a resource record set, Amazon Route 53 never responds to queries with the applicable value for that resource record set. However, if you set Weight to 0 for all resource record sets that have the same combination of DNS name and type, traffic is routed to all resources with equal probability. The effect of setting Weight to 0 is different when you associate health checks with weighted resource record sets. For more information, see Options for Configuring Amazon Route 53 Active-Active and Active-Passive Failover in the Amazon Route 53 Developer Guide .
Region (string) --
Latency-based resource record sets only: The Amazon EC2 Region where you created the resource that this resource record set refers to. The resource typically is an AWS resource, such as an EC2 instance or an ELB load balancer, and is referred to by an IP address or a DNS domain name, depending on the record type.
Note
Creating latency and latency alias resource record sets in private hosted zones is not supported.
When Amazon Route 53 receives a DNS query for a domain name and type for which you have created latency resource record sets, Amazon Route 53 selects the latency resource record set that has the lowest latency between the end user and the associated Amazon EC2 Region. Amazon Route 53 then returns the value that is associated with the selected resource record set.
Note the following:
You can only specify one ResourceRecord per latency resource record set.
You can only create one latency resource record set for each Amazon EC2 Region.
You aren't required to create latency resource record sets for all Amazon EC2 Regions. Amazon Route 53 will choose the region with the best latency from among the regions that you create latency resource record sets for.
You can't create non-latency resource record sets that have the same values for the Name and Type elements as latency resource record sets.
GeoLocation (dict) --
Geo location resource record sets only: A complex type that lets you control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query. For example, if you want all queries from Africa to be routed to a web server with an IP address of 192.0.2.111 , create a resource record set with a Type of A and a ContinentCode of AF .
Note
Creating geolocation and geolocation alias resource record sets in private hosted zones is not supported.
If you create separate resource record sets for overlapping geographic regions (for example, one resource record set for a continent and one for a country on the same continent), priority goes to the smallest geographic region. This allows you to route most queries for a continent to one resource and to route queries for a country on that continent to a different resource.
You can't create two geolocation resource record sets that specify the same geographic location.
The value * in the CountryCode element matches all geographic locations that aren't specified in other geolocation resource record sets that have the same values for the Name and Type elements.
Warning
Geolocation works by mapping IP addresses to locations. However, some IP addresses aren't mapped to geographic locations, so even if you create geolocation resource record sets that cover all seven continents, Amazon Route 53 will receive some DNS queries from locations that it can't identify. We recommend that you create a resource record set for which the value of CountryCode is * , which handles both queries that come from locations for which you haven't created geolocation resource record sets and queries from IP addresses that aren't mapped to a location. If you don't create a * resource record set, Amazon Route 53 returns a 'no answer' response for queries from those locations.
You can't create non-geolocation resource record sets that have the same values for the Name and Type elements as geolocation resource record sets.
ContinentCode (string) --The two-letter code for the continent.
Valid values: AF | AN | AS | EU | OC | NA | SA
Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode returns an InvalidInput error.
CountryCode (string) --The two-letter code for the country.
SubdivisionCode (string) --The code for the subdivision, for example, a state in the United States or a province in Canada.
Failover (string) --
Failover resource record sets only: To configure failover, you add the Failover element to two resource record sets. For one resource record set, you specify PRIMARY as the value for Failover ; for the other resource record set, you specify SECONDARY . In addition, you include the HealthCheckId element and specify the health check that you want Amazon Route 53 to perform for each resource record set.
Except where noted, the following failover behaviors assume that you have included the HealthCheckId element in both resource record sets:
When the primary resource record set is healthy, Amazon Route 53 responds to DNS queries with the applicable value from the primary resource record set regardless of the health of the secondary resource record set.
When the primary resource record set is unhealthy and the secondary resource record set is healthy, Amazon Route 53 responds to DNS queries with the applicable value from the secondary resource record set.
When the secondary resource record set is unhealthy, Amazon Route 53 responds to DNS queries with the applicable value from the primary resource record set regardless of the health of the primary resource record set.
If you omit the HealthCheckId element for the secondary resource record set, and if the primary resource record set is unhealthy, Amazon Route 53 always responds to DNS queries with the applicable value from the secondary resource record set. This is true regardless of the health of the associated endpoint.
You can't create non-failover resource record sets that have the same values for the Name and Type elements as failover resource record sets.
For failover alias resource record sets, you must also include the EvaluateTargetHealth element and set the value to true.
For more information about configuring failover for Amazon Route 53, see the following topics in the Amazon Route 53 Developer Guide :
Amazon Route 53 Health Checks and DNS Failover
Configuring Failover in a Private Hosted Zone
TTL (integer) --The resource record cache time to live (TTL), in seconds. Note the following:
If you're creating an alias resource record set, omit TTL . Amazon Route 53 uses the value of TTL for the alias target.
If you're associating this resource record set with a health check (if you're adding a HealthCheckId element), we recommend that you specify a TTL of 60 seconds or less so clients respond quickly to changes in health status.
All of the resource record sets in a group of weighted resource record sets must have the same value for TTL .
If a group of weighted resource record sets includes one or more weighted alias resource record sets for which the alias target is an ELB load balancer, we recommend that you specify a TTL of 60 seconds for all of the non-alias weighted resource record sets that have the same name and type. Values other than 60 seconds (the TTL for load balancers) will change the effect of the values that you specify for Weight .
ResourceRecords (list) --Information about the resource records to act upon.
Note
If you're creating an alias resource record set, omit ResourceRecords .
(dict) --Information specific to the resource record.
Note
If you're creating an alias resource record set, omit ResourceRecord .
Value (string) -- [REQUIRED]The current or new DNS record value, not to exceed 4,000 characters. In the case of a DELETE action, if the current value does not match the actual value, an error is returned. For descriptions about how to format Value for different record types, see Supported DNS Resource Record Types in the Amazon Route 53 Developer Guide .
You can specify more than one value for all record types except CNAME and SOA .
Note
If you're creating an alias resource record set, omit Value .
AliasTarget (dict) --
Alias resource record sets only: Information about the CloudFront distribution, AWS Elastic Beanstalk environment, ELB load balancer, Amazon S3 bucket, or Amazon Route 53 resource record set to which you're redirecting queries. The AWS Elastic Beanstalk environment must have a regionalized subdomain.
If you're creating resource records sets for a private hosted zone, note the following:
You can't create alias resource record sets for CloudFront distributions in a private hosted zone.
Creating geolocation alias resource record sets or latency alias resource record sets in a private hosted zone is unsupported.
For information about creating failover resource record sets in a private hosted zone, see Configuring Failover in a Private Hosted Zone in the Amazon Route 53 Developer Guide .
HostedZoneId (string) -- [REQUIRED]
Alias resource records sets only : The value used depends on where you want to route traffic:
CloudFront distribution
Specify Z2FDTNDATAQYW2 .
Note
Alias resource record sets for CloudFront can't be created in a private zone.
Elastic Beanstalk environment
Specify the hosted zone ID for the region in which you created the environment. The environment must have a regionalized subdomain. For a list of regions and the corresponding hosted zone IDs, see AWS Elastic Beanstalk in the 'AWS Regions and Endpoints' chapter of the Amazon Web Services General Reference .
ELB load balancer
Specify the value of the hosted zone ID for the load balancer. Use the following methods to get the hosted zone ID:
Elastic Load Balancing table in the 'AWS Regions and Endpoints' chapter of the Amazon Web Services General Reference : Use the value in the 'Amazon Route 53 Hosted Zone ID' column that corresponds with the region that you created your load balancer in.
AWS Management Console : Go to the Amazon EC2 page, click Load Balancers in the navigation pane, select the load balancer, and get the value of the Hosted zone field on the Description tab.
Elastic Load Balancing API : Use DescribeLoadBalancers to get the value of CanonicalHostedZoneNameId . For more information, see the applicable guide:
Classic Load Balancer: DescribeLoadBalancers
Application Load Balancer: DescribeLoadBalancers
AWS CLI : Use `` describe-load-balancers `` to get the value of CanonicalHostedZoneNameID .An Amazon S3 bucket configured as a static website
Specify the hosted zone ID for the region that you created the bucket in. For more information about valid values, see the Amazon Simple Storage Service Website Endpoints table in the 'AWS Regions and Endpoints' chapter of the Amazon Web Services General Reference .
Another Amazon Route 53 resource record set in your hosted zone
Specify the hosted zone ID of your hosted zone. (An alias resource record set can't reference a resource record set in a different hosted zone.)
DNSName (string) -- [REQUIRED]
Alias resource record sets only: The value that you specify depends on where you want to route queries:
CloudFront distribution
Specify the domain name that CloudFront assigned when you created your distribution.
Your CloudFront distribution must include an alternate domain name that matches the name of the resource record set. For example, if the name of the resource record set is acme.example.com , your CloudFront distribution must include acme.example.com as one of the alternate domain names. For more information, see Using Alternate Domain Names (CNAMEs) in the Amazon CloudFront Developer Guide .
Elastic Beanstalk environment
Specify the CNAME attribute for the environment. (The environment must have a regionalized domain name.) You can use the following methods to get the value of the CNAME attribute:
AWS Management Console : For information about how to get the value by using the console, see Using Custom Domains with AWS Elastic Beanstalk in the AWS Elastic Beanstalk Developer Guide .
Elastic Beanstalk API : Use the DescribeEnvironments action to get the value of the CNAME attribute. For more information, see DescribeEnvironments in the AWS Elastic Beanstalk API Reference .
AWS CLI : Use the describe-environments command to get the value of the CNAME attribute. For more information, see describe-environments in the AWS Command Line Interface Reference .ELB load balancer
Specify the DNS name that is associated with the load balancer. Get the DNS name by using the AWS Management Console, the ELB API, or the AWS CLI.
AWS Management Console : Go to the EC2 page, choose Load Balancers in the navigation pane, choose the load balancer, choose the Description tab, and get the value of the DNS name field. (If you're routing traffic to a Classic Load Balancer, get the value that begins with dualstack .)
Elastic Load Balancing API : Use DescribeLoadBalancers to get the value of DNSName . For more information, see the applicable guide:
Classic Load Balancer: DescribeLoadBalancers
Application Load Balancer: DescribeLoadBalancers
AWS CLI : Use `` describe-load-balancers `` to get the value of DNSName .Amazon S3 bucket that is configured as a static website
Specify the domain name of the Amazon S3 website endpoint in which you created the bucket, for example, s3-website-us-east-2.amazonaws.com . For more information about valid values, see the table Amazon Simple Storage Service (S3) Website Endpoints in the Amazon Web Services General Reference . For more information about using S3 buckets for websites, see Getting Started with Amazon Route 53 in the Amazon Route 53 Developer Guide.
Another Amazon Route 53 resource record set
Specify the value of the Name element for a resource record set in the current hosted zone.
EvaluateTargetHealth (boolean) -- [REQUIRED]
Applies only to alias, failover alias, geolocation alias, latency alias, and weighted alias resource record sets: When EvaluateTargetHealth is true , an alias resource record set inherits the health of the referenced AWS resource, such as an ELB load balancer, or the referenced resource record set.
Note the following:
You can't set EvaluateTargetHealth to true when the alias target is a CloudFront distribution.
If the AWS resource that you specify in AliasTarget is a resource record set or a group of resource record sets (for example, a group of weighted resource record sets), but it is not another alias resource record set, we recommend that you associate a health check with all of the resource record sets in the alias target. For more information, see What Happens When You Omit Health Checks? in the Amazon Route 53 Developer Guide .
If you specify an Elastic Beanstalk environment in HostedZoneId and DNSName , and if the environment contains an ELB load balancer, Elastic Load Balancing routes queries only to the healthy Amazon EC2 instances that are registered with the load balancer. (An environment automatically contains an ELB load balancer if it includes more than one EC2 instance.) If you set EvaluateTargetHealth to true and either no EC2 instances are healthy or the load balancer itself is unhealthy, Amazon Route 53 routes queries to other available resources that are healthy, if any. If the environment contains a single EC2 instance, there are no special requirements.
If you specify an ELB load balancer in `` AliasTarget `` , ELB routes queries only to the healthy EC2 instances that are registered with the load balancer. If no EC2 instances are healthy or if the load balancer itself is unhealthy, and if EvaluateTargetHealth is true for the corresponding alias resource record set, Amazon Route 53 routes queries to other resources. When you create a load balancer, you configure settings for ELB health checks; they're not Amazon Route 53 health checks, but they perform a similar function. Do not create Amazon Route 53 health checks for the EC2 instances that you register with an ELB load balancer. For more information, see How Health Checks Work in More Complex Amazon Route 53 Configurations in the Amazon Route 53 Developer Guide .
We recommend that you set EvaluateTargetHealth to true only when you have enough idle capacity to handle the failure of one or more endpoints.
For more information and examples, see Amazon Route 53 Health Checks and DNS Failover in the Amazon Route 53 Developer Guide .
HealthCheckId (string) --If you want Amazon Route 53 to return this resource record set in response to a DNS query only when a health check is passing, include the HealthCheckId element and specify the ID of the applicable health check.
Amazon Route 53 determines whether a resource record set is healthy based on one of the following:
By periodically sending a request to the endpoint that is specified in the health check
By aggregating the status of a specified group of health checks (calculated health checks)
By determining the current state of a CloudWatch alarm (CloudWatch metric health checks)
For more information, see How Amazon Route 53 Determines Whether an Endpoint Is Healthy .
The HealthCheckId element is only useful when Amazon Route 53 is choosing between two or more resource record sets to respond to a DNS query, and you want Amazon Route 53 to base the choice in part on the status of a health check. Configuring health checks only makes sense in the following configurations:
You're checking the health of the resource record sets in a group of weighted, latency, geolocation, or failover resource record sets, and you specify health check IDs for all of the resource record sets. If the health check for one resource record set specifies an endpoint that is not healthy, Amazon Route 53 stops responding to queries using the value for that resource record set.
You set EvaluateTargetHealth to true for the resource record sets in a group of alias, weighted alias, latency alias, geolocation alias, or failover alias resource record sets, and you specify health check IDs for all of the resource record sets that are referenced by the alias resource record sets.
Warning
Amazon Route 53 doesn't check the health of the endpoint specified in the resource record set, for example, the endpoint specified by the IP address in the Value element. When you add a HealthCheckId element to a resource record set, Amazon Route 53 checks the health of the endpoint that you specified in the health check.
For geolocation resource record sets, if an endpoint is unhealthy, Amazon Route 53 looks for a resource record set for the larger, associated geographic region. For example, suppose you have resource record sets for a state in the United States, for the United States, for North America, and for all locations. If the endpoint for the state resource record set is unhealthy, Amazon Route 53 checks the resource record sets for the United States, for North America, and for all locations (a resource record set for which the value of CountryCode is * ), in that order, until it finds a resource record set for which the endpoint is healthy.
If your health checks specify the endpoint only by domain name, we recommend that you create a separate health check for each endpoint. For example, create a health check for each HTTP server that is serving content for www.example.com . For the value of FullyQualifiedDomainName , specify the domain name of the server (such as us-east-2-www.example.com ), not the name of the resource record sets (example.com).
Warning
n this configuration, if you create a health check for which the value of FullyQualifiedDomainName matches the name of the resource record sets and then associate the health check with those resource record sets, health check results will be unpredictable.
For more information, see the following topics in the Amazon Route 53 Developer Guide :
Amazon Route 53 Health Checks and DNS Failover
Configuring Failover in a Private Hosted Zone
TrafficPolicyInstanceId (string) --When you create a traffic policy instance, Amazon Route 53 automatically creates a resource record set. TrafficPolicyInstanceId is the ID of the traffic policy instance that Amazon Route 53 created this resource record set for.
Warning
To delete the resource record set that is associated with a traffic policy instance, use DeleteTrafficPolicyInstance . Amazon Route 53 will delete the resource record set automatically. If you delete the resource record set by using ChangeResourceRecordSets , Amazon Route 53 doesn't automatically delete the traffic policy instance, and you'll continue to be charged for it even though it's no longer in use.
:rtype: dict
:return: {
'ChangeInfo': {
'Id': 'string',
'Status': 'PENDING'|'INSYNC',
'SubmittedAt': datetime(2015, 1, 1),
'Comment': 'string'
}
}
:returns:
HostedZoneId (string) -- [REQUIRED]
The ID of the hosted zone that contains the resource record sets that you want to change.
ChangeBatch (dict) -- [REQUIRED]
A complex type that contains an optional comment and the Changes element.
Comment (string) --
Optional: Any comments you want to include about a change batch request.
Changes (list) -- [REQUIRED]Information about the changes to make to the record sets.
(dict) --The information for each resource record set that you want to change.
Action (string) -- [REQUIRED]The action to perform:
CREATE : Creates a resource record set that has the specified values.
DELETE : Deletes an existing resource record set.
Warning
To delete the resource record set that is associated with a traffic policy instance, use `` DeleteTrafficPolicyInstance `` . Amazon Route 53 will delete the resource record set automatically. If you delete the resource record set by using ChangeResourceRecordSets , Amazon Route 53 doesn't automatically delete the traffic policy instance, and you'll continue to be charged for it even though it's no longer in use.
UPSERT : If a resource record set doesn't already exist, Amazon Route 53 creates it. If a resource record set does exist, Amazon Route 53 updates it with the values in the request.
The values that you need to include in the request depend on the type of resource record set that you're creating, deleting, or updating:
Basic resource record sets (excluding alias, failover, geolocation, latency, and weighted resource record sets)
Name
Type
TTL
Failover, geolocation, latency, or weighted resource record sets (excluding alias resource record sets)
Name
Type
TTL
SetIdentifier
Alias resource record sets (including failover alias, geolocation alias, latency alias, and weighted alias resource record sets)
Name
Type
AliasTarget (includes DNSName , EvaluateTargetHealth , and HostedZoneId )
SetIdentifier (for failover, geolocation, latency, and weighted resource record sets)
ResourceRecordSet (dict) -- [REQUIRED]Information about the resource record set to create, delete, or update.
Name (string) -- [REQUIRED]The name of the domain you want to perform the action on.
Enter a fully qualified domain name, for example, www.example.com . You can optionally include a trailing dot. If you omit the trailing dot, Amazon Route 53 still assumes that the domain name that you specify is fully qualified. This means that Amazon Route 53 treats www.example.com (without a trailing dot) and www.example.com. (with a trailing dot) as identical.
For information about how to specify characters other than a-z , 0-9 , and - (hyphen) and how to specify internationalized domain names, see DNS Domain Name Format in the Amazon Route 53 Developer Guide .
You can use the asterisk (*) wildcard to replace the leftmost label in a domain name, for example, *.example.com . Note the following:
The * must replace the entire label. For example, you can't specify *prod.example.com or prod*.example.com .
The * can't replace any of the middle labels, for example, marketing.*.example.com.
If you include * in any position other than the leftmost label in a domain name, DNS treats it as an * character (ASCII 42), not as a wildcard.
Warning
You can't use the * wildcard for resource records sets that have a type of NS.
You can use the * wildcard as the leftmost label in a domain name, for example, *.example.com . You can't use an * for one of the middle labels, for example, marketing.*.example.com . In addition, the * must replace the entire label; for example, you can't specify prod*.example.com .
Type (string) -- [REQUIRED]The DNS record type. For information about different record types and how data is encoded for them, see Supported DNS Resource Record Types in the Amazon Route 53 Developer Guide .
Valid values for basic resource record sets: A | AAAA | CNAME | MX | NAPTR | NS | PTR | SOA | SPF | SRV | TXT
Values for weighted, latency, geolocation, and failover resource record sets: A | AAAA | CNAME | MX | NAPTR | PTR | SPF | SRV | TXT . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.
Note
SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of Type is SPF . RFC 7208, Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1 , has been updated to say, "...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it." In RFC 7208, see section 14.1, The SPF DNS Record Type .
Values for alias resource record sets:
CloudFront distributions: A If IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of A and one with a value of AAAA .
AWS Elastic Beanstalk environment that has a regionalized subdomain : A
ELB load balancers: A | AAAA
Amazon S3 buckets: A
Another resource record set in this hosted zone: Specify the type of the resource record set for which you're creating the alias. Specify any value except NS or SOA .
SetIdentifier (string) --
Weighted, Latency, Geo, and Failover resource record sets only: An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type. The value of SetIdentifier must be unique for each resource record set that has the same combination of DNS name and type. Omit SetIdentifier for any other types of record sets.
Weight (integer) --
Weighted resource record sets only: Among resource record sets that have the same combination of DNS name and type, a value that determines the proportion of DNS queries that Amazon Route 53 responds to using the current resource record set. Amazon Route 53 calculates the sum of the weights for the resource record sets that have the same combination of DNS name and type. Amazon Route 53 then responds to queries based on the ratio of a resource's weight to the total. Note the following:
You must specify a value for the Weight element for every weighted resource record set.
You can only specify one ResourceRecord per weighted resource record set.
You can't create latency, failover, or geolocation resource record sets that have the same values for the Name and Type elements as weighted resource record sets.
You can create a maximum of 100 weighted resource record sets that have the same values for the Name and Type elements.
For weighted (but not weighted alias) resource record sets, if you set Weight to 0 for a resource record set, Amazon Route 53 never responds to queries with the applicable value for that resource record set. However, if you set Weight to 0 for all resource record sets that have the same combination of DNS name and type, traffic is routed to all resources with equal probability. The effect of setting Weight to 0 is different when you associate health checks with weighted resource record sets. For more information, see Options for Configuring Amazon Route 53 Active-Active and Active-Passive Failover in the Amazon Route 53 Developer Guide .
Region (string) --
Latency-based resource record sets only: The Amazon EC2 Region where you created the resource that this resource record set refers to. The resource typically is an AWS resource, such as an EC2 instance or an ELB load balancer, and is referred to by an IP address or a DNS domain name, depending on the record type.
Note
Creating latency and latency alias resource record sets in private hosted zones is not supported.
When Amazon Route 53 receives a DNS query for a domain name and type for which you have created latency resource record sets, Amazon Route 53 selects the latency resource record set that has the lowest latency between the end user and the associated Amazon EC2 Region. Amazon Route 53 then returns the value that is associated with the selected resource record set.
Note the following:
You can only specify one ResourceRecord per latency resource record set.
You can only create one latency resource record set for each Amazon EC2 Region.
You aren't required to create latency resource record sets for all Amazon EC2 Regions. Amazon Route 53 will choose the region with the best latency from among the regions that you create latency resource record sets for.
You can't create non-latency resource record sets that have the same values for the Name and Type elements as latency resource record sets.
GeoLocation (dict) --
Geo location resource record sets only: A complex type that lets you control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query. For example, if you want all queries from Africa to be routed to a web server with an IP address of 192.0.2.111 , create a resource record set with a Type of A and a ContinentCode of AF .
Note
Creating geolocation and geolocation alias resource record sets in private hosted zones is not supported.
If you create separate resource record sets for overlapping geographic regions (for example, one resource record set for a continent and one for a country on the same continent), priority goes to the smallest geographic region. This allows you to route most queries for a continent to one resource and to route queries for a country on that continent to a different resource.
You can't create two geolocation resource record sets that specify the same geographic location.
The value * in the CountryCode element matches all geographic locations that aren't specified in other geolocation resource record sets that have the same values for the Name and Type elements.
Warning
Geolocation works by mapping IP addresses to locations. However, some IP addresses aren't mapped to geographic locations, so even if you create geolocation resource record sets that cover all seven continents, Amazon Route 53 will receive some DNS queries from locations that it can't identify. We recommend that you create a resource record set for which the value of CountryCode is * , which handles both queries that come from locations for which you haven't created geolocation resource record sets and queries from IP addresses that aren't mapped to a location. If you don't create a * resource record set, Amazon Route 53 returns a "no answer" response for queries from those locations.
You can't create non-geolocation resource record sets that have the same values for the Name and Type elements as geolocation resource record sets.
ContinentCode (string) --The two-letter code for the continent.
Valid values: AF | AN | AS | EU | OC | NA | SA
Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode returns an InvalidInput error.
CountryCode (string) --The two-letter code for the country.
SubdivisionCode (string) --The code for the subdivision, for example, a state in the United States or a province in Canada.
Failover (string) --
Failover resource record sets only: To configure failover, you add the Failover element to two resource record sets. For one resource record set, you specify PRIMARY as the value for Failover ; for the other resource record set, you specify SECONDARY . In addition, you include the HealthCheckId element and specify the health check that you want Amazon Route 53 to perform for each resource record set.
Except where noted, the following failover behaviors assume that you have included the HealthCheckId element in both resource record sets:
When the primary resource record set is healthy, Amazon Route 53 responds to DNS queries with the applicable value from the primary resource record set regardless of the health of the secondary resource record set.
When the primary resource record set is unhealthy and the secondary resource record set is healthy, Amazon Route 53 responds to DNS queries with the applicable value from the secondary resource record set.
When the secondary resource record set is unhealthy, Amazon Route 53 responds to DNS queries with the applicable value from the primary resource record set regardless of the health of the primary resource record set.
If you omit the HealthCheckId element for the secondary resource record set, and if the primary resource record set is unhealthy, Amazon Route 53 always responds to DNS queries with the applicable value from the secondary resource record set. This is true regardless of the health of the associated endpoint.
You can't create non-failover resource record sets that have the same values for the Name and Type elements as failover resource record sets.
For failover alias resource record sets, you must also include the EvaluateTargetHealth element and set the value to true.
For more information about configuring failover for Amazon Route 53, see the following topics in the Amazon Route 53 Developer Guide :
Amazon Route 53 Health Checks and DNS Failover
Configuring Failover in a Private Hosted Zone
TTL (integer) --The resource record cache time to live (TTL), in seconds. Note the following:
If you're creating an alias resource record set, omit TTL . Amazon Route 53 uses the value of TTL for the alias target.
If you're associating this resource record set with a health check (if you're adding a HealthCheckId element), we recommend that you specify a TTL of 60 seconds or less so clients respond quickly to changes in health status.
All of the resource record sets in a group of weighted resource record sets must have the same value for TTL .
If a group of weighted resource record sets includes one or more weighted alias resource record sets for which the alias target is an ELB load balancer, we recommend that you specify a TTL of 60 seconds for all of the non-alias weighted resource record sets that have the same name and type. Values other than 60 seconds (the TTL for load balancers) will change the effect of the values that you specify for Weight .
ResourceRecords (list) --Information about the resource records to act upon.
Note
If you're creating an alias resource record set, omit ResourceRecords .
(dict) --Information specific to the resource record.
Note
If you're creating an alias resource record set, omit ResourceRecord .
Value (string) -- [REQUIRED]The current or new DNS record value, not to exceed 4,000 characters. In the case of a DELETE action, if the current value does not match the actual value, an error is returned. For descriptions about how to format Value for different record types, see Supported DNS Resource Record Types in the Amazon Route 53 Developer Guide .
You can specify more than one value for all record types except CNAME and SOA .
Note
If you're creating an alias resource record set, omit Value .
AliasTarget (dict) --
Alias resource record sets only: Information about the CloudFront distribution, AWS Elastic Beanstalk environment, ELB load balancer, Amazon S3 bucket, or Amazon Route 53 resource record set to which you're redirecting queries. The AWS Elastic Beanstalk environment must have a regionalized subdomain.
If you're creating resource records sets for a private hosted zone, note the following:
You can't create alias resource record sets for CloudFront distributions in a private hosted zone.
Creating geolocation alias resource record sets or latency alias resource record sets in a private hosted zone is unsupported.
For information about creating failover resource record sets in a private hosted zone, see Configuring Failover in a Private Hosted Zone in the Amazon Route 53 Developer Guide .
HostedZoneId (string) -- [REQUIRED]
Alias resource records sets only : The value used depends on where you want to route traffic:
CloudFront distribution
Specify Z2FDTNDATAQYW2 .
Note
Alias resource record sets for CloudFront can't be created in a private zone.
Elastic Beanstalk environment
Specify the hosted zone ID for the region in which you created the environment. The environment must have a regionalized subdomain. For a list of regions and the corresponding hosted zone IDs, see AWS Elastic Beanstalk in the "AWS Regions and Endpoints" chapter of the Amazon Web Services General Reference .
ELB load balancer
Specify the value of the hosted zone ID for the load balancer. Use the following methods to get the hosted zone ID:
Elastic Load Balancing table in the "AWS Regions and Endpoints" chapter of the Amazon Web Services General Reference : Use the value in the "Amazon Route 53 Hosted Zone ID" column that corresponds with the region that you created your load balancer in.
AWS Management Console : Go to the Amazon EC2 page, click Load Balancers in the navigation pane, select the load balancer, and get the value of the Hosted zone field on the Description tab.
Elastic Load Balancing API : Use DescribeLoadBalancers to get the value of CanonicalHostedZoneNameId . For more information, see the applicable guide:
Classic Load Balancer: DescribeLoadBalancers
Application Load Balancer: DescribeLoadBalancers
AWS CLI : Use `` describe-load-balancers `` to get the value of CanonicalHostedZoneNameID .An Amazon S3 bucket configured as a static website
Specify the hosted zone ID for the region that you created the bucket in. For more information about valid values, see the Amazon Simple Storage Service Website Endpoints table in the "AWS Regions and Endpoints" chapter of the Amazon Web Services General Reference .
Another Amazon Route 53 resource record set in your hosted zone
Specify the hosted zone ID of your hosted zone. (An alias resource record set can't reference a resource record set in a different hosted zone.)
DNSName (string) -- [REQUIRED]
Alias resource record sets only: The value that you specify depends on where you want to route queries:
CloudFront distribution
Specify the domain name that CloudFront assigned when you created your distribution.
Your CloudFront distribution must include an alternate domain name that matches the name of the resource record set. For example, if the name of the resource record set is acme.example.com , your CloudFront distribution must include acme.example.com as one of the alternate domain names. For more information, see Using Alternate Domain Names (CNAMEs) in the Amazon CloudFront Developer Guide .
Elastic Beanstalk environment
Specify the CNAME attribute for the environment. (The environment must have a regionalized domain name.) You can use the following methods to get the value of the CNAME attribute:
AWS Management Console : For information about how to get the value by using the console, see Using Custom Domains with AWS Elastic Beanstalk in the AWS Elastic Beanstalk Developer Guide .
Elastic Beanstalk API : Use the DescribeEnvironments action to get the value of the CNAME attribute. For more information, see DescribeEnvironments in the AWS Elastic Beanstalk API Reference .
AWS CLI : Use the describe-environments command to get the value of the CNAME attribute. For more information, see describe-environments in the AWS Command Line Interface Reference .ELB load balancer
Specify the DNS name that is associated with the load balancer. Get the DNS name by using the AWS Management Console, the ELB API, or the AWS CLI.
AWS Management Console : Go to the EC2 page, choose Load Balancers in the navigation pane, choose the load balancer, choose the Description tab, and get the value of the DNS name field. (If you're routing traffic to a Classic Load Balancer, get the value that begins with dualstack .)
Elastic Load Balancing API : Use DescribeLoadBalancers to get the value of DNSName . For more information, see the applicable guide:
Classic Load Balancer: DescribeLoadBalancers
Application Load Balancer: DescribeLoadBalancers
AWS CLI : Use `` describe-load-balancers `` to get the value of DNSName .Amazon S3 bucket that is configured as a static website
Specify the domain name of the Amazon S3 website endpoint in which you created the bucket, for example, s3-website-us-east-2.amazonaws.com . For more information about valid values, see the table Amazon Simple Storage Service (S3) Website Endpoints in the Amazon Web Services General Reference . For more information about using S3 buckets for websites, see Getting Started with Amazon Route 53 in the Amazon Route 53 Developer Guide.
Another Amazon Route 53 resource record set
Specify the value of the Name element for a resource record set in the current hosted zone.
EvaluateTargetHealth (boolean) -- [REQUIRED]
Applies only to alias, failover alias, geolocation alias, latency alias, and weighted alias resource record sets: When EvaluateTargetHealth is true , an alias resource record set inherits the health of the referenced AWS resource, such as an ELB load balancer, or the referenced resource record set.
Note the following:
You can't set EvaluateTargetHealth to true when the alias target is a CloudFront distribution.
If the AWS resource that you specify in AliasTarget is a resource record set or a group of resource record sets (for example, a group of weighted resource record sets), but it is not another alias resource record set, we recommend that you associate a health check with all of the resource record sets in the alias target. For more information, see What Happens When You Omit Health Checks? in the Amazon Route 53 Developer Guide .
If you specify an Elastic Beanstalk environment in HostedZoneId and DNSName , and if the environment contains an ELB load balancer, Elastic Load Balancing routes queries only to the healthy Amazon EC2 instances that are registered with the load balancer. (An environment automatically contains an ELB load balancer if it includes more than one EC2 instance.) If you set EvaluateTargetHealth to true and either no EC2 instances are healthy or the load balancer itself is unhealthy, Amazon Route 53 routes queries to other available resources that are healthy, if any. If the environment contains a single EC2 instance, there are no special requirements.
If you specify an ELB load balancer in `` AliasTarget `` , ELB routes queries only to the healthy EC2 instances that are registered with the load balancer. If no EC2 instances are healthy or if the load balancer itself is unhealthy, and if EvaluateTargetHealth is true for the corresponding alias resource record set, Amazon Route 53 routes queries to other resources. When you create a load balancer, you configure settings for ELB health checks; they're not Amazon Route 53 health checks, but they perform a similar function. Do not create Amazon Route 53 health checks for the EC2 instances that you register with an ELB load balancer. For more information, see How Health Checks Work in More Complex Amazon Route 53 Configurations in the Amazon Route 53 Developer Guide .
We recommend that you set EvaluateTargetHealth to true only when you have enough idle capacity to handle the failure of one or more endpoints.
For more information and examples, see Amazon Route 53 Health Checks and DNS Failover in the Amazon Route 53 Developer Guide .
HealthCheckId (string) --If you want Amazon Route 53 to return this resource record set in response to a DNS query only when a health check is passing, include the HealthCheckId element and specify the ID of the applicable health check.
Amazon Route 53 determines whether a resource record set is healthy based on one of the following:
By periodically sending a request to the endpoint that is specified in the health check
By aggregating the status of a specified group of health checks (calculated health checks)
By determining the current state of a CloudWatch alarm (CloudWatch metric health checks)
For more information, see How Amazon Route 53 Determines Whether an Endpoint Is Healthy .
The HealthCheckId element is only useful when Amazon Route 53 is choosing between two or more resource record sets to respond to a DNS query, and you want Amazon Route 53 to base the choice in part on the status of a health check. Configuring health checks only makes sense in the following configurations:
You're checking the health of the resource record sets in a group of weighted, latency, geolocation, or failover resource record sets, and you specify health check IDs for all of the resource record sets. If the health check for one resource record set specifies an endpoint that is not healthy, Amazon Route 53 stops responding to queries using the value for that resource record set.
You set EvaluateTargetHealth to true for the resource record sets in a group of alias, weighted alias, latency alias, geolocation alias, or failover alias resource record sets, and you specify health check IDs for all of the resource record sets that are referenced by the alias resource record sets.
Warning
Amazon Route 53 doesn't check the health of the endpoint specified in the resource record set, for example, the endpoint specified by the IP address in the Value element. When you add a HealthCheckId element to a resource record set, Amazon Route 53 checks the health of the endpoint that you specified in the health check.
For geolocation resource record sets, if an endpoint is unhealthy, Amazon Route 53 looks for a resource record set for the larger, associated geographic region. For example, suppose you have resource record sets for a state in the United States, for the United States, for North America, and for all locations. If the endpoint for the state resource record set is unhealthy, Amazon Route 53 checks the resource record sets for the United States, for North America, and for all locations (a resource record set for which the value of CountryCode is * ), in that order, until it finds a resource record set for which the endpoint is healthy.
If your health checks specify the endpoint only by domain name, we recommend that you create a separate health check for each endpoint. For example, create a health check for each HTTP server that is serving content for www.example.com . For the value of FullyQualifiedDomainName , specify the domain name of the server (such as us-east-2-www.example.com ), not the name of the resource record sets (example.com).
Warning
In this configuration, if you create a health check for which the value of FullyQualifiedDomainName matches the name of the resource record sets and then associate the health check with those resource record sets, health check results will be unpredictable.
For more information, see the following topics in the Amazon Route 53 Developer Guide :
Amazon Route 53 Health Checks and DNS Failover
Configuring Failover in a Private Hosted Zone
TrafficPolicyInstanceId (string) --When you create a traffic policy instance, Amazon Route 53 automatically creates a resource record set. TrafficPolicyInstanceId is the ID of the traffic policy instance that Amazon Route 53 created this resource record set for.
Warning
To delete the resource record set that is associated with a traffic policy instance, use DeleteTrafficPolicyInstance . Amazon Route 53 will delete the resource record set automatically. If you delete the resource record set by using ChangeResourceRecordSets , Amazon Route 53 doesn't automatically delete the traffic policy instance, and you'll continue to be charged for it even though it's no longer in use.
"""
pass
def change_tags_for_resource(ResourceType=None, ResourceId=None, AddTags=None, RemoveTagKeys=None):
    """
    Add, edit, or delete tags for a health check or a hosted zone.

    Stub mirroring the Route 53 ``ChangeTagsForResource`` operation.
    For information about using tags for cost allocation, see Using Cost
    Allocation Tags in the AWS Billing and Cost Management User Guide.
    See also: AWS API Documentation

    :example: response = client.change_tags_for_resource(
        ResourceType='healthcheck'|'hostedzone',
        ResourceId='string',
        AddTags=[{'Key': 'string', 'Value': 'string'}],
        RemoveTagKeys=['string']
    )

    :type ResourceType: string
    :param ResourceType: [REQUIRED]
        The type of the resource: 'healthcheck' for health checks,
        'hostedzone' for hosted zones.
    :type ResourceId: string
    :param ResourceId: [REQUIRED]
        The ID of the resource for which you want to add, change, or
        delete tags.
    :type AddTags: list
    :param AddTags: A list of tags (dicts with 'Key' and 'Value') that you
        want to add to the specified health check or hosted zone, and/or
        tags whose Value you want to edit. A maximum of 10 tags can be
        added to a health check or a hosted zone.
        Key semantics depend on the operation: the name of a new tag when
        adding; the name of the tag whose Value changes when editing; the
        name of the tag to remove when deleting. Editing the default
        'Name' tag sets the name shown for a health check in the Amazon
        Route 53 console.
    :type RemoveTagKeys: list
    :param RemoveTagKeys: A list of up to 10 tag keys (strings) that you
        want to delete from the specified health check or hosted zone.
    :rtype: dict
    :return: {}
    """
    # Placeholder body: this module documents the client API surface only.
    pass
def create_health_check(CallerReference=None, HealthCheckConfig=None):
    """
    Creates a new health check.

    For information about adding health checks to resource record sets, see
    ResourceRecordSet$HealthCheckId in ChangeResourceRecordSets .
    If you're registering EC2 instances with an Elastic Load Balancing (ELB)
    load balancer, do not create Amazon Route 53 health checks for the EC2
    instances. When you register an EC2 instance with a load balancer, you
    configure settings for an ELB health check, which performs a similar
    function to an Amazon Route 53 health check.
    You can associate health checks with failover resource record sets in a
    private hosted zone.
    See also: AWS API Documentation

    :example: response = client.create_health_check(
        CallerReference='string',
        HealthCheckConfig={
            'IPAddress': 'string',
            'Port': 123,
            'Type': 'HTTP'|'HTTPS'|'HTTP_STR_MATCH'|'HTTPS_STR_MATCH'|'TCP'|'CALCULATED'|'CLOUDWATCH_METRIC',
            'ResourcePath': 'string',
            'FullyQualifiedDomainName': 'string',
            'SearchString': 'string',
            'RequestInterval': 123,
            'FailureThreshold': 123,
            'MeasureLatency': True|False,
            'Inverted': True|False,
            'HealthThreshold': 123,
            'ChildHealthChecks': [
                'string',
            ],
            'EnableSNI': True|False,
            'Regions': [
                'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'sa-east-1',
            ],
            'AlarmIdentifier': {
                'Region': 'us-east-1'|'us-east-2'|'us-west-1'|'us-west-2'|'ca-central-1'|'eu-central-1'|'eu-west-1'|'eu-west-2'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'|'sa-east-1',
                'Name': 'string'
            },
            'InsufficientDataHealthStatus': 'Healthy'|'Unhealthy'|'LastKnownStatus'
        }
    )

    :type CallerReference: string
    :param CallerReference: [REQUIRED]
        A unique string that identifies the request and that allows you to
        retry a failed CreateHealthCheck request without the risk of creating
        two identical health checks:
        - Same CallerReference and same settings as a previous request: if the
          health check doesn't exist, Amazon Route 53 creates it; if it does
          exist, Amazon Route 53 returns the settings for the existing health
          check.
        - Same CallerReference as a deleted health check (regardless of
          settings), or as an existing health check but with different
          settings: Amazon Route 53 returns a HealthCheckAlreadyExists error.
        - Unique CallerReference but settings identical to an existing health
          check: Amazon Route 53 creates the health check.

    :type HealthCheckConfig: dict
    :param HealthCheckConfig: [REQUIRED]
        A complex type that contains settings for the new health check:
        IPAddress (string) -- The IPv4 or IPv6 address of the endpoint to
            check (IPv4: e.g. 192.0.2.44; IPv6: e.g.
            2001:0db8:85a3:0000:0000:abcd:0001:2345, shortening per RFC 5952
            allowed). If omitted, Amazon Route 53 resolves
            FullyQualifiedDomainName via DNS at each RequestInterval and
            checks the health of the resolved address. For an EC2 endpoint,
            use an Elastic IP address so the address never changes.
            Constraints: Route 53 can't check endpoints whose IP address is in
            local, private, non-routable, or multicast ranges (see RFC 5735,
            RFC 6598, and RFC 5156). Omit IPAddress when Type is CALCULATED or
            CLOUDWATCH_METRIC.
        Port (integer) -- The port to check. Specify Port only when you
            specify IPAddress.
        Type (string) -- [REQUIRED] How Amazon Route 53 determines whether the
            endpoint is healthy. Warning: Type can't be changed after the
            health check is created. Valid values:
            HTTP / HTTPS -- establish a TCP connection, then submit a request
                and wait for a status code of 200 or greater and less than 400
                (for HTTPS the endpoint must support TLS v1.0 or later).
            HTTP_STR_MATCH / HTTPS_STR_MATCH -- as above, and additionally
                search the first 5,120 bytes of the response body for
                SearchString.
            TCP -- establish a TCP connection only.
            CLOUDWATCH_METRIC -- track the state of a CloudWatch alarm
                (OK = healthy, ALARM = unhealthy; insufficient data is
                resolved per InsufficientDataHealthStatus).
            CALCULATED -- aggregate other health checks: healthy when the
                number of healthy ChildHealthChecks is at least
                HealthThreshold.
            For more information, see How Amazon Route 53 Determines Whether
            an Endpoint Is Healthy in the Amazon Route 53 Developer Guide.
        ResourcePath (string) -- The path to request when performing health
            checks, for example /docs/route53-health-check.html; any value for
            which the endpoint returns a 2xx or 3xx status code when healthy.
        FullyQualifiedDomainName (string) -- Behavior depends on IPAddress.
            With IPAddress: passed in the Host header for all checks except
            TCP (as FullyQualifiedDomainName for port 80/443 with the matching
            HTTP/HTTPS type, otherwise as FullyQualifiedDomainName:Port); if
            unset, IPAddress is substituted in the Host header. Without
            IPAddress: the domain name Route 53 resolves via DNS at each
            RequestInterval (IPv4 only; the check fails with a
            'DNS resolution failed' error if no A record exists). When
            checking weighted, latency, or failover resource record sets by
            domain name only, create a separate health check per endpoint and
            specify the server's own domain name (e.g.
            us-east-2-www.example.com), not the resource record set name
            (www.example.com); otherwise results are unpredictable.
        SearchString (string) -- For HTTP_STR_MATCH or HTTPS_STR_MATCH, the
            string to search for in the response body. The search is
            case-sensitive.
        RequestInterval (integer) -- Seconds between the response to one check
            and the next request from each health checker (default 30).
            Warning: can't be changed after the health check is created.
        FailureThreshold (integer) -- Number of consecutive checks required to
            switch the endpoint status between healthy and unhealthy
            (default 3).
        MeasureLatency (boolean) -- Measure latency between health checkers in
            multiple AWS regions and the endpoint, and display CloudWatch
            latency graphs in the Route 53 console. Warning: can't be changed
            after the health check is created.
        Inverted (boolean) -- Invert the status of the health check.
        HealthThreshold (integer) -- (CALCULATED only) Number of child health
            checks that must be healthy for this check to be healthy; use with
            the HealthCheckConfig$ChildHealthChecks element. 0 means always
            healthy; a value greater than the number of children means always
            unhealthy.
        ChildHealthChecks (list of string) -- (CALCULATED only) One
            ChildHealthCheck element for each health check to aggregate.
        EnableSNI (boolean) -- Send FullyQualifiedDomainName in the
            client_hello message during TLS negotiation so the endpoint can
            respond with the applicable SSL/TLS certificate. Without SNI, some
            endpoints report 'SSL alert handshake_failure'. If the certificate
            presented doesn't include FullyQualifiedDomainName, the health
            checker retries the handshake without it.
        Regions (list of string) -- One Region element per region to check
            from; all listed valid regions are used if omitted. Removing a
            region from an existing check keeps it briefly active so some
            checkers are always checking the endpoint.
        AlarmIdentifier (dict) -- Identifies the CloudWatch alarm used for
            CLOUDWATCH_METRIC checks: Region (string, [REQUIRED]) -- the alarm's
            CloudWatch region (see Amazon CloudWatch in AWS Regions and
            Endpoints); Name (string, [REQUIRED]) -- the alarm name.
        InsufficientDataHealthStatus (string) -- Status to assign when
            CloudWatch has insufficient data to determine the alarm state:
            Healthy, Unhealthy, or LastKnownStatus (the status from the last
            time CloudWatch had sufficient data; healthy for new checks with
            no last known status).

    :rtype: dict
    :return: {
        'HealthCheck': {
            'Id': 'string',
            'CallerReference': 'string',
            'HealthCheckConfig': {
                'IPAddress': 'string',
                'Port': 123,
                'Type': 'HTTP'|'HTTPS'|'HTTP_STR_MATCH'|'HTTPS_STR_MATCH'|'TCP'|'CALCULATED'|'CLOUDWATCH_METRIC',
                'ResourcePath': 'string',
                'FullyQualifiedDomainName': 'string',
                'SearchString': 'string',
                'RequestInterval': 123,
                'FailureThreshold': 123,
                'MeasureLatency': True|False,
                'Inverted': True|False,
                'HealthThreshold': 123,
                'ChildHealthChecks': [
                    'string',
                ],
                'EnableSNI': True|False,
                'Regions': [
                    'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'sa-east-1',
                ],
                'AlarmIdentifier': {
                    'Region': 'us-east-1'|'us-east-2'|'us-west-1'|'us-west-2'|'ca-central-1'|'eu-central-1'|'eu-west-1'|'eu-west-2'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'|'sa-east-1',
                    'Name': 'string'
                },
                'InsufficientDataHealthStatus': 'Healthy'|'Unhealthy'|'LastKnownStatus'
            },
            'HealthCheckVersion': 123,
            'CloudWatchAlarmConfiguration': {
                'EvaluationPeriods': 123,
                'Threshold': 123.0,
                'ComparisonOperator': 'GreaterThanOrEqualToThreshold'|'GreaterThanThreshold'|'LessThanThreshold'|'LessThanOrEqualToThreshold',
                'Period': 123,
                'MetricName': 'string',
                'Namespace': 'string',
                'Statistic': 'Average'|'Sum'|'SampleCount'|'Maximum'|'Minimum',
                'Dimensions': [
                    {
                        'Name': 'string',
                        'Value': 'string'
                    },
                ]
            }
        },
        'Location': 'string'
    }
    """
    # Stub only: the real implementation is provided by the boto3 client at
    # runtime; this placeholder returns None.
    pass
def create_hosted_zone(Name=None, VPC=None, CallerReference=None, HostedZoneConfig=None, DelegationSetId=None):
    """Create a new hosted zone that tells DNS how to route traffic for a domain.

    The zone starts in status PENDING; once the NS and SOA records are available
    on all Amazon Route 53 DNS servers its status changes to INSYNC.

    :type Name: string
    :param Name: [REQUIRED] The domain name, e.g. ``www.example.com``. The
        trailing dot is optional; names with and without it are treated as
        identical. For a public zone this is the name registered with your
        DNS registrar.
    :type VPC: dict
    :param VPC: (Private hosted zones only) ``{'VPCRegion': ..., 'VPCId': ...}``
        for the one VPC to associate at creation time; associate additional
        VPCs afterwards with ``AssociateVPCWithHostedZone``.
    :type CallerReference: string
    :param CallerReference: [REQUIRED] A unique string (e.g. a date/time stamp)
        that lets a failed request be retried without running the operation
        twice.
    :type HostedZoneConfig: dict
    :param HostedZoneConfig: Optional ``{'Comment': str, 'PrivateZone': bool}``.
        Omit entirely if you specify neither value.
    :type DelegationSetId: string
    :param DelegationSetId: ID of a reusable delegation set (from
        ``CreateReusableDelegationSet``) to associate with this zone, if any.
    :rtype: dict
    :return: ``{'HostedZone': {...}, 'ChangeInfo': {...},
        'DelegationSet': {...}, 'VPC': {...}, 'Location': 'string'}``

    See also: AWS API Documentation.
    """
    pass
def create_reusable_delegation_set(CallerReference=None, HostedZoneId=None):
    """Create a delegation set (a group of four name servers) that can be reused
    by multiple hosted zones. If a hosted zone ID is specified,
    CreateReusableDelegationSet marks the delegation set associated with that
    zone as reusable.

    For information on how to use a reusable delegation set to configure white
    label name servers, see Configuring White Label Name Servers.

    :type CallerReference: string
    :param CallerReference: [REQUIRED] A unique string (e.g. a date/time stamp)
        that allows failed requests to be retried without the risk of executing
        the operation twice.
    :type HostedZoneId: string
    :param HostedZoneId: To mark the delegation set of an existing hosted zone
        as reusable, the ID of that hosted zone.
    :rtype: dict
    :return: ``{'DelegationSet': {'Id': ..., 'CallerReference': ...,
        'NameServers': [...]}, 'Location': 'string'}``

    See also: AWS API Documentation.
    """
    pass
def create_traffic_policy(Name=None, Document=None, Comment=None):
    """Create a traffic policy, used to generate multiple DNS resource record
    sets for one domain name (such as example.com) or one subdomain name
    (such as www.example.com).

    :type Name: string
    :param Name: [REQUIRED] The name of the traffic policy.
    :type Document: string
    :param Document: [REQUIRED] The policy definition in JSON format (see the
        Traffic Policy Document Format documentation).
    :type Comment: string
    :param Comment: Optional comment about the traffic policy.
    :rtype: dict
    :return: ``{'TrafficPolicy': {'Id': ..., 'Version': ..., 'Name': ...,
        'Type': ..., 'Document': ..., 'Comment': ...}, 'Location': 'string'}``

    See also: AWS API Documentation.
    """
    pass
def create_traffic_policy_instance(HostedZoneId=None, Name=None, TTL=None, TrafficPolicyId=None, TrafficPolicyVersion=None):
    """Create resource record sets in a hosted zone based on a traffic policy
    version, and associate them with a domain or subdomain name. Amazon
    Route 53 then answers DNS queries for that name using the created record
    sets.

    :type HostedZoneId: string
    :param HostedZoneId: [REQUIRED] The ID of the hosted zone in which the
        record sets are created.
    :type Name: string
    :param Name: [REQUIRED] The domain name (such as example.com) or subdomain
        name (such as www.example.com) that the created record sets answer for.
    :type TTL: integer
    :param TTL: [REQUIRED] The TTL to assign to all of the resource record
        sets created in the specified hosted zone.
    :type TrafficPolicyId: string
    :param TrafficPolicyId: [REQUIRED] The ID of the traffic policy to use.
    :type TrafficPolicyVersion: integer
    :param TrafficPolicyVersion: [REQUIRED] The version of the traffic policy
        to use.
    :rtype: dict
    :return: ``{'TrafficPolicyInstance': {'Id': ..., 'HostedZoneId': ...,
        'Name': ..., 'TTL': ..., 'State': ..., 'Message': ...,
        'TrafficPolicyId': ..., 'TrafficPolicyVersion': ...,
        'TrafficPolicyType': ...}, 'Location': 'string'}``

    See also: AWS API Documentation.
    """
    pass
def create_traffic_policy_version(Id=None, Document=None, Comment=None):
    """Create a new version of an existing traffic policy.

    You specify the ID of the policy to update and a JSON document describing
    the new version. A traffic policy can have at most 1000 versions; once the
    limit is reached, a new traffic policy must be started instead.

    :type Id: string
    :param Id: [REQUIRED] The ID of the traffic policy to create a new
        version of.
    :type Document: string
    :param Document: [REQUIRED] The new version's definition, in JSON format
        (see CreateTrafficPolicy for the format).
    :type Comment: string
    :param Comment: Optional comment for this version.
    :rtype: dict
    :return: ``{'TrafficPolicy': {'Id': ..., 'Version': ..., 'Name': ...,
        'Type': ..., 'Document': ..., 'Comment': ...}, 'Location': 'string'}``

    See also: AWS API Documentation.
    """
    pass
def create_vpc_association_authorization(HostedZoneId=None, VPC=None):
    """Authorize the AWS account that created a VPC to associate that VPC with
    a hosted zone owned by a different account.

    Submit this request with the account that created the hosted zone; then
    the VPC-owning account can call ``AssociateVPCWithHostedZone``.

    :type HostedZoneId: string
    :param HostedZoneId: [REQUIRED] The ID of the private hosted zone to
        authorize the VPC association for.
    :type VPC: dict
    :param VPC: [REQUIRED] ``{'VPCRegion': ..., 'VPCId': ...}`` identifying the
        VPC to authorize.
    :rtype: dict
    :return: ``{'HostedZoneId': 'string',
        'VPC': {'VPCRegion': ..., 'VPCId': ...}}``

    See also: AWS API Documentation.
    """
    pass
def delete_health_check(HealthCheckId=None):
    """Delete a health check.

    :type HealthCheckId: string
    :param HealthCheckId: [REQUIRED] The ID of the health check to delete.
    :rtype: dict
    :return: ``{}`` (empty response element)

    See also: AWS API Documentation.
    """
    pass
def delete_hosted_zone(Id=None):
    """Delete a hosted zone.

    A zone can be deleted only when it contains nothing but the default SOA
    record and NS resource record sets; otherwise the request fails with a
    ``HostedZoneNotEmpty`` error. Delete other record sets first with
    ``ChangeResourceRecordSets``.

    :type Id: string
    :param Id: [REQUIRED] The ID of the hosted zone to delete.
    :rtype: dict
    :return: ``{'ChangeInfo': {'Id': ..., 'Status': 'PENDING'|'INSYNC',
        'SubmittedAt': datetime, 'Comment': ...}}``

    See also: AWS API Documentation.
    """
    pass
def delete_reusable_delegation_set(Id=None):
    """Delete a reusable delegation set.

    To verify the set is not associated with any hosted zones first, submit a
    ``GetReusableDelegationSet`` request for the same ID.

    :type Id: string
    :param Id: [REQUIRED] The ID of the reusable delegation set to delete.
    :rtype: dict
    :return: ``{}`` (empty response element)

    See also: AWS API Documentation.
    """
    pass
def delete_traffic_policy(Id=None, Version=None):
    """Delete a specific version of a traffic policy.

    :type Id: string
    :param Id: [REQUIRED] The ID of the traffic policy to delete.
    :type Version: integer
    :param Version: [REQUIRED] The version number of the traffic policy to
        delete.
    :rtype: dict
    :return: ``{}`` (empty response element)

    See also: AWS API Documentation.
    """
    pass
def delete_traffic_policy_instance(Id=None):
    """Delete a traffic policy instance together with every resource record
    set that Amazon Route 53 created when the instance was created.

    Warning: deleting an instance deletes all record sets created from it.

    :type Id: string
    :param Id: [REQUIRED] The ID of the traffic policy instance to delete.
    :rtype: dict
    :return: ``{}`` (empty response element)

    See also: AWS API Documentation.
    """
    pass
def delete_vpc_association_authorization(HostedZoneId=None, VPC=None):
    """Revoke a previously granted authorization for another account's VPC to
    be associated with a hosted zone.

    The request must be submitted by the account that created the hosted zone.

    :type HostedZoneId: string
    :param HostedZoneId: [REQUIRED] The ID of the hosted zone whose VPC
        association authorization is being removed.
    :type VPC: dict
    :param VPC: [REQUIRED] ``{'VPCRegion': ..., 'VPCId': ...}`` identifying
        the VPC whose authorization is being removed.
    :rtype: dict
    :return: ``{}`` (empty response element)

    See also: AWS API Documentation.
    """
    pass
def disassociate_vpc_from_hosted_zone(HostedZoneId=None, VPC=None, Comment=None):
    """Disassociate a VPC from an Amazon Route 53 private hosted zone.

    :type HostedZoneId: string
    :param HostedZoneId: [REQUIRED] The ID of the private hosted zone to
        disassociate the VPC from.
    :type VPC: dict
    :param VPC: [REQUIRED] ``{'VPCRegion': ..., 'VPCId': ...}`` identifying
        the VPC being disassociated.
    :type Comment: string
    :param Comment: Optional comment about the disassociation request.
    :rtype: dict
    :return: ``{'ChangeInfo': {'Id': ..., 'Status': 'PENDING'|'INSYNC',
        'SubmittedAt': datetime, 'Comment': ...}}``

    See also: AWS API Documentation.
    """
    pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """Generate a presigned URL for a client method and its arguments.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ``ClientMethod``.
    :type ExpiresIn: int
    :param ExpiresIn: Number of seconds the presigned URL remains valid;
        defaults to one hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: HTTP method for the generated URL; defaults to the
        method defined in the operation's model.
    """
    pass
def get_change(Id=None):
    """Return the current status of a change batch request.

    :type Id: string
    :param Id: [REQUIRED] The ID of the change batch request — the value that
        ``ChangeResourceRecordSets`` returned in the ``Id`` element when the
        request was submitted.
    :rtype: dict
    :return: ``{'ChangeInfo': {'Id': ..., 'Status': 'PENDING'|'INSYNC',
        'SubmittedAt': datetime, 'Comment': ...}}``

    See also: AWS API Documentation.
    """
    pass
def get_checker_ip_ranges():
    """Return the IP ranges used by Amazon Route 53 health checkers.

    :rtype: dict
    :return: ``{'CheckerIpRanges': ['string', ...]}``

    See also: AWS API Documentation.
    """
    pass
def get_geo_location(ContinentCode=None, CountryCode=None, SubdivisionCode=None):
    """Check whether a geographic location is supported for Amazon Route 53
    geolocation resource record sets.

    Specify ``ContinentCode`` alone for a continent, ``CountryCode`` for a
    country, or ``CountryCode`` plus ``SubdivisionCode`` for a subdivision.

    :type ContinentCode: string
    :param ContinentCode: One of the supported continent codes:
        ``AF`` (Africa), ``AN`` (Antarctica), ``AS`` (Asia), ``EU`` (Europe),
        ``OC`` (Oceania), ``NA`` (North America), ``SA`` (South America).
    :type CountryCode: string
    :param CountryCode: A two-letter country code from ISO standard 3166-1
        alpha-2.
    :type SubdivisionCode: string
    :param SubdivisionCode: A one- to three-letter subdivision code from ISO
        standard 3166-1 alpha-2. Not supported for all countries; requires
        ``CountryCode`` to also be specified.
    :rtype: dict
    :return: ``{'GeoLocationDetails': {'ContinentCode': ...,
        'ContinentName': ..., 'CountryCode': ..., 'CountryName': ...,
        'SubdivisionCode': ..., 'SubdivisionName': ...}}``

    See also: AWS API Documentation.
    """
    pass
def get_health_check(HealthCheckId=None):
    """Return information about a specified health check.

    :type HealthCheckId: string
    :param HealthCheckId: [REQUIRED] The identifier that Amazon Route 53
        assigned to the health check when it was created (up to 64 characters).
    :rtype: dict
    :return: ``{'HealthCheck': {'Id': ..., 'CallerReference': ...,
        'HealthCheckConfig': {...}, 'HealthCheckVersion': ...,
        'CloudWatchAlarmConfiguration': {...}}}`` — ``HealthCheckConfig``
        includes endpoint details (IPAddress, Port, Type, ResourcePath,
        FullyQualifiedDomainName, SearchString), check tuning
        (RequestInterval, FailureThreshold, MeasureLatency, Inverted,
        EnableSNI, Regions), calculated-check fields (HealthThreshold,
        ChildHealthChecks) and CloudWatch-metric fields (AlarmIdentifier,
        InsufficientDataHealthStatus).

    See also: AWS API Documentation. Relevant address-space references:
    RFC 5735 (Special Use IPv4 Addresses), RFC 6598 (IANA-Reserved IPv4
    Prefix for Shared Address Space), RFC 5156 (Special-Use IPv6 Addresses).
    """
    pass
def get_health_check_count():
    """Return the number of health checks associated with the current AWS
    account.

    :rtype: dict
    :return: ``{'HealthCheckCount': 123}``

    See also: AWS API Documentation.
    """
    pass
def get_health_check_last_failure_reason(HealthCheckId=None):
    """Return the reason a specified health check most recently failed.

    :type HealthCheckId: string
    :param HealthCheckId: [REQUIRED] The health check ID, as returned by
        ``CreateHealthCheck`` in the ``HealthCheckId`` element.
    :rtype: dict
    :return: ``{'HealthCheckObservations': [{'Region': ..., 'IPAddress': ...,
        'StatusReport': {'Status': ..., 'CheckedTime': datetime}}, ...]}``

    See also: AWS API Documentation.
    """
    pass
def get_health_check_status(HealthCheckId=None):
    """Return the current status of a specified health check.

    Note: the status of a calculated health check cannot be retrieved with
    this operation — use the Amazon Route 53 console or the CloudWatch
    console instead.

    :type HealthCheckId: string
    :param HealthCheckId: [REQUIRED] The health check ID, as returned by
        ``CreateHealthCheck`` in the ``HealthCheckId`` element.
    :rtype: dict
    :return: ``{'HealthCheckObservations': [{'Region': ..., 'IPAddress': ...,
        'StatusReport': {'Status': ..., 'CheckedTime': datetime}}, ...]}``

    See also: AWS API Documentation.
    """
    pass
def get_hosted_zone(Id=None):
    """Return information about a hosted zone, including the four name servers
    assigned to it.

    :type Id: string
    :param Id: [REQUIRED] The ID of the hosted zone to describe.
    :rtype: dict
    :return: ``{'HostedZone': {'Id': ..., 'Name': ..., 'CallerReference': ...,
        'Config': {'Comment': ..., 'PrivateZone': ...},
        'ResourceRecordSetCount': ...},
        'DelegationSet': {'Id': ..., 'CallerReference': ...,
        'NameServers': [...]},
        'VPCs': [{'VPCRegion': ..., 'VPCId': ...}, ...]}``

    See also: AWS API Documentation.
    """
    pass
def get_hosted_zone_count():
    """Return the number of hosted zones associated with the current AWS
    account.

    :rtype: dict
    :return: ``{'HostedZoneCount': 123}``

    See also: AWS API Documentation.
    """
    pass
def get_paginator(operation_name=None):
    """Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name — the same name as the client
        method. For example, if the method is ``create_foo`` (normally invoked
        as ``client.create_foo(**kwargs)``) and that operation can be
        paginated, call ``client.get_paginator('create_foo')``.
    :rtype: L{botocore.paginate.Paginator}
    """
    pass
def get_reusable_delegation_set(Id=None):
    """Return information about a reusable delegation set, including the four
    name servers assigned to it.

    :type Id: string
    :param Id: [REQUIRED] The ID of the reusable delegation set whose name
        servers you want listed.
    :rtype: dict
    :return: ``{'DelegationSet': {'Id': ..., 'CallerReference': ...,
        'NameServers': [...]}}``

    See also: AWS API Documentation.
    """
    pass
def get_traffic_policy(Id=None, Version=None):
    """Return information about a specific traffic policy version.

    :type Id: string
    :param Id: [REQUIRED] The ID of the traffic policy.
    :type Version: integer
    :param Version: [REQUIRED] The version number of the traffic policy.
    :rtype: dict
    :return: ``{'TrafficPolicy': {'Id': ..., 'Version': ..., 'Name': ...,
        'Type': ..., 'Document': ..., 'Comment': ...}}``

    See also: AWS API Documentation.
    """
    pass
def get_traffic_policy_instance(Id=None):
    """Return information about a specified traffic policy instance.

    :type Id: string
    :param Id: [REQUIRED] The ID of the traffic policy instance.
    :rtype: dict
    :return: ``{'TrafficPolicyInstance': {'Id': ..., 'HostedZoneId': ...,
        'Name': ..., 'TTL': ..., 'State': ..., 'Message': ...,
        'TrafficPolicyId': ..., 'TrafficPolicyVersion': ...,
        'TrafficPolicyType': ...}}``

    See also: AWS API Documentation.
    """
    pass
def get_traffic_policy_instance_count():
    """Return the number of traffic policy instances associated with the
    current AWS account.

    :rtype: dict
    :return: ``{'TrafficPolicyInstanceCount': 123}``

    See also: AWS API Documentation.
    """
    pass
def get_waiter():
    """Stub for the client's ``get_waiter`` method.

    NOTE(review): the original docstring is empty; presumably returns a
    botocore waiter object for a named waiter — confirm against botocore docs.
    """
    pass
def list_geo_locations(StartContinentCode=None, StartCountryCode=None, StartSubdivisionCode=None, MaxItems=None):
    """Retrieve a list of supported geo locations.

    Countries are listed first, and continents are listed last. If Amazon
    Route 53 supports subdivisions for a country (for example, states or
    provinces), the subdivisions for that country are listed in alphabetical
    order immediately after the corresponding country.

    See also: AWS API Documentation.

    Args:
        StartContinentCode (string): The continent code to start listing
            locations from. When a previous response was truncated
            (IsTruncated is true) and NextContinentCode has a value, pass
            that value here to get the next page. Include this only when
            listing continents; omit it when listing countries or countries
            with their subdivisions.
        StartCountryCode (string): The country code to start listing
            locations from (two-letter codes per ISO standard 3166-1
            alpha-2). When a previous response was truncated, pass
            NextCountryCode from that response here.
        StartSubdivisionCode (string): The subdivision (for example, state
            or province) code to start listing locations from. When a
            previous response was truncated, pass NextSubdivisionCode from
            that response here. To list subdivisions of a country, include
            both StartCountryCode and StartSubdivisionCode.
        MaxItems (string): (Optional) The maximum number of geolocations to
            include in the response body. If more remain, IsTruncated in
            the response is true.

    Returns:
        dict: Response structure::

            {
                'GeoLocationDetailsList': [
                    {
                        'ContinentCode': 'string',
                        'ContinentName': 'string',
                        'CountryCode': 'string',
                        'CountryName': 'string',
                        'SubdivisionCode': 'string',
                        'SubdivisionName': 'string'
                    },
                ],
                'IsTruncated': True|False,
                'NextContinentCode': 'string',
                'NextCountryCode': 'string',
                'NextSubdivisionCode': 'string',
                'MaxItems': 'string'
            }
    """
    pass
def list_health_checks(Marker=None, MaxItems=None):
    """Retrieve a list of the health checks associated with the current AWS account.

    See also: AWS API Documentation.

    Args:
        Marker (string): If IsTruncated in the previous response was true,
            there are more health checks; submit another ListHealthChecks
            request with the previous response's NextMarker value here (the
            ID of the first health check the next request will return). If
            IsTruncated was false, there are no more health checks to get.
        MaxItems (string): The maximum number of health checks to return
            for this request. Amazon Route 53 returns a maximum of 100
            items; values greater than 100 still return only the first 100.

    Returns:
        dict: Response structure::

            {
                'HealthChecks': [
                    {
                        'Id': 'string',
                        'CallerReference': 'string',
                        'HealthCheckConfig': {
                            'IPAddress': 'string',
                            'Port': 123,
                            'Type': 'HTTP'|'HTTPS'|'HTTP_STR_MATCH'|'HTTPS_STR_MATCH'|'TCP'|'CALCULATED'|'CLOUDWATCH_METRIC',
                            'ResourcePath': 'string',
                            'FullyQualifiedDomainName': 'string',
                            'SearchString': 'string',
                            'RequestInterval': 123,
                            'FailureThreshold': 123,
                            'MeasureLatency': True|False,
                            'Inverted': True|False,
                            'HealthThreshold': 123,
                            'ChildHealthChecks': ['string'],
                            'EnableSNI': True|False,
                            'Regions': [
                                'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'sa-east-1',
                            ],
                            'AlarmIdentifier': {
                                'Region': 'us-east-1'|'us-east-2'|'us-west-1'|'us-west-2'|'ca-central-1'|'eu-central-1'|'eu-west-1'|'eu-west-2'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'|'sa-east-1',
                                'Name': 'string'
                            },
                            'InsufficientDataHealthStatus': 'Healthy'|'Unhealthy'|'LastKnownStatus'
                        },
                        'HealthCheckVersion': 123,
                        'CloudWatchAlarmConfiguration': {
                            'EvaluationPeriods': 123,
                            'Threshold': 123.0,
                            'ComparisonOperator': 'GreaterThanOrEqualToThreshold'|'GreaterThanThreshold'|'LessThanThreshold'|'LessThanOrEqualToThreshold',
                            'Period': 123,
                            'MetricName': 'string',
                            'Namespace': 'string',
                            'Statistic': 'Average'|'Sum'|'SampleCount'|'Maximum'|'Minimum',
                            'Dimensions': [
                                {'Name': 'string', 'Value': 'string'},
                            ]
                        }
                    },
                ],
                'Marker': 'string',
                'IsTruncated': True|False,
                'NextMarker': 'string',
                'MaxItems': 'string'
            }

    Notes:
        IPv4 address: four values between 0 and 255, separated by periods
        (.), for example 192.0.2.44. IPv6 address: eight groups of four
        hexadecimal values, separated by colons (:), for example
        2001:0db8:85a3:0000:0000:abcd:0001:2345; IPv6 addresses may also be
        shortened as described in RFC 5952, for example
        2001:db8:85a3::abcd:1:2345.
    """
    pass
def list_hosted_zones(Marker=None, MaxItems=None, DelegationSetId=None):
    """Retrieve the public and private hosted zones for the current AWS account.

    The response includes a HostedZones child element for each hosted zone.
    Amazon Route 53 returns a maximum of 100 items in each response; with
    many hosted zones, use the maxitems parameter to list them in groups of
    up to 100.

    See also: AWS API Documentation.

    Args:
        Marker (string): If IsTruncated in the previous response was true,
            there are more hosted zones; submit another ListHostedZones
            request with the previous response's NextMarker value here (the
            ID of the first hosted zone the next request will return). If
            IsTruncated was false, there are no more hosted zones to get.
        MaxItems (string): (Optional) The maximum number of hosted zones to
            return. With more than maxitems hosted zones, IsTruncated in
            the response is true and NextMarker is the hosted zone ID of
            the first zone the next request would return.
        DelegationSetId (string): When using reusable delegation sets, the
            ID of a reusable delegation set whose associated hosted zones
            should be listed.

    Returns:
        dict: Response structure::

            {
                'HostedZones': [
                    {
                        'Id': 'string',
                        'Name': 'string',
                        'CallerReference': 'string',
                        'Config': {
                            'Comment': 'string',
                            'PrivateZone': True|False
                        },
                        'ResourceRecordSetCount': 123
                    },
                ],
                'Marker': 'string',
                'IsTruncated': True|False,
                'NextMarker': 'string',
                'MaxItems': 'string'
            }
    """
    pass
def list_hosted_zones_by_name(DNSName=None, HostedZoneId=None, MaxItems=None):
    """Retrieve your hosted zones in lexicographic order.

    The response includes a HostedZones child element for each hosted zone
    created by the current AWS account. Note the trailing dot, which can
    change the sort order in some circumstances. If the domain name includes
    escape characters or Punycode, ListHostedZonesByName alphabetizes the
    domain name using the escaped or Punycoded value, which is the format
    Amazon Route 53 saves in its database (for example, for exmple.com you
    specify ex344mple.com). Labels are reversed and alphabetized using the
    escaped value; see DNS Domain Name Format in the Amazon Route 53
    Developer Guide for valid domain name formats, including
    internationalized domain names. Amazon Route 53 returns up to 100 items
    per response; use MaxItems to page through groups of up to 100.

    See also: AWS API Documentation.

    Args:
        DNSName (string): (Optional) On the first request, include dnsname
            only to name the first hosted zone in the response; without it,
            all hosted zones created by the current AWS account are
            returned in ASCII order. On subsequent requests, include both
            dnsname and hostedzoneid, passing NextDNSName from the previous
            response as dnsname.
        HostedZoneId (string): (Optional) Omit on the first request. When
            there are more hosted zones than maxitems, only the first
            maxitems are returned; to get the next group, submit another
            request with both dnsname and hostedzoneid, passing the
            previous response's NextHostedZoneId as hostedzoneid.
        MaxItems (string): The maximum number of hosted zones to include in
            the response body. With more than maxitems hosted zones,
            IsTruncated is true and NextDNSName/NextHostedZoneId identify
            the first hosted zone of the next group.

    Returns:
        dict: Response structure::

            {
                'HostedZones': [
                    {
                        'Id': 'string',
                        'Name': 'string',
                        'CallerReference': 'string',
                        'Config': {
                            'Comment': 'string',
                            'PrivateZone': True|False
                        },
                        'ResourceRecordSetCount': 123
                    },
                ],
                'DNSName': 'string',
                'HostedZoneId': 'string',
                'IsTruncated': True|False,
                'NextDNSName': 'string',
                'NextHostedZoneId': 'string',
                'MaxItems': 'string'
            }
    """
    pass
def list_resource_record_sets(HostedZoneId=None, StartRecordName=None, StartRecordType=None, StartRecordIdentifier=None, MaxItems=None):
    """List the resource record sets in a specified hosted zone.

    Note the trailing dot, which can change the sort order in some
    circumstances. When multiple records have the same DNS name, results
    are sorted by record type. The name and type elements adjust the
    beginning position of the returned list: with neither, results begin
    with the first resource record set in the hosted zone; with name only,
    results begin with the first set whose name is greater than or equal to
    Name; type without name returns an InvalidInput error; with both,
    results begin with the first set whose name is >= Name and whose type
    is >= Type.

    This action returns the most current version of the records, including
    records that are PENDING and not yet available on all Amazon Route 53
    DNS servers. For an accurate point-in-time listing, do not submit a
    ChangeResourceRecordSets request while paging through the results of a
    ListResourceRecordSets request; otherwise some pages may show results
    without the latest changes while others include them.

    See also: AWS API Documentation.

    Args:
        HostedZoneId (string): [REQUIRED] The ID of the hosted zone whose
            resource record sets you want to list.
        StartRecordName (string): The first name in the lexicographic
            ordering of resource record sets to list.
        StartRecordType (string): The type of resource record set to begin
            the listing from. Valid values for basic resource record sets:
            A | AAAA | CNAME | MX | NAPTR | NS | PTR | SOA | SPF | SRV |
            TXT. Values for weighted, latency, geo, and failover sets:
            A | AAAA | CNAME | MX | NAPTR | PTR | SPF | SRV | TXT. Values
            for alias resource record sets: CloudFront distribution: A or
            AAAA; Elastic Beanstalk environment with a regionalized
            subdomain: A; ELB load balancer: A | AAAA; Amazon S3 bucket: A.
            Constraint: specifying type without name returns an
            InvalidInput error.
        StartRecordIdentifier (string): Weighted resource record sets only:
            if results were truncated for a given DNS name and type, the
            NextRecordIdentifier from the previous response, to get the
            next set with that DNS name and type.
        MaxItems (string): (Optional) The maximum number of resource record
            sets to include in the response body. With more than maxitems
            sets, IsTruncated is true and NextRecordName/NextRecordType
            identify the first set of the next group.

    Returns:
        dict: Response structure::

            {
                'ResourceRecordSets': [
                    {
                        'Name': 'string',
                        'Type': 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA',
                        'SetIdentifier': 'string',
                        'Weight': 123,
                        'Region': 'us-east-1'|'us-east-2'|'us-west-1'|'us-west-2'|'ca-central-1'|'eu-west-1'|'eu-west-2'|'eu-central-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'|'sa-east-1'|'cn-north-1'|'ap-south-1',
                        'GeoLocation': {
                            'ContinentCode': 'string',
                            'CountryCode': 'string',
                            'SubdivisionCode': 'string'
                        },
                        'Failover': 'PRIMARY'|'SECONDARY',
                        'TTL': 123,
                        'ResourceRecords': [
                            {'Value': 'string'},
                        ],
                        'AliasTarget': {
                            'HostedZoneId': 'string',
                            'DNSName': 'string',
                            'EvaluateTargetHealth': True|False
                        },
                        'HealthCheckId': 'string',
                        'TrafficPolicyInstanceId': 'string'
                    },
                ],
                'IsTruncated': True|False,
                'NextRecordName': 'string',
                'NextRecordType': 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA',
                'NextRecordIdentifier': 'string',
                'MaxItems': 'string'
            }

    Notes on wildcards:
        The * must replace the entire label (you can't specify
        *prod.example.com or prod*.example.com); the * can't replace any of
        the middle labels (for example, marketing.*.example.com); in any
        position other than the leftmost label, DNS treats * as an *
        character (ASCII 42), not as a wildcard.
    """
    pass
def list_reusable_delegation_sets(Marker=None, MaxItems=None):
    """Retrieve the reusable delegation sets associated with the current AWS account.

    See also: AWS API Documentation.

    Args:
        Marker (string): If IsTruncated in the previous response was true,
            there are more reusable delegation sets; submit another
            ListReusableDelegationSets request with the previous response's
            NextMarker value here (the ID of the first reusable delegation
            set the next request will return). If IsTruncated was false,
            there are no more reusable delegation sets to get.
        MaxItems (string): The number of reusable delegation sets to return
            in the response. Values greater than 100 still return only the
            first 100 reusable delegation sets.

    Returns:
        dict: Response structure::

            {
                'DelegationSets': [
                    {
                        'Id': 'string',
                        'CallerReference': 'string',
                        'NameServers': [
                            'string',
                        ]
                    },
                ],
                'Marker': 'string',
                'IsTruncated': True|False,
                'NextMarker': 'string',
                'MaxItems': 'string'
            }
    """
    pass
def list_tags_for_resource(ResourceType=None, ResourceId=None):
    """List tags for one health check or hosted zone.

    For information about using tags for cost allocation, see Using Cost
    Allocation Tags in the AWS Billing and Cost Management User Guide.

    See also: AWS API Documentation.

    Args:
        ResourceType (string): [REQUIRED] The type of the resource:
            'healthcheck' for health checks, 'hostedzone' for hosted
            zones.
        ResourceId (string): [REQUIRED] The ID of the resource whose tags
            you want to retrieve.

    Returns:
        dict: Response structure::

            {
                'ResourceTagSet': {
                    'ResourceType': 'healthcheck'|'hostedzone',
                    'ResourceId': 'string',
                    'Tags': [
                        {
                            'Key': 'string',
                            'Value': 'string'
                        },
                    ]
                }
            }
    """
    pass
def list_tags_for_resources(ResourceType=None, ResourceIds=None):
    """List tags for up to 10 health checks or hosted zones.

    For information about using tags for cost allocation, see Using Cost
    Allocation Tags in the AWS Billing and Cost Management User Guide.

    See also: AWS API Documentation.

    Args:
        ResourceType (string): [REQUIRED] The type of the resources:
            'healthcheck' for health checks, 'hostedzone' for hosted
            zones.
        ResourceIds (list): [REQUIRED] A complex type containing the
            ResourceId element (string) for each resource whose tags you
            want.

    Returns:
        dict: Response structure::

            {
                'ResourceTagSets': [
                    {
                        'ResourceType': 'healthcheck'|'hostedzone',
                        'ResourceId': 'string',
                        'Tags': [
                            {
                                'Key': 'string',
                                'Value': 'string'
                            },
                        ]
                    },
                ]
            }
    """
    pass
def list_traffic_policies(TrafficPolicyIdMarker=None, MaxItems=None):
    """Get the latest version of every traffic policy associated with the current AWS account.

    Policies are listed in the order in which they were created.

    See also: AWS API Documentation.

    Args:
        TrafficPolicyIdMarker (string): (Conditional) Omit on the first
            request. With more traffic policies than MaxItems, only the
            first MaxItems policies are returned; to get the next group,
            submit another request passing the TrafficPolicyIdMarker
            returned in the previous response.
        MaxItems (string): (Optional) The maximum number of traffic
            policies to return. With more than MaxItems policies,
            IsTruncated in the response is true and TrafficPolicyIdMarker
            is the ID of the first policy the next request would return.

    Returns:
        dict: Response structure::

            {
                'TrafficPolicySummaries': [
                    {
                        'Id': 'string',
                        'Name': 'string',
                        'Type': 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA',
                        'LatestVersion': 123,
                        'TrafficPolicyCount': 123
                    },
                ],
                'IsTruncated': True|False,
                'TrafficPolicyIdMarker': 'string',
                'MaxItems': 'string'
            }
    """
    pass
def list_traffic_policy_instances(HostedZoneIdMarker=None, TrafficPolicyInstanceNameMarker=None, TrafficPolicyInstanceTypeMarker=None, MaxItems=None):
    """Get the traffic policy instances created using the current AWS account.

    Amazon Route 53 returns a maximum of 100 items in each response; with
    many traffic policy instances, use the MaxItems parameter to list them
    in groups of up to 100.

    See also: AWS API Documentation.

    Args:
        HostedZoneIdMarker (string): If IsTruncated in the previous
            response was true, submit another ListTrafficPolicyInstances
            request with the previous response's HostedZoneIdMarker here —
            the hosted zone ID of the first instance in the next group. If
            IsTruncated was false, there are no more instances to get.
        TrafficPolicyInstanceNameMarker (string): If the previous response
            was truncated, the previous response's
            TrafficPolicyInstanceNameMarker — the name of the first
            instance in the next group. Otherwise omit.
        TrafficPolicyInstanceTypeMarker (string): If the previous response
            was truncated, the previous response's
            TrafficPolicyInstanceTypeMarker — the type of the first
            instance in the next group. Otherwise omit.
        MaxItems (string): The maximum number of traffic policy instances
            to return. With more than MaxItems instances, IsTruncated is
            true and the three markers identify the first instance of the
            next group.

    Returns:
        dict: Response structure::

            {
                'TrafficPolicyInstances': [
                    {
                        'Id': 'string',
                        'HostedZoneId': 'string',
                        'Name': 'string',
                        'TTL': 123,
                        'State': 'string',
                        'Message': 'string',
                        'TrafficPolicyId': 'string',
                        'TrafficPolicyVersion': 123,
                        'TrafficPolicyType': 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA'
                    },
                ],
                'HostedZoneIdMarker': 'string',
                'TrafficPolicyInstanceNameMarker': 'string',
                'TrafficPolicyInstanceTypeMarker': 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA',
                'IsTruncated': True|False,
                'MaxItems': 'string'
            }
    """
    pass
def list_traffic_policy_instances_by_hosted_zone(HostedZoneId=None, TrafficPolicyInstanceNameMarker=None, TrafficPolicyInstanceTypeMarker=None, MaxItems=None):
    """Get the traffic policy instances created in a specified hosted zone.

    Amazon Route 53 returns a maximum of 100 items in each response; with
    many traffic policy instances, use the MaxItems parameter to list them
    in groups of up to 100.

    See also: AWS API Documentation.

    Args:
        HostedZoneId (string): [REQUIRED] The ID of the hosted zone whose
            traffic policy instances you want to list.
        TrafficPolicyInstanceNameMarker (string): If IsTruncated in the
            previous response is true, submit another request with the
            previous response's TrafficPolicyInstanceNameMarker here — the
            name of the first instance in the next group. If IsTruncated
            was false, there are no more instances to get.
        TrafficPolicyInstanceTypeMarker (string): If the previous response
            was truncated, the previous response's
            TrafficPolicyInstanceTypeMarker — the type of the first
            instance in the next group. Otherwise omit.
        MaxItems (string): The maximum number of traffic policy instances
            to include in the response body. With more than MaxItems
            instances, IsTruncated is true and the markers identify the
            first instance the next request would return.

    Returns:
        dict: Response structure::

            {
                'TrafficPolicyInstances': [
                    {
                        'Id': 'string',
                        'HostedZoneId': 'string',
                        'Name': 'string',
                        'TTL': 123,
                        'State': 'string',
                        'Message': 'string',
                        'TrafficPolicyId': 'string',
                        'TrafficPolicyVersion': 123,
                        'TrafficPolicyType': 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA'
                    },
                ],
                'TrafficPolicyInstanceNameMarker': 'string',
                'TrafficPolicyInstanceTypeMarker': 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA',
                'IsTruncated': True|False,
                'MaxItems': 'string'
            }
    """
    pass
def list_traffic_policy_instances_by_policy(TrafficPolicyId=None, TrafficPolicyVersion=None, HostedZoneIdMarker=None, TrafficPolicyInstanceNameMarker=None, TrafficPolicyInstanceTypeMarker=None, MaxItems=None):
    """Get the traffic policy instances created using a specific traffic policy version.

    Amazon Route 53 returns a maximum of 100 items in each response; with
    many traffic policy instances, use the MaxItems parameter to list them
    in groups of up to 100.

    See also: AWS API Documentation.

    Args:
        TrafficPolicyId (string): [REQUIRED] The ID of the traffic policy
            whose instances you want to list.
        TrafficPolicyVersion (integer): [REQUIRED] The version of the
            traffic policy whose instances you want to list. The version
            must be associated with the traffic policy specified by
            TrafficPolicyId.
        HostedZoneIdMarker (string): If IsTruncated in the previous
            response was true, submit another
            ListTrafficPolicyInstancesByPolicy request with the previous
            response's HostedZoneIdMarker here — the hosted zone ID of the
            first instance the next request will return. If IsTruncated
            was false, there are no more instances to get.
        TrafficPolicyInstanceNameMarker (string): If the previous response
            was truncated, the previous response's
            TrafficPolicyInstanceNameMarker — the name of the first
            instance the next request will return. Otherwise omit.
        TrafficPolicyInstanceTypeMarker (string): If the previous response
            was truncated, the previous response's
            TrafficPolicyInstanceTypeMarker — identifying the first
            instance the next request will return. Otherwise omit.
        MaxItems (string): The maximum number of traffic policy instances
            to include in the response body. With more than MaxItems
            instances, IsTruncated is true and the three markers identify
            the first instance the next request would return.

    Returns:
        dict: Response structure::

            {
                'TrafficPolicyInstances': [
                    {
                        'Id': 'string',
                        'HostedZoneId': 'string',
                        'Name': 'string',
                        'TTL': 123,
                        'State': 'string',
                        'Message': 'string',
                        'TrafficPolicyId': 'string',
                        'TrafficPolicyVersion': 123,
                        'TrafficPolicyType': 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA'
                    },
                ],
                'HostedZoneIdMarker': 'string',
                'TrafficPolicyInstanceNameMarker': 'string',
                'TrafficPolicyInstanceTypeMarker': 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA',
                'IsTruncated': True|False,
                'MaxItems': 'string'
            }
    """
    pass
def list_traffic_policy_versions(Id=None, TrafficPolicyVersionMarker=None, MaxItems=None):
    """
    Gets information about all of the versions for a specified traffic policy.
    Traffic policy versions are listed in numerical order by VersionNumber .
    See also: AWS API Documentation
    :example: response = client.list_traffic_policy_versions(
        Id='string',
        TrafficPolicyVersionMarker='string',
        MaxItems='string'
    )
    :type Id: string
    :param Id: [REQUIRED]
        Specify the value of Id of the traffic policy for which you want to list all versions.
    :type TrafficPolicyVersionMarker: string
    :param TrafficPolicyVersionMarker: For your first request to ListTrafficPolicyVersions , don't include the TrafficPolicyVersionMarker parameter.
        If you have more traffic policy versions than the value of MaxItems , ListTrafficPolicyVersions returns only the first group of MaxItems versions. To get more traffic policy versions, submit another ListTrafficPolicyVersions request. For the value of TrafficPolicyVersionMarker , specify the value of TrafficPolicyVersionMarker in the previous response.
    :type MaxItems: string
    :param MaxItems: The maximum number of traffic policy versions that you want Amazon Route 53 to include in the response body for this request. If the specified traffic policy has more than MaxItems versions, the value of IsTruncated in the response is true , and the value of the TrafficPolicyVersionMarker element is the ID of the first version that Amazon Route 53 will return if you submit another request.
    :rtype: dict
    :return: {
        'TrafficPolicies': [
            {
                'Id': 'string',
                'Version': 123,
                'Name': 'string',
                'Type': 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA',
                'Document': 'string',
                'Comment': 'string'
            },
        ],
        'IsTruncated': True|False,
        'TrafficPolicyVersionMarker': 'string',
        'MaxItems': 'string'
    }
    """
    # NOTE(review): documentation-only stub -- the body is intentionally
    # empty (`pass`); no implementation exists in this module.
    pass
def list_vpc_association_authorizations(HostedZoneId=None, NextToken=None, MaxResults=None):
    """
    Gets a list of the VPCs that were created by other accounts and that can be associated with a specified hosted zone because you've submitted one or more CreateVPCAssociationAuthorization requests.
    The response includes a VPCs element with a VPC child element for each VPC that can be associated with the hosted zone.
    See also: AWS API Documentation
    :example: response = client.list_vpc_association_authorizations(
        HostedZoneId='string',
        NextToken='string',
        MaxResults='string'
    )
    :type HostedZoneId: string
    :param HostedZoneId: [REQUIRED]
        The ID of the hosted zone for which you want a list of VPCs that can be associated with the hosted zone.
    :type NextToken: string
    :param NextToken: Optional : If a response includes a NextToken element, there are more VPCs that can be associated with the specified hosted zone. To get the next page of results, submit another request, and include the value of NextToken from the response in the nexttoken parameter in another ListVPCAssociationAuthorizations request.
    :type MaxResults: string
    :param MaxResults: Optional : An integer that specifies the maximum number of VPCs that you want Amazon Route 53 to return. If you don't specify a value for MaxResults , Amazon Route 53 returns up to 50 VPCs per page.
    :rtype: dict
    :return: {
        'HostedZoneId': 'string',
        'NextToken': 'string',
        'VPCs': [
            {
                'VPCRegion': 'us-east-1'|'us-east-2'|'us-west-1'|'us-west-2'|'eu-west-1'|'eu-west-2'|'eu-central-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-south-1'|'ap-northeast-1'|'ap-northeast-2'|'sa-east-1'|'ca-central-1'|'cn-north-1',
                'VPCId': 'string'
            },
        ]
    }
    """
    # NOTE(review): documentation-only stub -- the body is intentionally
    # empty (`pass`); no implementation exists in this module.
    pass
def test_dns_answer(HostedZoneId=None, RecordName=None, RecordType=None, ResolverIP=None, EDNS0ClientSubnetIP=None, EDNS0ClientSubnetMask=None):
    """
    Gets the value that Amazon Route 53 returns in response to a DNS request for a specified record name and type. You can optionally specify the IP address of a DNS resolver, an EDNS0 client subnet IP address, and a subnet mask.
    See also: AWS API Documentation
    :example: response = client.test_dns_answer(
        HostedZoneId='string',
        RecordName='string',
        RecordType='SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA',
        ResolverIP='string',
        EDNS0ClientSubnetIP='string',
        EDNS0ClientSubnetMask='string'
    )
    :type HostedZoneId: string
    :param HostedZoneId: [REQUIRED]
        The ID of the hosted zone that you want Amazon Route 53 to simulate a query for.
    :type RecordName: string
    :param RecordName: [REQUIRED]
        The name of the resource record set that you want Amazon Route 53 to simulate a query for.
    :type RecordType: string
    :param RecordType: [REQUIRED]
        The type of the resource record set.
    :type ResolverIP: string
    :param ResolverIP: If you want to simulate a request from a specific DNS resolver, specify the IP address for that resolver. If you omit this value, TestDnsAnswer uses the IP address of a DNS resolver in the AWS US East (N. Virginia) Region (us-east-1 ).
    :type EDNS0ClientSubnetIP: string
    :param EDNS0ClientSubnetIP: If the resolver that you specified for resolverip supports EDNS0, specify the IPv4 or IPv6 address of a client in the applicable location, for example, 192.0.2.44 or 2001:db8:85a3::8a2e:370:7334 .
    :type EDNS0ClientSubnetMask: string
    :param EDNS0ClientSubnetMask: If you specify an IP address for edns0clientsubnetip , you can optionally specify the number of bits of the IP address that you want the checking tool to include in the DNS query. For example, if you specify 192.0.2.44 for edns0clientsubnetip and 24 for edns0clientsubnetmask , the checking tool will simulate a request from 192.0.2.0/24. The default value is 24 bits for IPv4 addresses and 64 bits for IPv6 addresses.
    :rtype: dict
    :return: {
        'Nameserver': 'string',
        'RecordName': 'string',
        'RecordType': 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA',
        'RecordData': [
            'string',
        ],
        'ResponseCode': 'string',
        'Protocol': 'string'
    }
    :returns:
        For non-alias resource record sets, a RecordDataEntry element contains one value in the resource record set. If the resource record set contains multiple values, the response includes one RecordDataEntry element for each value.
        For multiple resource record sets that have the same name and type, which includes weighted, latency, geolocation, and failover, a RecordDataEntry element contains the value from the appropriate resource record set based on the request.
        For alias resource record sets that refer to AWS resources other than another resource record set, the RecordDataEntry element contains an IP address or a domain name for the AWS resource, depending on the type of resource.
        For alias resource record sets that refer to other resource record sets, a RecordDataEntry element contains one value from the referenced resource record set. If the referenced resource record set contains multiple values, the response includes one RecordDataEntry element for each value.
    """
    # NOTE(review): documentation-only stub -- the body is intentionally
    # empty (`pass`); no implementation exists in this module.
    pass
def update_health_check(HealthCheckId=None, HealthCheckVersion=None, IPAddress=None, Port=None, ResourcePath=None, FullyQualifiedDomainName=None, SearchString=None, FailureThreshold=None, Inverted=None, HealthThreshold=None, ChildHealthChecks=None, EnableSNI=None, Regions=None, AlarmIdentifier=None, InsufficientDataHealthStatus=None):
    """
    Updates an existing health check. Note that some values can't be updated.
    For more information about updating health checks, see Creating, Updating, and Deleting Health Checks in the Amazon Route 53 Developer Guide .
    See also: AWS API Documentation
    :example: response = client.update_health_check(
        HealthCheckId='string',
        HealthCheckVersion=123,
        IPAddress='string',
        Port=123,
        ResourcePath='string',
        FullyQualifiedDomainName='string',
        SearchString='string',
        FailureThreshold=123,
        Inverted=True|False,
        HealthThreshold=123,
        ChildHealthChecks=[
            'string',
        ],
        EnableSNI=True|False,
        Regions=[
            'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'sa-east-1',
        ],
        AlarmIdentifier={
            'Region': 'us-east-1'|'us-east-2'|'us-west-1'|'us-west-2'|'ca-central-1'|'eu-central-1'|'eu-west-1'|'eu-west-2'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'|'sa-east-1',
            'Name': 'string'
        },
        InsufficientDataHealthStatus='Healthy'|'Unhealthy'|'LastKnownStatus'
    )
    :type HealthCheckId: string
    :param HealthCheckId: [REQUIRED]
        The ID for the health check for which you want detailed information. When you created the health check, CreateHealthCheck returned the ID in the response, in the HealthCheckId element.
    :type HealthCheckVersion: integer
    :param HealthCheckVersion: A sequential counter that Amazon Route 53 sets to 1 when you create a health check and increments by 1 each time you update settings for the health check.
        We recommend that you use GetHealthCheck or ListHealthChecks to get the current value of HealthCheckVersion for the health check that you want to update, and that you include that value in your UpdateHealthCheck request. This prevents Amazon Route 53 from overwriting an intervening update:
        If the value in the UpdateHealthCheck request matches the value of HealthCheckVersion in the health check, Amazon Route 53 updates the health check with the new settings.
        If the value of HealthCheckVersion in the health check is greater, the health check was changed after you got the version number. Amazon Route 53 does not update the health check, and it returns a HealthCheckVersionMismatch error.
    :type IPAddress: string
    :param IPAddress: The IPv4 or IPv6 IP address for the endpoint that you want Amazon Route 53 to perform health checks on. If you don't specify a value for IPAddress , Amazon Route 53 sends a DNS request to resolve the domain name that you specify in FullyQualifiedDomainName at the interval that you specify in RequestInterval . Using an IP address that is returned by DNS, Amazon Route 53 then checks the health of the endpoint.
        Use one of the following formats for the value of IPAddress :
        IPv4 address : four values between 0 and 255, separated by periods (.), for example, 192.0.2.44 .
        IPv6 address : eight groups of four hexadecimal values, separated by colons (:), for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345 . You can also shorten IPv6 addresses as described in RFC 5952, for example, 2001:db8:85a3::abcd:1:2345 .
        If the endpoint is an EC2 instance, we recommend that you create an Elastic IP address, associate it with your EC2 instance, and specify the Elastic IP address for IPAddress . This ensures that the IP address of your instance never changes. For more information, see the applicable documentation:
        Linux: Elastic IP Addresses (EIP) in the Amazon EC2 User Guide for Linux Instances
        Windows: Elastic IP Addresses (EIP) in the Amazon EC2 User Guide for Windows Instances
        Note
        If a health check already has a value for IPAddress , you can change the value. However, you can't update an existing health check to add or remove the value of IPAddress .
        For more information, see UpdateHealthCheckRequest$FullyQualifiedDomainName .
        Constraints: Amazon Route 53 can't check the health of endpoints for which the IP address is in local, private, non-routable, or multicast ranges. For more information about IP addresses for which you can't create health checks, see the following documents:
        RFC 5735, Special Use IPv4 Addresses
        RFC 6598, IANA-Reserved IPv4 Prefix for Shared Address Space
        RFC 5156, Special-Use IPv6 Addresses
    :type Port: integer
    :param Port: The port on the endpoint on which you want Amazon Route 53 to perform health checks.
    :type ResourcePath: string
    :param ResourcePath: The path that you want Amazon Route 53 to request when performing health checks. The path can be any value for which your endpoint will return an HTTP status code of 2xx or 3xx when the endpoint is healthy, for example the file /docs/route53-health-check.html.
        Specify this value only if you want to change it.
    :type FullyQualifiedDomainName: string
    :param FullyQualifiedDomainName: Amazon Route 53 behavior depends on whether you specify a value for IPAddress .
        Note
        If a health check already has a value for IPAddress , you can change the value. However, you can't update an existing health check to add or remove the value of IPAddress .
        If you specify a value for IPAddress :
        Amazon Route 53 sends health check requests to the specified IPv4 or IPv6 address and passes the value of FullyQualifiedDomainName in the Host header for all health checks except TCP health checks. This is typically the fully qualified DNS name of the endpoint on which you want Amazon Route 53 to perform health checks.
        When Amazon Route 53 checks the health of an endpoint, here is how it constructs the Host header:
        If you specify a value of 80 for Port and HTTP or HTTP_STR_MATCH for Type , Amazon Route 53 passes the value of FullyQualifiedDomainName to the endpoint in the Host header.
        If you specify a value of 443 for Port and HTTPS or HTTPS_STR_MATCH for Type , Amazon Route 53 passes the value of FullyQualifiedDomainName to the endpoint in the Host header.
        If you specify another value for Port and any value except TCP for Type , Amazon Route 53 passes * FullyQualifiedDomainName :Port * to the endpoint in the Host header.
        If you don't specify a value for FullyQualifiedDomainName , Amazon Route 53 substitutes the value of IPAddress in the Host header in each of the above cases.
        If you don't specify a value for IPAddress :
        If you don't specify a value for IPAddress , Amazon Route 53 sends a DNS request to the domain that you specify in FullyQualifiedDomainName at the interval you specify in RequestInterval . Using an IPv4 address that is returned by DNS, Amazon Route 53 then checks the health of the endpoint.
        Note
        If you don't specify a value for IPAddress , Amazon Route 53 uses only IPv4 to send health checks to the endpoint. If there's no resource record set with a type of A for the name that you specify for FullyQualifiedDomainName , the health check fails with a 'DNS resolution failed' error.
        If you want to check the health of weighted, latency, or failover resource record sets and you choose to specify the endpoint only by FullyQualifiedDomainName , we recommend that you create a separate health check for each endpoint. For example, create a health check for each HTTP server that is serving content for www.example.com. For the value of FullyQualifiedDomainName , specify the domain name of the server (such as us-east-2-www.example.com ), not the name of the resource record sets (www.example.com).
        Warning
        In this configuration, if the value of FullyQualifiedDomainName matches the name of the resource record sets and you then associate the health check with those resource record sets, health check results will be unpredictable.
        In addition, if the value of Type is HTTP , HTTPS , HTTP_STR_MATCH , or HTTPS_STR_MATCH , Amazon Route 53 passes the value of FullyQualifiedDomainName in the Host header, as it does when you specify a value for IPAddress . If the value of Type is TCP , Amazon Route 53 doesn't pass a Host header.
    :type SearchString: string
    :param SearchString: If the value of Type is HTTP_STR_MATCH or HTTPS_STR_MATCH , the string that you want Amazon Route 53 to search for in the response body from the specified resource. If the string appears in the response body, Amazon Route 53 considers the resource healthy. (You can't change the value of Type when you update a health check.)
    :type FailureThreshold: integer
    :param FailureThreshold: The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa. For more information, see How Amazon Route 53 Determines Whether an Endpoint Is Healthy in the Amazon Route 53 Developer Guide .
        If you don't specify a value for FailureThreshold , the default value is three health checks.
    :type Inverted: boolean
    :param Inverted: Specify whether you want Amazon Route 53 to invert the status of a health check, for example, to consider a health check unhealthy when it otherwise would be considered healthy.
    :type HealthThreshold: integer
    :param HealthThreshold: The number of child health checks that are associated with a CALCULATED health that Amazon Route 53 must consider healthy for the CALCULATED health check to be considered healthy. To specify the child health checks that you want to associate with a CALCULATED health check, use the ChildHealthChecks and ChildHealthCheck elements.
        Note the following:
        If you specify a number greater than the number of child health checks, Amazon Route 53 always considers this health check to be unhealthy.
        If you specify 0 , Amazon Route 53 always considers this health check to be healthy.
    :type ChildHealthChecks: list
    :param ChildHealthChecks: A complex type that contains one ChildHealthCheck element for each health check that you want to associate with a CALCULATED health check.
        (string) --
    :type EnableSNI: boolean
    :param EnableSNI: Specify whether you want Amazon Route 53 to send the value of FullyQualifiedDomainName to the endpoint in the client_hello message during TLS negotiation. This allows the endpoint to respond to HTTPS health check requests with the applicable SSL/TLS certificate.
        Some endpoints require that HTTPS requests include the host name in the client_hello message. If you don't enable SNI, the status of the health check will be SSL alert handshake_failure . A health check can also have that status for other reasons. If SNI is enabled and you're still getting the error, check the SSL/TLS configuration on your endpoint and confirm that your certificate is valid.
        The SSL/TLS certificate on your endpoint includes a domain name in the Common Name field and possibly several more in the Subject Alternative Names field. One of the domain names in the certificate should match the value that you specify for FullyQualifiedDomainName . If the endpoint responds to the client_hello message with a certificate that does not include the domain name that you specified in FullyQualifiedDomainName , a health checker will retry the handshake. In the second attempt, the health checker will omit FullyQualifiedDomainName from the client_hello message.
    :type Regions: list
    :param Regions: A complex type that contains one Region element for each region that you want Amazon Route 53 health checkers to check the specified endpoint from.
        (string) --
    :type AlarmIdentifier: dict
    :param AlarmIdentifier: A complex type that identifies the CloudWatch alarm that you want Amazon Route 53 health checkers to use to determine whether this health check is healthy.
        Region (string) -- [REQUIRED]A complex type that identifies the CloudWatch alarm that you want Amazon Route 53 health checkers to use to determine whether this health check is healthy.
        For the current list of CloudWatch regions, see Amazon CloudWatch in the AWS Regions and Endpoints chapter of the Amazon Web Services General Reference .
        Name (string) -- [REQUIRED]The name of the CloudWatch alarm that you want Amazon Route 53 health checkers to use to determine whether this health check is healthy.
    :type InsufficientDataHealthStatus: string
    :param InsufficientDataHealthStatus: When CloudWatch has insufficient data about the metric to determine the alarm state, the status that you want Amazon Route 53 to assign to the health check:
        Healthy : Amazon Route 53 considers the health check to be healthy.
        Unhealthy : Amazon Route 53 considers the health check to be unhealthy.
        LastKnownStatus : Amazon Route 53 uses the status of the health check from the last time CloudWatch had sufficient data to determine the alarm state. For new health checks that have no last known status, the default status for the health check is healthy.
    :rtype: dict
    :return: {
        'HealthCheck': {
            'Id': 'string',
            'CallerReference': 'string',
            'HealthCheckConfig': {
                'IPAddress': 'string',
                'Port': 123,
                'Type': 'HTTP'|'HTTPS'|'HTTP_STR_MATCH'|'HTTPS_STR_MATCH'|'TCP'|'CALCULATED'|'CLOUDWATCH_METRIC',
                'ResourcePath': 'string',
                'FullyQualifiedDomainName': 'string',
                'SearchString': 'string',
                'RequestInterval': 123,
                'FailureThreshold': 123,
                'MeasureLatency': True|False,
                'Inverted': True|False,
                'HealthThreshold': 123,
                'ChildHealthChecks': [
                    'string',
                ],
                'EnableSNI': True|False,
                'Regions': [
                    'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'sa-east-1',
                ],
                'AlarmIdentifier': {
                    'Region': 'us-east-1'|'us-east-2'|'us-west-1'|'us-west-2'|'ca-central-1'|'eu-central-1'|'eu-west-1'|'eu-west-2'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'|'sa-east-1',
                    'Name': 'string'
                },
                'InsufficientDataHealthStatus': 'Healthy'|'Unhealthy'|'LastKnownStatus'
            },
            'HealthCheckVersion': 123,
            'CloudWatchAlarmConfiguration': {
                'EvaluationPeriods': 123,
                'Threshold': 123.0,
                'ComparisonOperator': 'GreaterThanOrEqualToThreshold'|'GreaterThanThreshold'|'LessThanThreshold'|'LessThanOrEqualToThreshold',
                'Period': 123,
                'MetricName': 'string',
                'Namespace': 'string',
                'Statistic': 'Average'|'Sum'|'SampleCount'|'Maximum'|'Minimum',
                'Dimensions': [
                    {
                        'Name': 'string',
                        'Value': 'string'
                    },
                ]
            }
        }
    }
    :returns:
        IPv4 address : four values between 0 and 255, separated by periods (.), for example, 192.0.2.44 .
        IPv6 address : eight groups of four hexadecimal values, separated by colons (:), for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345 . You can also shorten IPv6 addresses as described in RFC 5952, for example, 2001:db8:85a3::abcd:1:2345 .
    """
    # NOTE(review): documentation-only stub -- the body is intentionally
    # empty (`pass`); no implementation exists in this module.
    # Docstring fix: the SearchString paragraph read 'HTTP_STR_MATCH or
    # HTTP_STR_MATCH'; the second value is HTTPS_STR_MATCH per the
    # Route 53 UpdateHealthCheck API reference.
    pass
def update_hosted_zone_comment(Id=None, Comment=None):
    """
    Updates the comment for a specified hosted zone.
    See also: AWS API Documentation
    :example: response = client.update_hosted_zone_comment(
        Id='string',
        Comment='string'
    )
    :type Id: string
    :param Id: [REQUIRED]
        The ID for the hosted zone that you want to update the comment for.
    :type Comment: string
    :param Comment: The new comment for the hosted zone. If you don't specify a value for Comment , Amazon Route 53 deletes the existing value of the Comment element, if any.
    :rtype: dict
    :return: {
        'HostedZone': {
            'Id': 'string',
            'Name': 'string',
            'CallerReference': 'string',
            'Config': {
                'Comment': 'string',
                'PrivateZone': True|False
            },
            'ResourceRecordSetCount': 123
        }
    }
    """
    # NOTE(review): documentation-only stub -- the body is intentionally
    # empty (`pass`); no implementation exists in this module.
    pass
def update_traffic_policy_comment(Id=None, Version=None, Comment=None):
    """
    Updates the comment for a specified traffic policy version.
    See also: AWS API Documentation
    :example: response = client.update_traffic_policy_comment(
        Id='string',
        Version=123,
        Comment='string'
    )
    :type Id: string
    :param Id: [REQUIRED]
        The value of Id for the traffic policy that you want to update the comment for.
    :type Version: integer
    :param Version: [REQUIRED]
        The value of Version for the traffic policy that you want to update the comment for.
    :type Comment: string
    :param Comment: [REQUIRED]
        The new comment for the specified traffic policy and version.
    :rtype: dict
    :return: {
        'TrafficPolicy': {
            'Id': 'string',
            'Version': 123,
            'Name': 'string',
            'Type': 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA',
            'Document': 'string',
            'Comment': 'string'
        }
    }
    """
    # NOTE(review): documentation-only stub -- the body is intentionally
    # empty (`pass`); no implementation exists in this module.
    pass
def update_traffic_policy_instance(Id=None, TTL=None, TrafficPolicyId=None, TrafficPolicyVersion=None):
    """
    Updates the resource record sets in a specified hosted zone that were created based on the settings in a specified traffic policy version.
    When you update a traffic policy instance, Amazon Route 53 continues to respond to DNS queries for the root resource record set name (such as example.com) while it replaces one group of resource record sets with another. Amazon Route 53 performs the following operations:
    See also: AWS API Documentation
    :example: response = client.update_traffic_policy_instance(
        Id='string',
        TTL=123,
        TrafficPolicyId='string',
        TrafficPolicyVersion=123
    )
    :type Id: string
    :param Id: [REQUIRED]
        The ID of the traffic policy instance that you want to update.
    :type TTL: integer
    :param TTL: [REQUIRED]
        The TTL that you want Amazon Route 53 to assign to all of the updated resource record sets.
    :type TrafficPolicyId: string
    :param TrafficPolicyId: [REQUIRED]
        The ID of the traffic policy that you want Amazon Route 53 to use to update resource record sets for the specified traffic policy instance.
    :type TrafficPolicyVersion: integer
    :param TrafficPolicyVersion: [REQUIRED]
        The version of the traffic policy that you want Amazon Route 53 to use to update resource record sets for the specified traffic policy instance.
    :rtype: dict
    :return: {
        'TrafficPolicyInstance': {
            'Id': 'string',
            'HostedZoneId': 'string',
            'Name': 'string',
            'TTL': 123,
            'State': 'string',
            'Message': 'string',
            'TrafficPolicyId': 'string',
            'TrafficPolicyVersion': 123,
            'TrafficPolicyType': 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA'
        }
    }
    :returns:
        Id (string) -- [REQUIRED]
        The ID of the traffic policy instance that you want to update.
        TTL (integer) -- [REQUIRED]
        The TTL that you want Amazon Route 53 to assign to all of the updated resource record sets.
        TrafficPolicyId (string) -- [REQUIRED]
        The ID of the traffic policy that you want Amazon Route 53 to use to update resource record sets for the specified traffic policy instance.
        TrafficPolicyVersion (integer) -- [REQUIRED]
        The version of the traffic policy that you want Amazon Route 53 to use to update resource record sets for the specified traffic policy instance.
    """
    # NOTE(review): documentation-only stub -- the body is intentionally
    # empty (`pass`); no implementation exists in this module.
    pass
| 62.817881
| 787
| 0.694812
| 28,214
| 208,681
| 5.125292
| 0.041752
| 0.039113
| 0.034612
| 0.007469
| 0.86988
| 0.84809
| 0.833955
| 0.817012
| 0.809585
| 0.799232
| 0
| 0.013584
| 0.248231
| 208,681
| 3,321
| 788
| 62.836796
| 0.908172
| 0.917798
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
50b81a75df91cbbb77f2c2b72d79748984400ccc
| 5,728
|
py
|
Python
|
chapter01_basic/plotkernel.py
|
aaazzz640/cookbook-2nd-code
|
c0edeb78fe5a16e64d1210437470b00572211a82
|
[
"MIT"
] | null | null | null |
chapter01_basic/plotkernel.py
|
aaazzz640/cookbook-2nd-code
|
c0edeb78fe5a16e64d1210437470b00572211a82
|
[
"MIT"
] | null | null | null |
chapter01_basic/plotkernel.py
|
aaazzz640/cookbook-2nd-code
|
c0edeb78fe5a16e64d1210437470b00572211a82
|
[
"MIT"
] | null | null | null |
from ipykernel.kernelbase import Kernel
import numpy as np
import matplotlib.pyplot as plt
from io import BytesIO
import urllib, base64
def _to_png(fig):
"""Return a base64-encoded PNG from a
matplotlib figure."""
imgdata = BytesIO()
fig.savefig(imgdata, format='png')
imgdata.seek(0)
return urllib.parse.quote(
base64.b64encode(imgdata.getvalue()))
_numpy_namespace = {n: getattr(np, n)
for n in dir(np)}
def _parse_function(code):
"""Return a NumPy function from a
string 'y=f(x)'."""
return lambda x: eval(code.split('=')[1].strip(),
_numpy_namespace, {'x': x})
class PlotKernel(Kernel):
    """A minimal Jupyter kernel: every line of input of the form
    'y = f(x)' is evaluated with NumPy and plotted; the resulting
    figure is sent back to the frontend as a PNG."""
    implementation = 'Plot'
    implementation_version = '1.0'
    language = 'python'  # will be used for syntax highlighting
    language_version = '3.6'
    language_info = {'name': 'plotter',
                     'mimetype': 'text/plain',
                     'extension': '.py'}
    banner = "Simple plotting"

    def do_execute(self, code, silent,
                   store_history=True,
                   user_expressions=None,
                   allow_stdin=False):
        """Plot each 'y = f(x)' line of *code* and reply with the PNG.

        Returns the standard execute_reply content dict with
        status 'ok'.
        """
        # We create the plot with matplotlib.
        fig, ax = plt.subplots(1, 1, figsize=(6, 4),
                               dpi=100)
        x = np.linspace(-5., 5., 200)
        # Fix: ignore blank lines -- _parse_function would otherwise
        # raise on an empty string (trailing newline in the cell).
        functions = [line for line in code.split('\n') if line.strip()]
        for fun in functions:
            f = _parse_function(fun)
            y = f(x)
            ax.plot(x, y)
        ax.set_xlim(-5, 5)
        # We create a PNG out of this plot.
        png = _to_png(fig)
        if not silent:
            # We send the standard output to the client.
            # Fix: the Jupyter messaging protocol (>= 5.0) names the
            # stream payload key 'text'; the old 'data' key is the
            # pre-5.0 name and is ignored by current frontends.
            self.send_response(
                self.iopub_socket,
                'stream', {
                    'name': 'stdout',
                    'text': ('Plotting {n} '
                             'function(s)').format(n=len(functions))})
            # We prepare the response with our rich
            # data (the plot).
            content = {
                'source': 'kernel',
                # This dictionary may contain different MIME
                # representations of the output.
                'data': {
                    'image/png': png
                },
                # We can specify the image size
                # in the metadata field.
                'metadata': {
                    'image/png': {
                        'width': 600,
                        'height': 400
                    }
                }
            }
            # We send the display_data message with
            # the contents.
            self.send_response(self.iopub_socket,
                               'display_data', content)
        # We return the execution results.
        return {'status': 'ok',
                'execution_count': self.execution_count,
                'payload': [],
                'user_expressions': {},
                }
# Entry point: launch this kernel when the module is executed directly
# (e.g. from a kernelspec's argv), not when it is imported.
if __name__ == '__main__':
    from ipykernel.kernelapp import IPKernelApp
    IPKernelApp.launch_instance(
        kernel_class=PlotKernel)
# NOTE(review): this class is a verbatim duplicate of the PlotKernel
# defined earlier in this file (likely a copy/paste or file-extraction
# artifact). Python silently rebinds the name, shadowing the earlier
# definition. One of the two copies should be deleted.
class PlotKernel(Kernel):
    # Kernel identification metadata consumed by Jupyter frontends.
    implementation = 'Plot'
    implementation_version = '1.0'
    language = 'python'  # will be used for
    # syntax highlighting
    language_version = '3.6'
    language_info = {'name': 'plotter',
                     'mimetype': 'text/plain',
                     'extension': '.py'}
    banner = "Simple plotting"

    def do_execute(self, code, silent,
                   store_history=True,
                   user_expressions=None,
                   allow_stdin=False):
        # Plots each 'y = f(x)' input line and sends the PNG back.
        # We create the plot with matplotlib.
        fig, ax = plt.subplots(1, 1, figsize=(6,4),
                               dpi=100)
        x = np.linspace(-5., 5., 200)
        functions = code.split('\n')
        for fun in functions:
            f = _parse_function(fun)
            y = f(x)
            ax.plot(x, y)
        ax.set_xlim(-5, 5)
        # We create a PNG out of this plot.
        png = _to_png(fig)
        if not silent:
            # We send the standard output to the
            # client.
            # NOTE(review): the messaging protocol >= 5.0 expects the
            # key 'text' for stream content; 'data' looks like the
            # pre-5.0 name -- confirm against the frontend in use.
            self.send_response(
                self.iopub_socket,
                'stream', {
                    'name': 'stdout',
                    'data': ('Plotting {n} '
                             'function(s)'). \
                             format(n=len(functions))})
            # We prepare the response with our rich
            # data (the plot).
            content = {
                'source': 'kernel',
                # This dictionary may contain
                # different MIME representations of
                # the output.
                'data': {
                    'image/png': png
                },
                # We can specify the image size
                # in the metadata field.
                'metadata' : {
                    'image/png' : {
                        'width': 600,
                        'height': 400
                    }
                }
            }
            # We send the display_data message with
            # the contents.
            self.send_response(self.iopub_socket,
                               'display_data', content)
        # We return the execution results.
        return {'status': 'ok',
                'execution_count':
                    self.execution_count,
                'payload': [],
                'user_expressions': {},
               }
# NOTE(review): duplicate of the __main__ guard earlier in this file
# (same copy/paste artifact as the duplicated class directly above);
# harmless but redundant -- one copy should be removed.
if __name__ == '__main__':
    from ipykernel.kernelapp import IPKernelApp
    IPKernelApp.launch_instance(
        kernel_class=PlotKernel)
| 30.795699
| 54
| 0.463513
| 545
| 5,728
| 4.743119
| 0.288073
| 0.023211
| 0.013927
| 0.030948
| 0.841006
| 0.841006
| 0.841006
| 0.841006
| 0.841006
| 0.841006
| 0
| 0.017985
| 0.436976
| 5,728
| 185
| 55
| 30.962162
| 0.783566
| 0.163059
| 0
| 0.793651
| 0
| 0
| 0.100991
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031746
| false
| 0
| 0.055556
| 0
| 0.230159
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0fb288905a262d927bbb06d649fe5b7a7a69a6c7
| 9,700
|
py
|
Python
|
tests/test_pruning.py
|
justinbois/eqtk
|
7363b8c09e35088d2cb2cb5a62d315b52cce0d9b
|
[
"MIT"
] | 2
|
2020-05-17T05:34:40.000Z
|
2020-05-17T12:40:14.000Z
|
tests/test_pruning.py
|
justinbois/eqtk
|
7363b8c09e35088d2cb2cb5a62d315b52cce0d9b
|
[
"MIT"
] | 1
|
2021-12-07T01:20:48.000Z
|
2021-12-07T01:20:48.000Z
|
tests/test_pruning.py
|
justinbois/eqtk
|
7363b8c09e35088d2cb2cb5a62d315b52cce0d9b
|
[
"MIT"
] | null | null | null |
import numpy as np
import eqtk
def test_prune_NK():
    """Unit tests for eqtk.solvers._prune_NK.

    _prune_NK appears to reduce the stoichiometric matrix N, the -log K vector,
    and the initial concentrations x0 to only the species/reactions reachable
    from the nonzero entries of x0 (presumably — verify against the solver
    implementation). Each case below checks either that nothing is pruned or
    that the pruned arrays match hand-computed targets.
    """
    N = np.array(
        [
            [-1, 1, 0, 0, 0, 0],
            [-1, 0, -1, 1, 0, 0],
            [0, -2, 0, 0, 1, 0],
            [0, -1, -1, 0, 0, 1],
        ],
        dtype=float,
    )
    minus_log_K = np.array([1, 2, 3, 4], dtype=float)

    # All present: every species has nonzero initial concentration.
    x0 = np.array([1, 2, 3, 4, 5, 6], dtype=float)
    N_new, minus_log_K_new, x0_new, _, _ = eqtk.solvers._prune_NK(N, minus_log_K, x0)
    assert np.array_equal(N_new, N)
    assert np.array_equal(minus_log_K_new, minus_log_K)
    assert np.array_equal(x0_new, x0)

    # Also all present: species 0 and 2 seed every other species via reactions.
    x0 = np.array([1, 0, 3, 0, 0, 0], dtype=float)
    N_new, minus_log_K_new, x0_new, _, _ = eqtk.solvers._prune_NK(N, minus_log_K, x0)
    assert np.array_equal(N_new, N)
    assert np.array_equal(minus_log_K_new, minus_log_K)
    assert np.array_equal(x0_new, x0)

    # Also all present: starting only from the terminal species 5.
    x0 = np.array([0, 0, 0, 0, 0, 6], dtype=float)
    N_new, minus_log_K_new, x0_new, _, _ = eqtk.solvers._prune_NK(N, minus_log_K, x0)
    assert np.array_equal(N_new, N)
    assert np.array_equal(minus_log_K_new, minus_log_K)
    assert np.array_equal(x0_new, x0)

    # Only entries 0, 1 and 4 present
    x0 = np.array([0, 2, 0, 0, 0, 0], dtype=float)
    x0_target = np.array([0, 2, 0], dtype=float)
    N_target = np.array([[-1, 1, 0], [0, -2, 1]], dtype=float)
    minus_log_K_target = np.array([1, 3], dtype=float)
    N_new, minus_log_K_new, x0_new, _, _ = eqtk.solvers._prune_NK(N, minus_log_K, x0)
    assert np.array_equal(N_new, N_target)
    assert np.array_equal(minus_log_K_new, minus_log_K_target)
    assert np.array_equal(x0_new, x0_target)

    # Only entry 2 present: no reactions remain, so N collapses to empty.
    x0 = np.array([0, 0, 3, 0, 0, 0], dtype=float)
    N_target = np.array([[]])
    minus_log_K_target = np.array([])
    N_new, minus_log_K_new, x0_new, _, _ = eqtk.solvers._prune_NK(N, minus_log_K, x0)
    assert np.array_equal(N_new, N_target)
    assert np.array_equal(minus_log_K_new, minus_log_K_target)
    assert np.array_equal(x0_new, x0)

    # Second network with a different connectivity.
    N = np.array(
        [
            [-1, 0, 1, 0, 0, 0],
            [-1, -1, 0, 1, 0, 0],
            [0, -2, 0, 0, 1, 0],
            [0, -1, -1, 0, 0, 1],
        ],
        dtype=float,
    )
    minus_log_K = np.array([1, 2, 3, 4], dtype=float)

    # All present
    x0 = np.array([1, 2, 3, 4, 5, 6], dtype=float)
    N_new, minus_log_K_new, x0_new, _, _ = eqtk.solvers._prune_NK(N, minus_log_K, x0)
    assert np.array_equal(N_new, N)
    assert np.array_equal(minus_log_K_new, minus_log_K)
    assert np.array_equal(x0_new, x0)

    # Also all present
    x0 = np.array([1, 2, 0, 0, 0, 0], dtype=float)
    N_new, minus_log_K_new, x0_new, _, _ = eqtk.solvers._prune_NK(N, minus_log_K, x0)
    assert np.array_equal(N_new, N)
    assert np.array_equal(minus_log_K_new, minus_log_K)
    assert np.array_equal(x0_new, x0)

    # Also all present
    x0 = np.array([0, 0, 0, 0, 0, 6], dtype=float)
    N_new, minus_log_K_new, x0_new, _, _ = eqtk.solvers._prune_NK(N, minus_log_K, x0)
    assert np.array_equal(N_new, N)
    assert np.array_equal(minus_log_K_new, minus_log_K)
    assert np.array_equal(x0_new, x0)

    # Only entries 0 and 2 present
    x0 = np.array([1, 0, 3, 0, 0, 0], dtype=float)
    x0_target = np.array([1, 3], dtype=float)
    N_target = np.array([[-1, 1]], dtype=float)
    minus_log_K_target = np.array([1], dtype=float)
    N_new, minus_log_K_new, x0_new, _, _ = eqtk.solvers._prune_NK(N, minus_log_K, x0)
    assert np.array_equal(N_new, N_target)
    assert np.array_equal(minus_log_K_new, minus_log_K_target)
    assert np.array_equal(x0_new, x0_target)

    # Only entries 1 and 4 present
    x0 = np.array([0, 2, 0, 0, 0, 0], dtype=float)
    x0_target = np.array([2, 0], dtype=float)
    N_target = np.array([[-2, 1]], dtype=float)
    minus_log_K_target = np.array([3], dtype=float)
    N_new, minus_log_K_new, x0_new, _, _ = eqtk.solvers._prune_NK(N, minus_log_K, x0)
    assert np.array_equal(N_new, N_target)
    assert np.array_equal(minus_log_K_new, minus_log_K_target)
    assert np.array_equal(x0_new, x0_target)

    # Reactions with solvent dissociation
    N = np.array(
        [[1, 0, 1, 0, -1, 0], [1, 0, 0, 1, 0, -1], [1, 1, 1, 0, 0, 0]], dtype=float
    )
    minus_log_K = np.array([1, 2, 3], dtype=float)

    # No pruning for any of these initial conditions.
    for x0_val in [
        [1, 1, 1, 1, 1, 1],
        [0, 0, 0, 1, 0, 0],
        [1, 1, 1, 1, 0, 0],
        [0, 0, 0, 0, 0, 1],
    ]:
        x0 = np.array(x0_val, dtype=float)
        N_new, minus_log_K_new, x0_new, _, _ = eqtk.solvers._prune_NK(
            N, minus_log_K, x0
        )
        assert np.array_equal(N_new, N)
        assert np.array_equal(minus_log_K_new, minus_log_K)
        assert np.array_equal(x0_new, x0)

    # Only entries 0, 1, 2, and 4 remain active; also check the returned masks.
    x0 = np.zeros(6, dtype=float)
    x0_target = np.zeros(4, dtype=float)
    N_target = np.array([[1, 0, 1, -1], [1, 1, 1, 0]], dtype=float)
    minus_log_K_target = np.array([1.0, 3.0])
    active_compounds_target = np.array([True, True, True, False, True, False])
    (
        N_new,
        minus_log_K_new,
        x0_new,
        active_compounds,
        active_reactions,
    ) = eqtk.solvers._prune_NK(N, minus_log_K, x0)
    assert np.array_equal(active_compounds, active_compounds_target)
    assert np.array_equal(N_new, N_target)
    assert np.array_equal(minus_log_K_new, minus_log_K_target)
    assert np.array_equal(x0_new, x0_target)
def test_prune_AG():
    """Unit tests for eqtk.solvers._prune_AG.

    _prune_AG appears to reduce the conservation matrix A, the free energies G,
    and the initial concentrations x0 to only the active species (presumably
    those reachable from nonzero x0 entries — verify against the solver).

    Fix: ``np.bool8`` was an alias removed in NumPy 2.0; use the supported
    ``np.bool_`` scalar type instead (identical behavior on older NumPy too).
    """
    A = np.array([[1, 0, 1, 1, 0, 1], [0, 1, 0, 1, 2, 1]], dtype=float)
    G = np.array([1, 2, 3, 4, 5, 6], dtype=float)

    # No pruning
    for x0_val in [[1, 2, 3, 4, 5, 6], [1, 2, 0, 0, 0, 0], [0, 0, 0, 0, 0, 6]]:
        x0 = np.array(x0_val, dtype=float)
        A_new, G_new, x0_new, active_compounds = eqtk.solvers._prune_AG(A, G, x0)
        assert np.array_equal(active_compounds, np.ones(6, dtype=np.bool_))
        assert np.array_equal(A_new, A)
        assert np.array_equal(G_new, G)
        assert np.array_equal(x0_new, x0)

    # Only species 1 and 4
    x0 = np.array([0, 0, 0, 0, 5, 0], dtype=float)
    x0_prune = np.array([0, 5], dtype=float)
    A_target = np.array([[1, 2]], dtype=float)
    G_target = np.array([2, 5], dtype=float)
    A_new, G_new, x0_new, active_compounds = eqtk.solvers._prune_AG(A, G, x0)
    assert np.array_equal(
        active_compounds, np.array([0, 1, 0, 0, 1, 0], dtype=np.bool_)
    )
    assert np.array_equal(A_new, A_target)
    assert np.array_equal(G_new, G_target)
    assert np.array_equal(x0_new, x0_prune)

    # Simple case, binary binding
    A = np.array([[1, 0, 1], [0, 1, 1]], dtype=float)
    G = np.array([0, 0, 1], dtype=float)

    # No pruning
    for x0_val in [[1, 1, 1], [1, 1, 0], [0, 0, 1]]:
        x0 = np.array(x0_val, dtype=float)
        A_new, G_new, x0_new, active_compounds = eqtk.solvers._prune_AG(A, G, x0)
        assert np.array_equal(active_compounds, np.ones(3, dtype=np.bool_))
        assert np.array_equal(A_new, A)
        assert np.array_equal(G_new, G)
        assert np.array_equal(x0_new, x0)

    # Only keep element 0
    x0 = np.array([1, 0, 0], dtype=float)
    x0_prune = np.array([1.0])
    A_target = np.array([[1]], dtype=float)
    G_target = np.array([0.0])
    A_new, G_new, x0_new, active_compounds = eqtk.solvers._prune_AG(A, G, x0)
    assert np.array_equal(active_compounds, np.array([1, 0, 0], dtype=np.bool_))
    assert np.array_equal(A_new, A_target)
    assert np.array_equal(G_new, G_target)
    assert np.array_equal(x0_new, x0_prune)

    # Only keep element 1
    x0 = np.array([0, 1, 0], dtype=float)
    x0_prune = np.array([1.0])
    A_target = np.array([[1]], dtype=float)
    G_target = np.array([0.0])
    A_new, G_new, x0_new, active_compounds = eqtk.solvers._prune_AG(A, G, x0)
    assert np.array_equal(active_compounds, np.array([0, 1, 0], dtype=np.bool_))
    assert np.array_equal(A_new, A_target)
    assert np.array_equal(G_new, G_target)
    assert np.array_equal(x0_new, x0_prune)

    # A trickier case
    A = np.array(
        [[1, 0, 1, 1, 1], [0, 1, 2, 1, 0], [1, 1, 0, 1, 0], [0, 0, 0, 0, 1]],
        dtype=float,
    )
    G = np.array([1, 2, 3, 4, 5], dtype=float)

    # No pruning
    for x0_val in [[1, 1, 0, 0, 1], [0, 0, 0, 1, 1], [0, 1, 0, 0, 1], [1, 1, 1, 1, 1]]:
        x0 = np.array(x0_val, dtype=float)
        A_new, G_new, x0_new, active_compounds = eqtk.solvers._prune_AG(A, G, x0)
        assert np.array_equal(active_compounds, np.ones(5, dtype=np.bool_))
        assert np.array_equal(A_new, A)
        assert np.array_equal(G_new, G)
        assert np.array_equal(x0_new, x0)

    # Only entry 2
    x0 = np.array([0, 0, 1, 0, 0], dtype=float)
    x0_prune = np.array([1], dtype=float)
    A_target = np.array([[1], [2]], dtype=float)
    G_target = np.array([3], dtype=float)
    A_new, G_new, x0_new, active_compounds = eqtk.solvers._prune_AG(A, G, x0)
    assert np.array_equal(active_compounds, np.array([0, 0, 1, 0, 0], dtype=np.bool_))
    assert np.array_equal(A_new, A_target)
    assert np.array_equal(G_new, G_target)
    assert np.array_equal(x0_new, x0_prune)

    # All but last entry
    A_target = A[:-1, :-1]
    G_target = np.array([1, 2, 3, 4], dtype=float)
    for x0_val in [[1, 1, 0, 0, 0], [0, 0, 0, 1, 0], [1, 1, 1, 1, 0]]:
        x0 = np.array(x0_val, dtype=float)
        x0_prune = x0[:-1]
        A_new, G_new, x0_new, active_compounds = eqtk.solvers._prune_AG(A, G, x0)
        assert np.array_equal(
            active_compounds, np.array([1, 1, 1, 1, 0], dtype=np.bool_)
        )
        assert np.array_equal(A_new, A_target)
        assert np.array_equal(G_new, G_target)
        assert np.array_equal(x0_new, x0_prune)
| 38.8
| 87
| 0.610928
| 1,771
| 9,700
| 3.087521
| 0.036702
| 0.168983
| 0.164045
| 0.22714
| 0.938917
| 0.927762
| 0.908376
| 0.873811
| 0.836138
| 0.819495
| 0
| 0.070582
| 0.234639
| 9,700
| 249
| 88
| 38.955823
| 0.665948
| 0.046082
| 0
| 0.570707
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.348485
| 1
| 0.010101
| false
| 0
| 0.010101
| 0
| 0.020202
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0fb88485e5bb447c146ec188f9ba8c3c858bc89b
| 7,121
|
py
|
Python
|
tutorial-contents/test.py
|
LishudaNoBug/learning_PyTorch
|
1026035a9cb3d70e2fe97363b532e63db3ca136d
|
[
"MIT"
] | 1
|
2021-05-20T09:30:16.000Z
|
2021-05-20T09:30:16.000Z
|
tutorial-contents/test.py
|
LishudaNoBug/learning_PyTorch
|
1026035a9cb3d70e2fe97363b532e63db3ca136d
|
[
"MIT"
] | null | null | null |
tutorial-contents/test.py
|
LishudaNoBug/learning_PyTorch
|
1026035a9cb3d70e2fe97363b532e63db3ca136d
|
[
"MIT"
] | null | null | null |
import jaydebeapi

# JDBC connection parameters for the remote H2 database (with the H2GIS
# spatial extension provided by the jar below).
driver = 'org.h2.Driver'  # fixed typo: was 'dirver'
url = 'jdbc:h2:tcp://192.168.0.242:9101/~/ship5'
username = 'sa'
password = ''
# Raw string so the Windows-path backslashes are never interpreted as escapes.
jar = r'D:\development\h2gis-standalone\h2gis-dist-1.5.0.jar'

# WKT route used for the spatial intersection query.
LINESTRING = "LINESTRING(123.85507399897297 36.35646292498599,123.76116568903645 36.54704329302798,123.7572041259261 36.55419585040103,123.64867598871906 36.72953078082095,123.52717406611164 36.89056250384341,123.47056777338703 36.95641753008853,123.37236220698078 37.06010481646548,123.36652953486164 37.06589934161197,123.16477019648273 37.24630019000064,122.926736901805 37.42191740801822,122.87312896113117 37.45738264849673,122.77634436945637 37.51833388140689,122.72367102961262 37.54999968340884,122.71519858698566 37.55500075152408,122.70384127955158 37.561666934995756,122.6467018828841 37.59455534747134,122.63826281885822 37.59932371905337,122.61623866419514 37.6116661720563,122.60121829371174 37.61999937823306,122.59221846918781 37.624960392027006,122.59214598993977 37.625000446348295,122.58303267817219 37.6299996071149,122.56774337153156 37.638332813291655,122.54923446039875 37.64833304217349,122.51684577326496 37.66560599139224,122.47554976801594 37.68722578814517,122.47163970331867 37.689249485044584,122.47019011835773 37.68999907305728,122.4362698302718 37.70738837054263,122.32436377863606 37.762844532041655,122.28107268671711 37.7835640602399,122.27314765314777 37.78731581500064,122.25967604975422 37.79366347125064,122.14418036799152 37.846659153013334,122.08873374323566 37.871250598936186,122.07710654597004 37.87634131243716,122.06227881769856 37.8828015022565,122.01850516657551 37.90166708758365,121.99108321528156 37.91333243182193,121.94743926386555 37.93166586688052,121.94541556696613 37.932508914976225,121.93493468622883 37.9368672066022,121.87482840876301 37.96155974200259,121.86805350641926 37.96431204607974,121.70179183344563 38.02999922564517,121.6780215011092 38.039116352110014,121.67709834437092 38.039469211607084,121.66394908289631 38.04448363116275,121.56544787745197 38.08144041827212,121.42016322474201 38.134134738950834,121.41327197413166 38.136583774595366,121.40484149317463 38.139574497251616,121.39893157343586 38.14166685870181,121.33697135309895 38.16341635516177,121.32283027033527 38.16833349993716,121.29391677241047 38.178333728819,121.2793865905257 38.1833328895856,121.2452765212508 38.195000141172514,121.22566325525959 38.201666324644194,121.14049250940998 38.23026701739322,121.11883551935871 38.23745199969302,121.08451468805988 38.2487682991315,121.07568080286701 38.25166746905337,121.00910575251301 38.27334067156802,120.8505421386214 38.32378241351138,120.8304348693343 38.3300671272565,120.79810244898518 38.340122669248686,120.60154254297932 38.40000006487857,120.58482272486408 38.40499922564517,120.53309256891926 38.42038008502017,120.5222073302718 38.42359968951236,120.51112182001789 38.426872699766264,120.47818572382648 38.436563938169584,120.46077544550617 38.44166609576236,120.44477469782551 38.446342914610014,120.27012259821613 38.49666640093814,120.26233298639973 38.498880832700834,120.24665076594074 38.50333258440982,120.21654708246906 38.51185080340396,120.19355208735188 38.51833388140689,120.18010337214191 38.522116153745756,120.15784461359699 38.528360813169584,120.14007575373371 38.53333327105533,120.11020095209797 38.541666477232084,120.08621985773762 38.548332660703764,120.074197839305 38.55166670611392,120.06818206171711 38.553333728819,120.06018645624836 38.55554625323306,120.04593093256672 38.55948683550845,120.03803069452961 38.561666934995756,120.0186501250716 38.56700751116763,120.0077687011214 38.570000141172514,119.99563033442219 38.57333418658267,119.9678688750716 38.58094069293033,119.85980231623371 38.61031576922427,119.85480124811848 38.6116661720563,119.83626181940754 38.61666724017154,119.81768805842121 38.62166640093814,119.81192786555012 38.62321516802798,119.8052826629134 38.625000446348295,119.80380446772297 38.62539717486392,119.80141074518879 38.62603995135318,119.78981406550129 38.629150836973295,119.78875357966145 38.629435031919584,119.78481871943195 38.63048979571353,119.78042418818195 38.631666629819975,119.77748305659016 38.63245436480533,119.75163085322102 38.63936659625064,119.74908072809895 38.64004751971255,119.74705130915363 38.64058920672427,119.7415619597884 38.64205405047427,119.73676307062824 38.64333388140689,119.73050696711262 38.64500090411197,119.72956282953938 38.6452526741315,119.7294769988509 38.64527556231509,119.72776515345295 38.645731418638334,119.72425563196857 38.64666601946841,119.72285849909504 38.64703795245181,119.71803672175129 38.648321598081694,119.71542556147297 38.64901587298404,119.70783812861164 38.65103384783755,119.70625884394367 38.65145346453677,119.70618636469563 38.6514725380231,119.7051678405257 38.65174338152896,119.70478064875324 38.65184637835513,119.70437056879719 38.6519550972272,119.70376021723469 38.65211722186099,119.70342452387531 38.65220686724673,119.70310027460773 38.65229269793521,119.70290763239582 38.652344196348295,119.70287806849201 38.65235182574283,119.7028713927718 38.65235373309146,119.70285613398273 38.652357547788725,119.70274169306477 38.65238806536685,119.70245463709553 38.65246435931216,119.70242507319172 38.652471988706694,119.70241172175129 38.65247580340396,119.70228965143879 38.65250822833072,119.70218188624104 38.65253683856021,119.70206649164875 38.652567356138334,119.6991806731673 38.653334110288725,119.65940768580158 38.66387602618228,119.56526858668049 38.688665836362944,119.56285960535725 38.689297168760405,119.46577269892414 38.71463248065005,119.44843489985188 38.71913382342349,119.36452300409992 38.7408280067731,119.31309993128498 38.754049747495756,119.28042037348469 38.762424915342436,119.2668362365218 38.76590010455142,119.19580085138996 38.78401610186587,119.19523055414875 38.78416106036197,119.19337088923176 38.784634082822905,119.18909556727131 38.78572127154361,119.18796260218342 38.78600928118716,119.18763167719563 38.786093204527006,119.18678481440266 38.786308734922514,119.18646247248371 38.786390750913725,119.18619925837238 38.78645750811587,119.18592173914631 38.78652808001529,119.13034255366047 38.80062910845767,119.1293621764632 38.800877063779936,119.12188823084553 38.802769153623686,119.10350997309406 38.80741736224185,119.07327563624104 38.815052478819,118.98544795374592 38.83714911273013,118.96637256006916 38.841932743101225,118.92140014033039 38.853189914732084,118.78167159418781 38.88799139788638,118.71773345331867 38.903835742979155,118.71703917841633 38.90400740435611,118.71449572901447 38.904636829404936,118.71165854792316 38.90533873370181,118.59876067499836 38.9331993751813,118.58602912287434 38.936333148985014,118.47785765986164 38.96290060809146,118.38089949946125 38.986633747129545)"

# Parameterized query: the geometry text is bound as a JDBC parameter (qmark
# style per DB-API) instead of being concatenated into the SQL string, which
# avoids SQL-injection/quoting problems with the embedded WKT.
sql = ("select * from world_country "
       "where geom && st_geomfromtext(?) and st_intersects(st_geomfromtext(?), geom)")

conn = jaydebeapi.connect(driver, url, [username, password], jar)
try:
    curs = conn.cursor()
    try:
        curs.execute(sql, [LINESTRING, LINESTRING])
        # Print the first column of the first matching row, as before.
        result = curs.fetchall()[0][0]
        print(result)
    finally:
        curs.close()
finally:
    conn.close()
| 339.095238
| 6,583
| 0.876562
| 789
| 7,121
| 7.90621
| 0.518378
| 0.003527
| 0.007695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.854236
| 0.033703
| 7,121
| 20
| 6,584
| 356.05
| 0.052318
| 0.007302
| 0
| 0
| 0
| 0.076923
| 0.960079
| 0.919734
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.153846
| 0.076923
| 0
| 0.076923
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
ba250c7e97a3516db9e3de8a21d082b1d97fcf02
| 80
|
py
|
Python
|
trainer/__init__.py
|
DaseiNaN/TSE_VF
|
f31f8ba89383956ef72904d1a9bb68cee4b79b1a
|
[
"MIT"
] | null | null | null |
trainer/__init__.py
|
DaseiNaN/TSE_VF
|
f31f8ba89383956ef72904d1a9bb68cee4b79b1a
|
[
"MIT"
] | null | null | null |
trainer/__init__.py
|
DaseiNaN/TSE_VF
|
f31f8ba89383956ef72904d1a9bb68cee4b79b1a
|
[
"MIT"
] | null | null | null |
# Package facade: re-export all public names from the individual trainer
# modules so callers can import them directly from the ``trainer`` package.
from .dpcl_trainer import *
from .vf_trainer import *
from .pit_trainer import *
| 26.666667
| 27
| 0.7875
| 12
| 80
| 5
| 0.5
| 0.65
| 0.566667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1375
| 80
| 3
| 28
| 26.666667
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e84459d3409b63f56e24147daf4964e402c394a3
| 107
|
py
|
Python
|
src/couche_network/protocol_sercos.py
|
adrienKoumgangT/offline-internet-packet-analyzer
|
84125521b746a3ced37c999c30156033eee1aba9
|
[
"MIT"
] | 1
|
2021-12-13T20:36:58.000Z
|
2021-12-13T20:36:58.000Z
|
src/couche_network/protocol_sercos.py
|
adrienKoumgangT/offline-internet-packet-analyzer
|
84125521b746a3ced37c999c30156033eee1aba9
|
[
"MIT"
] | null | null | null |
src/couche_network/protocol_sercos.py
|
adrienKoumgangT/offline-internet-packet-analyzer
|
84125521b746a3ced37c999c30156033eee1aba9
|
[
"MIT"
] | 1
|
2021-12-13T20:37:01.000Z
|
2021-12-13T20:37:01.000Z
|
from utils.utils import *
def analyse_packet_sercos_iii(packet: list):
    """Analyse a SERCOS III packet.

    Placeholder: dissection of the SERCOS III protocol is not implemented yet,
    so the same stub result is returned for every packet.

    :param packet: raw packet fields (currently ignored)
    :return: dict mapping the protocol name to a placeholder value
    """
    analysis = {"SERCOS III": "TODO"}
    return analysis
| 17.833333
| 44
| 0.719626
| 15
| 107
| 4.933333
| 0.733333
| 0.243243
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158879
| 107
| 5
| 45
| 21.4
| 0.822222
| 0
| 0
| 0
| 0
| 0
| 0.130841
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
e84c034437c0b24424257da2946c1876a2328306
| 111,479
|
py
|
Python
|
Mscale_GKN(tf2)/MS_LaplaceEqs.py
|
Blue-Giant/Mscale_GKN
|
004664937e8adebc2dcf4bb8172cf4c0498b315a
|
[
"MIT"
] | null | null | null |
Mscale_GKN(tf2)/MS_LaplaceEqs.py
|
Blue-Giant/Mscale_GKN
|
004664937e8adebc2dcf4bb8172cf4c0498b315a
|
[
"MIT"
] | null | null | null |
Mscale_GKN(tf2)/MS_LaplaceEqs.py
|
Blue-Giant/Mscale_GKN
|
004664937e8adebc2dcf4bb8172cf4c0498b315a
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
import matData2pLaplace
# NOTE: np.ones_like(x) requires x to be a concrete number/array/matrix with real
# values — it must not be a TensorFlow placeholder. For placeholders, use
# tf.ones_like instead.
# PDE problem data: boundary conditions, initial conditions, true solution, and
# the right-hand-side forcing function.
def get_infos2pLaplace_1D(in_dim=None, out_dim=None, intervalL=0, intervalR=1, index2p=2, eps=0.01, equa_name=None):
    """Problem data for a 1D multiscale p-Laplace equation with oscillation scale eps.

    Returns a 5-tuple of callables of a tensor x:
      (utrue, f, aeps, u_l, u_r) — manufactured true solution, right-hand side
      (chosen per the p-exponent ``index2p``), oscillatory coefficient, and the
      two (zero) Dirichlet boundary functions.
    in_dim/out_dim/intervalL/intervalR/equa_name are accepted but unused here.
    """
    # Oscillatory coefficient with period eps; 2 + cos(.) keeps it in [1, 3].
    aeps = lambda x: 1.0 / (2 + tf.cos(2 * np.pi * x / eps))
    utrue = lambda x: x - tf.square(x) + eps * (
        1 / np.pi * tf.sin(np.pi * 2 * x / eps) * (1 / 4 - x / 2) - eps / (4 * np.pi ** 2) * tf.cos(
            np.pi * 2 * x / eps) + eps / 4 / np.pi ** 2)
    # Homogeneous Dirichlet boundary values at both ends.
    u_l = lambda x: tf.zeros_like(x)
    u_r = lambda x: tf.zeros_like(x)
    # Right-hand side f for specific p-exponents; the final branch is the
    # general closed form (presumably derived symbolically — verify for new p).
    if index2p == 2:
        f = lambda x: tf.ones_like(x)
    elif index2p == 3:
        f = lambda x: abs(2 * x - 1) * (
            4 * eps + 2 * eps * tf.cos(2 * np.pi * x / eps) + np.pi * (1 - 2 * x) * tf.sin(2 * np.pi * x / eps)) / (
            2 * eps)
    elif index2p == 4:
        f = lambda x: ((1 - 2 * x) ** 2) * (2 + tf.cos(2 * np.pi * x / eps)) * (
            6 * eps + 3 * eps * tf.cos(2 * np.pi * x / eps) - 2 * np.pi * (2 * x - 1) * tf.sin(
                2 * np.pi * x / eps)) / (
            4 * eps)
    elif index2p == 5:
        f = lambda x: -1.0 * abs((2 * x - 1) ** 3) * ((2 + tf.cos(2 * np.pi * x / eps)) ** 2) * (
            3 * np.pi * (2 * x - 1) * tf.sin(2 * np.pi * x / eps) - 4 * eps * tf.cos(
                2 * np.pi * x / eps) - 8 * eps) / (
            8 * eps)
    elif index2p == 8:
        f = lambda x: ((1 - 2 * x) ** 6) * ((2 + tf.cos(2 * np.pi * x / eps)) ** 5) * (
            7 * eps * tf.cos(2 * np.pi * x / eps) + 2 * (
                7 * eps - 3 * np.pi * (2 * x - 1) * tf.sin(2 * np.pi * x / eps))) / (
            64 * eps)
    else:
        f = lambda x: (np.power(abs(1 - 2 * x), index2p) * np.power(2 + tf.cos(2 * np.pi * x / eps), index2p) * (
            eps * (index2p - 1) * (2 + tf.cos(2 * np.pi * x / eps)) - np.pi * (index2p - 2) * (2 * x - 1) * tf.sin(
                2 * np.pi * x / eps))) / (
            np.power(2, index2p - 2) * eps * ((1 - 2 * x) ** 2) * (
                (2 + tf.cos(2 * np.pi * x / eps)) ** 3))
    return utrue, f, aeps, u_l, u_r
def get_infos2pLaplace_1D_2(in_dim=None, out_dim=None, intervalL=0, intervalR=1, index2p=2, eps1=0.02, eps2=0.01, equa_name=None):
    """Problem data for a two-scale 1D equation with product coefficient.

    Returns (utrue, f, aeps, u_l, u_r) as callables of a tensor x: the
    manufactured true solution, a constant right-hand side of ones, the
    coefficient (2 + cos(2πx/eps1))(2 + cos(2πx/eps2)), and zero Dirichlet
    boundary functions. Unused parameters are kept for signature uniformity.
    """
    a_coef = lambda x: (2 + tf.cos(2 * np.pi * x / eps1)) * (2 + tf.cos(2 * np.pi * x / eps2))
    u_true = lambda x: x - tf.square(x) + (eps1 / (4 * np.pi)) * tf.sin(2 * np.pi * x / eps1) + (eps2 / (4 * np.pi)) * tf.sin(2 * np.pi * x / eps2)
    f_side = lambda x: tf.ones_like(x)
    bd_left = lambda x: tf.zeros_like(x)
    bd_right = lambda x: tf.zeros_like(x)
    return u_true, f_side, a_coef, bd_left, bd_right
def get_infos2pLaplace_1D_3(in_dim=None, out_dim=None, intervalL=0, intervalR=1, index2p=2, eps1=0.02, eps2=0.01, equa_name=None):
    """Problem data for a two-scale 1D equation with reciprocal product coefficient.

    Returns (utrue, f, aeps, u_l, u_r) as callables of a tensor x:
    aeps = 1 / [(2 + cos(2πx/eps1))(2 + cos(2πx/eps2))], a manufactured true
    solution, zero Dirichlet boundary functions, and a constant ones RHS.

    NOTE(review): the original body also defined derivative helpers ax/ux/uxx
    that were never used nor returned; they have been removed as dead code.
    """
    aeps = lambda x: 1.0 / ((2 + tf.cos(2 * np.pi * x / eps1)) * (2 + tf.cos(2 * np.pi * x / eps2)))
    utrue = lambda x: x - tf.square(x) + (eps1 / (4 * np.pi)) * tf.sin(2 * np.pi * x / eps1) + (eps2 / (4 * np.pi)) * tf.sin(2 * np.pi * x / eps2)
    u_l = lambda x: tf.zeros_like(x)
    u_r = lambda x: tf.zeros_like(x)
    f = lambda x: tf.ones_like(x)
    return utrue, f, aeps, u_l, u_r
def get_infos2pLaplace_1D_4(in_dim=None, out_dim=None, intervalL=0, intervalR=1, index2p=2, eps1=0.02, eps2=0.01, equa_name=None):
    """Problem data for a two-scale 1D equation with additive coefficient.

    Returns (utrue, f, aeps, u_l, u_r) as callables of a tensor x:
    aeps = (2 + cos(2πx/eps1)) + (2 + cos(2πx/eps2)), a manufactured true
    solution, zero Dirichlet boundary functions, and a constant ones RHS.

    NOTE(review): the original body also defined derivative helpers ax/ux/uxx
    that were never used nor returned; they have been removed as dead code.
    """
    aeps = lambda x: (2 + tf.cos(2 * np.pi * x / eps1)) + (2 + tf.cos(2 * np.pi * x / eps2))
    utrue = lambda x: x - tf.square(x) + (eps1 / (4 * np.pi)) * tf.sin(2 * np.pi * x / eps1) + (eps2 / (4 * np.pi)) * tf.sin(2 * np.pi * x / eps2)
    u_l = lambda x: tf.zeros_like(x)
    u_r = lambda x: tf.zeros_like(x)
    f = lambda x: tf.ones_like(x)
    return utrue, f, aeps, u_l, u_r
def force_sice_3scale2(x, eps1=0.02, eps2=0.01):
    """Manufactured forcing term f = -(a' u' + a u'') for the product coefficient
    a(x) = (2 + cos(2πx/eps1))(2 + cos(2πx/eps2)) of ``get_infos2pLaplace_1D_2``.
    """
    a_val = (2 + tf.cos(2 * np.pi * x / eps1)) * (2 + tf.cos(2 * np.pi * x / eps2))
    # a'(x) via the product rule.
    a_grad = -(2 * np.pi / eps1) * tf.sin(2 * np.pi * x / eps1) * (2 + tf.cos(2 * np.pi * x / eps2)) - \
             (2 * np.pi / eps2) * tf.sin(2 * np.pi * x / eps2) * (2 + tf.cos(2 * np.pi * x / eps1))
    # u'(x) and u''(x) of the manufactured true solution.
    u_grad = 1 - 2 * x + 0.5 * tf.cos(2 * np.pi * x / eps1) + 0.5 * tf.cos(2 * np.pi * x / eps2)
    u_lap = -2 - (np.pi / eps1) * tf.sin(2 * np.pi * x / eps1) - (np.pi / eps2) * tf.sin(2 * np.pi * x / eps2)
    return -1.0 * (a_grad * u_grad + a_val * u_lap)
def force_sice_3scale3(x, eps1=0.02, eps2=0.01):
    """Manufactured forcing term f = -(a' u' + a u'') for the reciprocal product
    coefficient a(x) = 1 / [(2 + cos(2πx/eps1))(2 + cos(2πx/eps2))]
    of ``get_infos2pLaplace_1D_3``.
    """
    aeps = 1.0/((2 + tf.cos(2 * np.pi * x / eps1)) * (2 + tf.cos(2 * np.pi * x / eps2)))
    # Denominators of the two terms of a'(x) (chain rule on the reciprocal):
    # ax1 = c1^2 * c2, ax2 = c1 * c2^2 with c_i = 2 + cos(2πx/eps_i).
    ax1 = (2 + tf.cos(2 * np.pi * x / eps1))*(2 + tf.cos(2 * np.pi * x / eps1))*(2 + tf.cos(2 * np.pi * x / eps2))
    ax2 = (2 + tf.cos(2 * np.pi * x / eps1)) * (2 + tf.cos(2 * np.pi * x / eps2)) * (2 + tf.cos(2 * np.pi * x / eps2))
    ax = -(2 * np.pi / eps1) * tf.sin(2 * np.pi * x / eps1) * (1/ax1) - \
         (2 * np.pi / eps2) * tf.sin(2 * np.pi * x / eps2) * (1/ax2)
    # u'(x) and u''(x) of the manufactured true solution.
    ux = 1 - 2 * x + 0.5 * tf.cos(2 * np.pi * x / eps1) + 0.5 * tf.cos(2 * np.pi * x / eps2)
    uxx = -2 - (np.pi / eps1) * tf.sin(2 * np.pi * x / eps1) - (np.pi / eps2) * tf.sin(2 * np.pi * x / eps2)
    fside = -1.0*(ax * ux + aeps * uxx)
    return fside
# Example 1
def true_solution2E1(input_dim=None, output_dim=None, q=2, file_name=None):
    """Load the reference ('true') solution stored under key 'utrue' in the
    Matlab data file ``file_name``. Other parameters are unused here."""
    mat_data = matData2pLaplace.loadMatlabIdata(file_name)
    return mat_data['utrue']
def force_side2E1(input_dim=None, output_dim=None):
    """Return the right-hand-side forcing f(x, y) = 1 (a ones tensor shaped like x)."""
    return lambda x, y: 1.0 * tf.ones_like(x)
def boundary2E1(input_dim=None, output_dim=None, left_bottom=0.0, right_top=1.0):
    """Homogeneous (zero) Dirichlet boundary data for the four domain edges.

    Returns callables (left, right, bottom, top), each mapping (x, y) to a zero
    tensor shaped like x. A non-homogeneous exponential variant existed in the
    original as commented-out code and is intentionally not active.
    """
    left_edge = lambda x, y: tf.zeros_like(x)
    right_edge = lambda x, y: tf.zeros_like(x)
    bottom_edge = lambda x, y: tf.zeros_like(x)
    top_edge = lambda x, y: tf.zeros_like(x)
    return left_edge, right_edge, bottom_edge, top_edge
def elliptic_coef2E1(input_dim=None, output_dim=None):
    """Constant diffusion coefficient a(x, y) = 1 (ones tensor shaped like x)."""
    coefficient = lambda x, y: 1.0 * tf.ones_like(x)
    return coefficient
# Example 2
def true_solution2E2(input_dim=None, output_dim=None, q=2, file_name=None):
    """Load the reference ('true') solution stored under key 'utrue' in the
    Matlab data file ``file_name``. Other parameters are unused here."""
    mat_data = matData2pLaplace.loadMatlabIdata(file_name)
    return mat_data['utrue']
def force_side2E2(input_dim=None, output_dim=None):
    """Return the right-hand-side forcing f(x, y) = 1 (a ones tensor shaped like x)."""
    return lambda x, y: 1.0 * tf.ones_like(x)
def boundary2E2(input_dim=None, output_dim=None, left_bottom=0.0, right_top=1.0):
    """Homogeneous (zero) Dirichlet boundary data for the four domain edges.

    Returns callables (left, right, bottom, top), each mapping (x, y) to a zero
    tensor shaped like x. A non-homogeneous exponential variant existed in the
    original as commented-out code and is intentionally not active.
    """
    left_edge = lambda x, y: tf.zeros_like(x)
    right_edge = lambda x, y: tf.zeros_like(x)
    bottom_edge = lambda x, y: tf.zeros_like(x)
    top_edge = lambda x, y: tf.zeros_like(x)
    return left_edge, right_edge, bottom_edge, top_edge
def elliptic_coef2E2(input_dim=None, output_dim=None):
    """Smooth oscillatory coefficient a(x, y) = 2 + sin(3πx) * cos(5πy)."""
    coefficient = lambda x, y: 2.0 + tf.multiply(tf.sin(3 * np.pi * x), tf.cos(5 * np.pi * y))
    return coefficient
# Example 3
def true_solution2E3(input_dim=None, output_dim=None, q=2, file_name=None):
    """Load the reference ('true') solution stored under key 'utrue' in the
    Matlab data file ``file_name``. Other parameters are unused here."""
    mat_data = matData2pLaplace.loadMatlabIdata(file_name)
    return mat_data['utrue']
def force_side2E3(input_dim=None, output_dim=None):
    """Return the right-hand-side forcing f(x, y) = 1 (a ones tensor shaped like x)."""
    return lambda x, y: 1.0 * tf.ones_like(x)
def boundary2E3(input_dim=None, output_dim=None, left_bottom=0.0, right_top=1.0):
    """Homogeneous (zero) Dirichlet boundary data for the four domain edges.

    Returns callables (left, right, bottom, top), each mapping (x, y) to a zero
    tensor shaped like x. A non-homogeneous exponential variant existed in the
    original as commented-out code and is intentionally not active.
    """
    left_edge = lambda x, y: tf.zeros_like(x)
    right_edge = lambda x, y: tf.zeros_like(x)
    bottom_edge = lambda x, y: tf.zeros_like(x)
    top_edge = lambda x, y: tf.zeros_like(x)
    return left_edge, right_edge, bottom_edge, top_edge
def elliptic_coef2E3(input_dim=None, output_dim=None):
    """Multiscale ratio-form diffusion coefficient a(x, y) for example 3.

    Returns a callable a_eps(x, y): the average of five sine/cosine ratios at
    well-separated small scales plus a smooth sin(4 x^2 y^2) + 1 term.
    """
    # Five well-separated small scales (reciprocals of primes-ish integers).
    e1 = 1.0 / 5
    e2 = 1.0 / 13
    e3 = 1.0 / 17
    e4 = 1.0 / 31
    e5 = 1.0 / 65
    # The 1.1 offsets keep each denominator in [0.1, 2.1], i.e. strictly positive.
    a_eps = lambda x, y: (1.0/6)*((1.1+tf.sin(2*np.pi*x/e1))/(1.1+tf.sin(2*np.pi*y/e1)) +
                                  (1.1+tf.sin(2*np.pi*y/e2))/(1.1+tf.cos(2*np.pi*x/e2)) +
                                  (1.1+tf.cos(2*np.pi*x/e3))/(1.1+tf.sin(2*np.pi*y/e3)) +
                                  (1.1+tf.sin(2*np.pi*y/e4))/(1.1+tf.cos(2*np.pi*x/e4)) +
                                  (1.1+tf.cos(2*np.pi*x/e5))/(1.1+tf.sin(2*np.pi*y/e5)) +
                                  tf.sin(4*(x**2)*(y**2))+1)
    return a_eps
# Example 4
def true_solution2E4(input_dim=None, output_dim=None, q=2, file_name=None):
    """Load the reference ('true') solution stored under key 'utrue' in the
    Matlab data file ``file_name``. Other parameters are unused here."""
    mat_data = matData2pLaplace.loadMatlabIdata(file_name)
    return mat_data['utrue']
def force_side2E4(input_dim=None, output_dim=None):
    """Return the right-hand-side forcing f(x, y) = 1 (a ones tensor shaped like x)."""
    return lambda x, y: 1.0 * tf.ones_like(x)
def boundary2E4(input_dim=None, output_dim=None, left_bottom=0.0, right_top=1.0):
    """Homogeneous (zero) Dirichlet boundary data for the four domain edges.

    Returns callables (left, right, bottom, top), each mapping (x, y) to a zero
    tensor shaped like x. A non-homogeneous exponential variant existed in the
    original as commented-out code and is intentionally not active.
    """
    left_edge = lambda x, y: tf.zeros_like(x)
    right_edge = lambda x, y: tf.zeros_like(x)
    bottom_edge = lambda x, y: tf.zeros_like(x)
    top_edge = lambda x, y: tf.zeros_like(x)
    return left_edge, right_edge, bottom_edge, top_edge
def elliptic_coef2E4(input_dim=None, output_dim=None, mesh_num=2):
    """Multiscale oscillatory diffusion coefficient a(x, y) for example 4.

    The coefficient is a product over dyadic frequency scales k = 1..mesh_num:
        prod_k (1 + 0.5*cos(2**k * pi * (x + y))) * (1 + 0.5*sin(2**k * pi * (y - 3*x)))

    The original implementation hard-coded mesh_num in {2, ..., 7} with near-
    identical duplicated lambdas (and raised NameError for any other value).
    This loop form reproduces those cases factor-for-factor, in the same
    left-to-right multiplication order, and generalizes to any mesh_num >= 1.

    Returns a callable a_eps(x, y) operating on tensors.
    """
    def a_eps(x, y):
        prod = 1.0
        for k in range(1, mesh_num + 1):
            omega = (2 ** k) * np.pi  # dyadic angular frequency of scale k
            prod = prod * (1 + 0.5 * tf.cos(omega * (x + y))) * (1 + 0.5 * tf.sin(omega * (y - 3 * x)))
        return prod
    return a_eps
# Example 5
def true_solution2E5(input_dim=None, output_dim=None, q=2, file_name=None):
    """Load the reference ('true') solution for example 5 from a MATLAB .mat file.

    Only *file_name* is used; the other parameters exist to keep the signature
    uniform with the sibling example helpers.
    """
    # The .mat file is expected to contain a variable named 'utrue'.
    loaded = matData2pLaplace.loadMatlabIdata(file_name)
    return loaded['utrue']
def force_side2E5(input_dim=None, output_dim=None):
    """Right-hand side f(x, y) for example 5: the constant function 1 on the grid."""
    def f_side(x, y):
        # Constant forcing, broadcast to the shape of the x input.
        return 1.0 * tf.ones_like(x)
    return f_side
def boundary2E5(input_dim=None, output_dim=None, left_bottom=0.0, right_top=1.0):
    """Homogeneous (zero) Dirichlet boundary data for example 5.

    Returns:
        (left, right, bottom, top): four callables, each mapping (x, y) to an
        all-zero tensor shaped like x.
    """
    def bd_left(x, y):
        return tf.zeros_like(x)

    def bd_right(x, y):
        return tf.zeros_like(x)

    def bd_bottom(x, y):
        return tf.zeros_like(x)

    def bd_top(x, y):
        return tf.zeros_like(x)

    return bd_left, bd_right, bd_bottom, bd_top
def elliptic_coef2E5(input_dim=None, output_dim=None):
    """Multiscale diffusion coefficient a_eps(x, y) for example 5.

    Averages five oscillatory ratio terms with periods 1/5, 1/13, 1/17, 1/31
    and 1/65, plus a smooth sin(4*x^2*y^2) + 1 term; the 1.1 offsets keep
    every denominator strictly positive.
    """
    p1, p2, p3, p4, p5 = 1.0 / 5, 1.0 / 13, 1.0 / 17, 1.0 / 31, 1.0 / 65

    def a_eps(x, y):
        term1 = (1.1 + tf.sin(2 * np.pi * x / p1)) / (1.1 + tf.sin(2 * np.pi * y / p1))
        term2 = (1.1 + tf.sin(2 * np.pi * y / p2)) / (1.1 + tf.cos(2 * np.pi * x / p2))
        term3 = (1.1 + tf.cos(2 * np.pi * x / p3)) / (1.1 + tf.sin(2 * np.pi * y / p3))
        term4 = (1.1 + tf.sin(2 * np.pi * y / p4)) / (1.1 + tf.cos(2 * np.pi * x / p4))
        term5 = (1.1 + tf.cos(2 * np.pi * x / p5)) / (1.1 + tf.sin(2 * np.pi * y / p5))
        # Same summation order as the original expression.
        return (1.0 / 6) * (term1 + term2 + term3 + term4 + term5 +
                            tf.sin(4 * (x ** 2) * (y ** 2)) + 1)

    return a_eps
# Example 6
def true_solution2E6(input_dim=None, output_dim=None, q=2, file_name=None):
    """Load the reference solution for example 6 from a MATLAB .mat file.

    Only *file_name* matters; the remaining arguments keep the signature
    uniform with the other example helpers.
    """
    data = matData2pLaplace.loadMatlabIdata(file_name)
    # Variable name inside the .mat file.
    utrue = data['utrue']
    return utrue
def force_side2E6(input_dim=None, output_dim=None):
    """Right-hand side f(x, y) for example 6: constant 1 over the grid."""
    def f_side(x, y):
        return 1.0 * tf.ones_like(x)
    return f_side
def boundary2E6(input_dim=None, output_dim=None, left_bottom=0.0, right_top=1.0):
    """Homogeneous (zero) Dirichlet boundary data for example 6.

    Returns:
        (left, right, bottom, top): four callables, each mapping (x, y) to an
        all-zero tensor shaped like x.
    """
    def bd_left(x, y):
        return tf.zeros_like(x)

    def bd_right(x, y):
        return tf.zeros_like(x)

    def bd_bottom(x, y):
        return tf.zeros_like(x)

    def bd_top(x, y):
        return tf.zeros_like(x)

    return bd_left, bd_right, bd_bottom, bd_top
def elliptic_coef2E6(input_dim=None, output_dim=None):
    """Multiscale diffusion coefficient a_eps(x, y) for example 6.

    Identical construction to elliptic_coef2E5: five oscillatory ratio terms
    with periods 1/5, 1/13, 1/17, 1/31, 1/65 plus a smooth sin(4*x^2*y^2) + 1
    term, averaged by 1/6.
    """
    periods = (1.0 / 5, 1.0 / 13, 1.0 / 17, 1.0 / 31, 1.0 / 65)

    def a_eps(x, y):
        q1, q2, q3, q4, q5 = periods
        r1 = (1.1 + tf.sin(2 * np.pi * x / q1)) / (1.1 + tf.sin(2 * np.pi * y / q1))
        r2 = (1.1 + tf.sin(2 * np.pi * y / q2)) / (1.1 + tf.cos(2 * np.pi * x / q2))
        r3 = (1.1 + tf.cos(2 * np.pi * x / q3)) / (1.1 + tf.sin(2 * np.pi * y / q3))
        r4 = (1.1 + tf.sin(2 * np.pi * y / q4)) / (1.1 + tf.cos(2 * np.pi * x / q4))
        r5 = (1.1 + tf.cos(2 * np.pi * x / q5)) / (1.1 + tf.sin(2 * np.pi * y / q5))
        # Summation order matches the original expression.
        return (1.0 / 6) * (r1 + r2 + r3 + r4 + r5 +
                            tf.sin(4 * (x ** 2) * (y ** 2)) + 1)

    return a_eps
# Example 7
def true_solution2E7(input_dim=None, output_dim=None, eps=0.1):
    """Analytic true solution for example 7: a coarse plus a fine sine product."""
    def utrue(x, y):
        coarse = 0.5 * tf.sin(np.pi * x) * tf.sin(np.pi * y)
        fine = 0.025 * tf.sin(10 * np.pi * x) * tf.sin(10 * np.pi * y)
        return coarse + fine
    return utrue
def force_side2E7(input_dim=None, output_dim=None):
    """Right-hand side f(x, y) for example 7, written as a hand-expanded product.

    The three summands have the shape of an expanded f = -div(a * grad u):
    two coefficient-gradient cross terms followed by a coefficient-times-
    Laplacian term.
    NOTE(review): the individual frequencies and amplitudes are taken from the
    original code and have not been re-derived here against true_solution2E7
    and elliptic_coef2E7 — verify before relying on exact values.
    """
    f_side = lambda x, y: 5*((np.pi)**2)*(0.5*tf.sin(np.pi*x)*tf.cos(np.pi*y)+0.25*tf.sin(10*np.pi*x)*tf.cos(10*np.pi*y))*\
                          (0.25*tf.cos(5*np.pi*x)*tf.sin(10*np.pi*y)+0.5*tf.cos(15*np.pi*x)*tf.sin(20*np.pi*y))+ \
                          5*((np.pi)**2)*(0.5*tf.cos(np.pi*x)*tf.sin(np.pi*y)+0.25*tf.cos(10*np.pi*x)*tf.sin(10*np.pi*y))*\
                          (0.125*tf.sin(5*np.pi*x)*tf.cos(10*np.pi*y)+0.125*3*tf.sin(15*np.pi*x)*tf.cos(20*np.pi*y))+\
                          ((np.pi)**2)*(tf.sin(np.pi*x)*tf.sin(np.pi*y)+5*tf.sin(10*np.pi*x)*tf.sin(10*np.pi*y))*\
                          (0.125*tf.cos(5*np.pi*x)*tf.cos(10*np.pi*y)+0.125*tf.cos(15*np.pi*x)*tf.cos(20*np.pi*y)+0.5)
    return f_side
def boundary2E7(input_dim=None, output_dim=None, left_bottom=0.0, right_top=1.0, eps=0.1):
    """Homogeneous (zero) Dirichlet boundary data for example 7.

    Returns:
        (left, right, bottom, top): four callables, each mapping (x, y) to an
        all-zero tensor shaped like x.
    """
    def bd_left(x, y):
        return tf.zeros_like(x)

    def bd_right(x, y):
        return tf.zeros_like(x)

    def bd_bottom(x, y):
        return tf.zeros_like(x)

    def bd_top(x, y):
        return tf.zeros_like(x)

    return bd_left, bd_right, bd_bottom, bd_top
def elliptic_coef2E7(input_dim=None, output_dim=None, eps=0.1):
    """Smooth two-frequency diffusion coefficient for example 7.

    a_eps(x, y) = 0.5 + 0.125*cos(5*pi*x)*cos(10*pi*y)
                      + 0.125*cos(15*pi*x)*cos(20*pi*y), so values stay in
    [0.25, 0.75] (each cosine product is bounded by 1 in magnitude).
    """
    def a_eps(x, y):
        low = 0.125 * tf.cos(5 * np.pi * x) * tf.cos(10 * np.pi * y)
        high = 0.125 * tf.cos(15 * np.pi * x) * tf.cos(20 * np.pi * y)
        return 0.5 + low + high
    return a_eps
def get_infos2pLaplace_2D(input_dim=1, out_dim=1, mesh_number=2, intervalL=0.0, intervalR=1.0, equa_name=None):
    """Collect problem data for a 2D multiscale elliptic example.

    Args:
        input_dim, out_dim: passed through to the per-example helpers.
        mesh_number: selects the mesh-specific .mat file and coefficient level.
        intervalL, intervalR: domain bounds (overridden to the example's own
            domain inside most branches).
        equa_name: one of 'multi_scale2D_1' ... 'multi_scale2D_6'.

    Returns:
        (u_true, f, A_eps, u_left, u_right, u_bottom, u_top): reference
        solution loaded from disk, forcing term, diffusion coefficient and the
        four Dirichlet boundary callables.

    Raises:
        ValueError: if *equa_name* does not name a known example (the original
            code fell through and raised UnboundLocalError at the return).

    NOTE(review): example-7 helpers (force_side2E7, elliptic_coef2E7, ...)
    exist in this module but no 'multi_scale2D_7' branch is wired here —
    confirm whether that example should be selectable.
    """
    if equa_name == 'multi_scale2D_1':
        f = force_side2E1(input_dim, out_dim)  # f is a vector of grid values
        u_true_filepath = 'dataMat2pLaplace/E1/' + str('utrue') + str(mesh_number) + str('.mat')
        u_true = true_solution2E1(input_dim, out_dim, q=mesh_number, file_name=u_true_filepath)
        u_left, u_right, u_bottom, u_top = boundary2E1(input_dim, out_dim, intervalL, intervalR)
        # A_eps acts on every grid-point value of u, so it must be evaluable
        # pointwise on the grid, just like u.
        A_eps = elliptic_coef2E1(input_dim, out_dim)
    elif equa_name == 'multi_scale2D_2':
        intervalL = -1.0
        intervalR = 1.0
        f = force_side2E2(input_dim, out_dim)
        u_true_filepath = 'dataMat2pLaplace/E2/' + str('utrue') + str(mesh_number) + str('.mat')
        u_true = true_solution2E2(input_dim, out_dim, q=mesh_number, file_name=u_true_filepath)
        u_left, u_right, u_bottom, u_top = boundary2E2(input_dim, out_dim, intervalL, intervalR)
        A_eps = elliptic_coef2E2(input_dim, out_dim)
    elif equa_name == 'multi_scale2D_3':
        intervalL = -1.0
        intervalR = 1.0
        f = force_side2E3(input_dim, out_dim)
        u_true_filepath = 'dataMat2pLaplace/E3/' + str('utrue') + str(mesh_number) + str('.mat')
        u_true = true_solution2E3(input_dim, out_dim, q=mesh_number, file_name=u_true_filepath)
        u_left, u_right, u_bottom, u_top = boundary2E3(input_dim, out_dim, intervalL, intervalR)
        A_eps = elliptic_coef2E3(input_dim, out_dim)
    elif equa_name == 'multi_scale2D_4':
        intervalL = -1.0
        intervalR = 1.0
        f = force_side2E4(input_dim, out_dim)
        u_true_filepath = 'dataMat2pLaplace/E4/' + str('utrue') + str(mesh_number) + str('.mat')
        u_true = true_solution2E4(input_dim, out_dim, q=mesh_number, file_name=u_true_filepath)
        u_left, u_right, u_bottom, u_top = boundary2E4(input_dim, out_dim, intervalL, intervalR)
        # Example 4 is the only coefficient that depends on the mesh level.
        A_eps = elliptic_coef2E4(input_dim, out_dim, mesh_num=mesh_number)
    elif equa_name == 'multi_scale2D_5':
        intervalL = 0
        intervalR = 1.0
        f = force_side2E5(input_dim, out_dim)
        u_true_filepath = 'dataMat2pLaplace/E5/' + str('utrue') + str(mesh_number) + str('.mat')
        u_true = true_solution2E5(input_dim, out_dim, q=mesh_number, file_name=u_true_filepath)
        u_left, u_right, u_bottom, u_top = boundary2E5(input_dim, out_dim, intervalL, intervalR)
        A_eps = elliptic_coef2E5(input_dim, out_dim)
    elif equa_name == 'multi_scale2D_6':
        intervalL = -1.0
        intervalR = 1.0
        f = force_side2E6(input_dim, out_dim)
        u_true_filepath = 'dataMat2pLaplace/E6/' + str('utrue') + str(mesh_number) + str('.mat')
        u_true = true_solution2E6(input_dim, out_dim, q=mesh_number, file_name=u_true_filepath)
        u_left, u_right, u_bottom, u_top = boundary2E6(input_dim, out_dim, intervalL, intervalR)
        A_eps = elliptic_coef2E6(input_dim, out_dim)
    else:
        raise ValueError('Unknown equa_name for 2D examples: {}'.format(equa_name))
    return u_true, f, A_eps, u_left, u_right, u_bottom, u_top
def get_infos2pLaplace_3D(input_dim=1, out_dim=1, mesh_number=2, intervalL=0.0, intervalR=1.0, equa_name=None):
    """Collect problem data for a 3D multiscale elliptic example on [intervalL, intervalR]^3.

    Returns:
        (u_true, fside, A_eps, u_00, u_01, u_10, u_11, u_20, u_21): the
        manufactured solution, right-hand side, diffusion coefficient, and the
        Dirichlet data on the x-faces (u_00, u_01), y-faces (u_10, u_11) and
        z-faces (u_20, u_21). All returned objects are elementwise callables.

    Raises:
        ValueError: if *equa_name* is not a supported example name (the
            original code silently returned None).
    """
    if equa_name == 'multi_scale3D_2':
        fside = lambda x, y, z: 3.0*((np.pi)**2)*(1.0+tf.cos(np.pi*x)*tf.cos(3*np.pi*y)*tf.cos(5*np.pi*z))*\
                                (tf.sin(np.pi*x) * tf.sin(np.pi*y) * tf.sin(np.pi*z))+\
                                ((np.pi)**2)*(tf.sin(np.pi*x)*tf.cos(3*np.pi*y)*tf.cos(5*np.pi*z)*tf.cos(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z))+\
                                3.0*((np.pi)** 2)*(tf.cos(np.pi*x)*tf.sin(3*np.pi*y)*tf.cos(5*np.pi*z)*tf.sin(np.pi*x)*tf.cos(np.pi*y)*tf.sin(np.pi*z))+ \
                                5.0*((np.pi)**2)*(tf.cos(np.pi*x)*tf.cos(3*np.pi*y)*tf.sin(5*np.pi*z)*tf.sin(np.pi * x)*tf.sin(np.pi*y)*tf.cos(np.pi*z))
        u_true = lambda x, y, z: tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * z)
        A_eps = lambda x, y, z: 1.0 + tf.cos(np.pi * x) * tf.cos(3 * np.pi * y) * tf.cos(5 * np.pi * z)
        u_00 = lambda x, y, z: tf.sin(np.pi * intervalL) * tf.sin(np.pi * y) * tf.sin(np.pi * z)
        u_01 = lambda x, y, z: tf.sin(np.pi * intervalR) * tf.sin(np.pi * y) * tf.sin(np.pi * z)
        u_10 = lambda x, y, z: tf.sin(np.pi * x) * tf.sin(np.pi * intervalL) * tf.sin(np.pi * z)
        u_11 = lambda x, y, z: tf.sin(np.pi * x) * tf.sin(np.pi * intervalR) * tf.sin(np.pi * z)
        u_20 = lambda x, y, z: tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * intervalL)
        u_21 = lambda x, y, z: tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * intervalR)
        return u_true, fside, A_eps, u_00, u_01, u_10, u_11, u_20, u_21
    elif equa_name == 'multi_scale3D_3':
        fside = lambda x, y, z: (63/4)*((np.pi)**2)*(1.0+tf.cos(np.pi*x)*tf.cos(10*np.pi*y)*tf.cos(20*np.pi*z))*\
                                (tf.sin(np.pi*x) * tf.sin(5*np.pi*y) * tf.sin(10*np.pi*z))+\
                                0.125*((np.pi)**2)*tf.sin(np.pi*x)*tf.cos(10*np.pi*y)*tf.cos(20*np.pi*z)*tf.cos(np.pi*x)*tf.sin(5*np.pi*y)*tf.sin(10*np.pi*z)+\
                                (25/4)*((np.pi)** 2)*tf.cos(np.pi*x)*tf.sin(10*np.pi*y)*tf.cos(20*np.pi*z)*tf.sin(np.pi*x)*tf.cos(5*np.pi*y)*tf.sin(10*np.pi*z)+ \
                                25.0*((np.pi)**2)*tf.cos(np.pi*x)*tf.cos(10*np.pi*y)*tf.sin(20*np.pi*z)*tf.sin(np.pi * x)*tf.sin(5*np.pi*y)*tf.cos(10*np.pi*z)
        u_true = lambda x, y, z: 0.5*tf.sin(np.pi * x) * tf.sin(5*np.pi * y) * tf.sin(10*np.pi * z)
        A_eps = lambda x, y, z: 0.25*(1.0 + tf.cos(np.pi * x) * tf.cos(10 * np.pi * y) * tf.cos(20 * np.pi * z))
        # BUG FIX: the boundary data now restricts u_true to each face; the
        # original dropped the 5*pi (y) / 10*pi (z) frequencies on some faces.
        u_00 = lambda x, y, z: 0.5*tf.sin(np.pi * intervalL) * tf.sin(5*np.pi * y) * tf.sin(10*np.pi * z)
        u_01 = lambda x, y, z: 0.5*tf.sin(np.pi * intervalR) * tf.sin(5*np.pi * y) * tf.sin(10*np.pi * z)
        u_10 = lambda x, y, z: 0.5*tf.sin(np.pi * x) * tf.sin(5*np.pi * intervalL) * tf.sin(10*np.pi * z)
        u_11 = lambda x, y, z: 0.5*tf.sin(np.pi * x) * tf.sin(5*np.pi * intervalR) * tf.sin(10*np.pi * z)
        u_20 = lambda x, y, z: 0.5*tf.sin(np.pi * x) * tf.sin(5*np.pi * y) * tf.sin(10*np.pi * intervalL)
        u_21 = lambda x, y, z: 0.5*tf.sin(np.pi * x) * tf.sin(5*np.pi * y) * tf.sin(10*np.pi * intervalR)
        return u_true, fside, A_eps, u_00, u_01, u_10, u_11, u_20, u_21
    elif equa_name == 'multi_scale3D_4':
        # NOTE(review): fside is a constant placeholder here; the manufactured
        # force for this example is computed by get_force2pLaplace3D — confirm intended.
        fside = lambda x, y, z: tf.ones_like(x)
        u_true = lambda x, y, z: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)+0.05*tf.sin(10*np.pi*x)*tf.sin(20*np.pi*y)*tf.sin(40*np.pi*z)
        A_eps = lambda x, y, z: 0.5 * (2.0 + tf.cos(10*np.pi * x) * tf.cos(20 * np.pi * y) * tf.cos(40 * np.pi * z))
        u_00 = lambda x, y, z: tf.sin(np.pi*intervalL)*tf.sin(np.pi*y)*tf.sin(np.pi*z)+0.05*tf.sin(10*np.pi*intervalL)*tf.sin(20*np.pi*y)*tf.sin(40*np.pi*z)
        u_01 = lambda x, y, z: tf.sin(np.pi*intervalR)*tf.sin(np.pi*y)*tf.sin(np.pi*z)+0.05*tf.sin(10*np.pi*intervalR)*tf.sin(20*np.pi*y)*tf.sin(40*np.pi*z)
        u_10 = lambda x, y, z: tf.sin(np.pi*x)*tf.sin(np.pi*intervalL)*tf.sin(np.pi*z)+0.05*tf.sin(10*np.pi*x)*tf.sin(20*np.pi*intervalL)*tf.sin(40*np.pi*z)
        u_11 = lambda x, y, z: tf.sin(np.pi*x)*tf.sin(np.pi*intervalR)*tf.sin(np.pi*z)+0.05*tf.sin(10*np.pi*x)*tf.sin(20*np.pi*intervalR)*tf.sin(40*np.pi*z)
        u_20 = lambda x, y, z: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*intervalL)+0.05*tf.sin(10*np.pi*x)*tf.sin(20*np.pi*y)*tf.sin(40*np.pi*intervalL)
        u_21 = lambda x, y, z: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*intervalR)+0.05*tf.sin(10*np.pi*x)*tf.sin(20*np.pi*y)*tf.sin(40*np.pi*intervalR)
        return u_true, fside, A_eps, u_00, u_01, u_10, u_11, u_20, u_21
    elif equa_name == 'multi_scale3D_5':
        # NOTE(review): fside is a constant placeholder; see get_force2pLaplace3D.
        fside = lambda x, y, z: tf.ones_like(x)
        u_true = lambda x, y, z: tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * z) + \
                                 0.05 * tf.sin(10 * np.pi * x) * tf.sin(10 * np.pi * y) * tf.sin(10 * np.pi * z)
        A_eps = lambda x, y, z: 0.5 * (1.0 + tf.cos(10.0*np.pi * x) * tf.cos(10.0 * np.pi * y) * tf.cos(10.0 * np.pi * z))
        # BUG FIX: the original returned u_true itself for all six faces; the
        # Dirichlet data below evaluates u_true on each face instead.
        u_00 = lambda x, y, z: tf.sin(np.pi * intervalL) * tf.sin(np.pi * y) * tf.sin(np.pi * z) + \
                               0.05 * tf.sin(10 * np.pi * intervalL) * tf.sin(10 * np.pi * y) * tf.sin(10 * np.pi * z)
        u_01 = lambda x, y, z: tf.sin(np.pi * intervalR) * tf.sin(np.pi * y) * tf.sin(np.pi * z) + \
                               0.05 * tf.sin(10 * np.pi * intervalR) * tf.sin(10 * np.pi * y) * tf.sin(10 * np.pi * z)
        u_10 = lambda x, y, z: tf.sin(np.pi * x) * tf.sin(np.pi * intervalL) * tf.sin(np.pi * z) + \
                               0.05 * tf.sin(10 * np.pi * x) * tf.sin(10 * np.pi * intervalL) * tf.sin(10 * np.pi * z)
        u_11 = lambda x, y, z: tf.sin(np.pi * x) * tf.sin(np.pi * intervalR) * tf.sin(np.pi * z) + \
                               0.05 * tf.sin(10 * np.pi * x) * tf.sin(10 * np.pi * intervalR) * tf.sin(10 * np.pi * z)
        u_20 = lambda x, y, z: tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * intervalL) + \
                               0.05 * tf.sin(10 * np.pi * x) * tf.sin(10 * np.pi * y) * tf.sin(10 * np.pi * intervalL)
        u_21 = lambda x, y, z: tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * intervalR) + \
                               0.05 * tf.sin(10 * np.pi * x) * tf.sin(10 * np.pi * y) * tf.sin(10 * np.pi * intervalR)
        return u_true, fside, A_eps, u_00, u_01, u_10, u_11, u_20, u_21
    elif equa_name == 'multi_scale3D_6':
        fside = lambda x, y, z: tf.ones_like(x)
        u_true = lambda x, y, z: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)+0.05*tf.sin(20*np.pi*x)*tf.sin(20*np.pi*y)*tf.sin(20*np.pi*z)
        A_eps = lambda x, y, z: 0.25*(2.0+tf.cos(np.pi*x)*tf.cos(np.pi*y)*tf.cos(np.pi*z)+tf.cos(20.0*np.pi*x)*tf.cos(20.0*np.pi*y)*tf.cos(20.0*np.pi*z))
        u_00 = lambda x, y, z: tf.sin(np.pi * intervalL) * tf.sin(np.pi * y) * tf.sin(np.pi * z) + 0.05 * tf.sin(
            20 * np.pi * intervalL) * tf.sin(20 * np.pi * y) * tf.sin(20 * np.pi * z)
        u_01 = lambda x, y, z: tf.sin(np.pi * intervalR) * tf.sin(np.pi * y) * tf.sin(np.pi * z) + 0.05 * tf.sin(
            20 * np.pi * intervalR) * tf.sin(20 * np.pi * y) * tf.sin(20 * np.pi * z)
        u_10 = lambda x, y, z: tf.sin(np.pi * x) * tf.sin(np.pi * intervalL) * tf.sin(np.pi * z) + 0.05 * tf.sin(
            20 * np.pi * x) * tf.sin(20 * np.pi * intervalL) * tf.sin(20 * np.pi * z)
        u_11 = lambda x, y, z: tf.sin(np.pi * x) * tf.sin(np.pi * intervalR) * tf.sin(np.pi * z) + 0.05 * tf.sin(
            20 * np.pi * x) * tf.sin(20 * np.pi * intervalR) * tf.sin(20 * np.pi * z)
        u_20 = lambda x, y, z: tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * intervalL) + 0.05 * tf.sin(
            20 * np.pi * x) * tf.sin(20 * np.pi * y) * tf.sin(20 * np.pi * intervalL)
        u_21 = lambda x, y, z: tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * intervalR) + 0.05 * tf.sin(
            20 * np.pi * x) * tf.sin(20 * np.pi * y) * tf.sin(20 * np.pi * intervalR)
        return u_true, fside, A_eps, u_00, u_01, u_10, u_11, u_20, u_21
    elif equa_name == 'multi_scale3D_7':
        fside = lambda x, y, z: tf.ones_like(x)
        u_true = lambda x, y, z: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)+0.05*tf.sin(10.0*np.pi*x)*tf.sin(25.0*np.pi*y)*tf.sin(50.0*np.pi*z)
        A_eps = lambda x, y, z: 0.25*(2.0+tf.cos(np.pi*x)*tf.cos(np.pi*y)*tf.cos(np.pi*z)+tf.cos(10.0*np.pi*x)*tf.cos(25.0*np.pi*y)*tf.cos(50.0*np.pi*z))
        u_00 = lambda x, y, z: tf.sin(np.pi * intervalL) * tf.sin(np.pi * y) * tf.sin(np.pi * z) + 0.05 * tf.sin(
            10 * np.pi * intervalL) * tf.sin(25.0 * np.pi * y) * tf.sin(50.0 * np.pi * z)
        u_01 = lambda x, y, z: tf.sin(np.pi * intervalR) * tf.sin(np.pi * y) * tf.sin(np.pi * z) + 0.05 * tf.sin(
            10 * np.pi * intervalR) * tf.sin(25.0 * np.pi * y) * tf.sin(50.0 * np.pi * z)
        u_10 = lambda x, y, z: tf.sin(np.pi * x) * tf.sin(np.pi * intervalL) * tf.sin(np.pi * z) + 0.05 * tf.sin(
            10 * np.pi * x) * tf.sin(25.0 * np.pi * intervalL) * tf.sin(50.0 * np.pi * z)
        u_11 = lambda x, y, z: tf.sin(np.pi * x) * tf.sin(np.pi * intervalR) * tf.sin(np.pi * z) + 0.05 * tf.sin(
            10 * np.pi * x) * tf.sin(25.0 * np.pi * intervalR) * tf.sin(50.0 * np.pi * z)
        u_20 = lambda x, y, z: tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * intervalL) + 0.05 * tf.sin(
            10 * np.pi * x) * tf.sin(25.0 * np.pi * y) * tf.sin(50.0 * np.pi * intervalL)
        u_21 = lambda x, y, z: tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * intervalR) + 0.05 * tf.sin(
            10 * np.pi * x) * tf.sin(25.0 * np.pi * y) * tf.sin(50.0 * np.pi * intervalR)
        return u_true, fside, A_eps, u_00, u_01, u_10, u_11, u_20, u_21
    else:
        raise ValueError('Unknown equa_name for 3D examples: {}'.format(equa_name))
def get_force2pLaplace3D(x=None, y=None, z=None, equa_name='multi_scale3D_5'):
    """Evaluate the manufactured right-hand side at the points (x, y, z).

    For each example, the hand-written partial derivatives of the manufactured
    solution u and coefficient A are combined as
    f = -(A_x*u_x + A_y*u_y + A_z*u_z + A*(u_xx + u_yy + u_zz)).

    Raises:
        ValueError: if *equa_name* is not a supported example name (the
            original code silently returned None).
    """
    if equa_name == 'multi_scale3D_4':
        # Reference solution (unused here):
        # u = sin(pi x)sin(pi y)sin(pi z) + 0.05 sin(10pi x)sin(20pi y)sin(40pi z)
        Aeps = 0.5 * (2.0 + tf.cos(10*np.pi * x) * tf.cos(20 * np.pi * y) * tf.cos(40 * np.pi * z))
        ux = np.pi*tf.cos(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)+0.05*10*np.pi*tf.cos(10*np.pi*x)*tf.sin(20*np.pi*y)*tf.sin(40*np.pi*z)
        uy = np.pi*tf.sin(np.pi*x)*tf.cos(np.pi*y)*tf.sin(np.pi*z)+0.05*20*np.pi*tf.sin(10*np.pi*x)*tf.cos(20*np.pi*y)*tf.sin(40*np.pi*z)
        uz = np.pi*tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.cos(np.pi*z)+0.05*40*np.pi*tf.sin(10*np.pi*x)*tf.sin(20*np.pi*y)*tf.cos(40*np.pi*z)
        uxx = -1.0 * np.pi * np.pi * tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * z) - \
              0.05 * 100 * np.pi * np.pi * tf.sin(10 * np.pi * x) * tf.sin(20 * np.pi * y) * tf.sin(40 * np.pi * z)
        uyy = -1.0 * np.pi * np.pi * tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * z) - \
              0.05 * 400 * np.pi * np.pi * tf.sin(10 * np.pi * x) * tf.sin(20 * np.pi * y) * tf.sin(40 * np.pi * z)
        uzz = -1.0 * np.pi * np.pi * tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * z) - \
              0.05 * 1600 * np.pi * np.pi * tf.sin(10 * np.pi * x) * tf.sin(20 * np.pi * y) * tf.sin(40 * np.pi * z)
        Aepsx = -0.5 * 10 * np.pi * tf.sin(10 * np.pi * x) * tf.cos(20 * np.pi * y) * tf.cos(40 * np.pi * z)
        Aepsy = -0.5 * 20 * np.pi * tf.cos(10 * np.pi * x) * tf.sin(20 * np.pi * y) * tf.cos(40 * np.pi * z)
        Aepsz = -0.5 * 40 * np.pi * tf.cos(10 * np.pi * x) * tf.cos(20 * np.pi * y) * tf.sin(40 * np.pi * z)
        fside = -1.0 * (Aepsx * ux + Aepsy * uy + Aepsz * uz + Aeps * (uxx + uyy + uzz))
        return fside
    elif equa_name == 'multi_scale3D_5':
        # Reference solution (unused here):
        # u = sin(pi x)sin(pi y)sin(pi z) + 0.05 sin(10pi x)sin(10pi y)sin(10pi z)
        Aeps = 0.5 * (1.0 + tf.cos(10.0 * np.pi * x) * tf.cos(10.0 * np.pi * y) * tf.cos(10.0 * np.pi * z))
        ux = np.pi * tf.cos(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * z) + 0.5 * np.pi * tf.cos(
            10 * np.pi * x) * tf.sin(10 * np.pi * y) * tf.sin(10 * np.pi * z)
        uy = np.pi * tf.sin(np.pi * x) * tf.cos(np.pi * y) * tf.sin(np.pi * z) + 0.5 * np.pi * tf.sin(
            10 * np.pi * x) * tf.cos(10 * np.pi * y) * tf.sin(10 * np.pi * z)
        uz = np.pi * tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.cos(np.pi * z) + 0.5 * np.pi * tf.sin(
            10 * np.pi * x) * tf.sin(10 * np.pi * y) * tf.cos(10 * np.pi * z)
        uxx = -1.0 * np.pi * np.pi * tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(
            np.pi * z) - 5.0 * np.pi * np.pi * tf.sin(10 * np.pi * x) * tf.sin(10 * np.pi * y) * tf.sin(10 * np.pi * z)
        uyy = -1.0 * np.pi * np.pi * tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(
            np.pi * z) - 5.0 * np.pi * np.pi * tf.sin(10 * np.pi * x) * tf.sin(10 * np.pi * y) * tf.sin(10 * np.pi * z)
        uzz = -1.0 * np.pi * np.pi * tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(
            np.pi * z) - 5.0 * np.pi * np.pi * tf.sin(10 * np.pi * x) * tf.sin(10 * np.pi * y) * tf.sin(10 * np.pi * z)
        Aepsx = -0.5 * 10.0 * np.pi * tf.sin(10.0 * np.pi * x) * tf.cos(10.0 * np.pi * y) * tf.cos(10.0 * np.pi * z)
        Aepsy = -0.5 * 10.0 * np.pi * tf.cos(10.0 * np.pi * x) * tf.sin(10.0 * np.pi * y) * tf.cos(10.0 * np.pi * z)
        Aepsz = -0.5 * 10.0 * np.pi * tf.cos(10.0 * np.pi * x) * tf.cos(10.0 * np.pi * y) * tf.sin(10.0 * np.pi * z)
        fside = -1.0 * (Aepsx * ux + Aepsy * uy + Aepsz * uz + Aeps * (uxx + uyy + uzz))
        return fside
    elif equa_name == 'multi_scale3D_6':
        # Reference solution (unused here):
        # u = sin(pi x)sin(pi y)sin(pi z) + 0.05 sin(20pi x)sin(20pi y)sin(20pi z)
        Aeps = 0.25*(2.0+tf.cos(np.pi*x)*tf.cos(np.pi*y)*tf.cos(np.pi*z)+tf.cos(20.0*np.pi*x)*tf.cos(20.0*np.pi*y)*tf.cos(20.0*np.pi*z))
        ux = np.pi*tf.cos(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)+1.0*np.pi*tf.cos(20*np.pi*x)*tf.sin(20*np.pi*y)*tf.sin(20*np.pi*z)
        uy = np.pi*tf.sin(np.pi*x)*tf.cos(np.pi*y)*tf.sin(np.pi*z)+1.0*np.pi*tf.sin(20*np.pi*x)*tf.cos(20*np.pi*y)*tf.sin(20*np.pi*z)
        uz = np.pi*tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.cos(np.pi*z)+1.0*np.pi*tf.sin(20*np.pi*x)*tf.sin(20*np.pi*y)*tf.cos(20*np.pi*z)
        uxx = -1.0*np.pi*np.pi*tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)-20.0*np.pi*np.pi*tf.sin(20*np.pi*x)*tf.sin(20*np.pi*y)*tf.sin(20*np.pi*z)
        uyy = -1.0*np.pi*np.pi*tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)-20.0*np.pi*np.pi*tf.sin(20*np.pi*x)*tf.sin(20*np.pi*y)*tf.sin(20*np.pi*z)
        uzz = -1.0*np.pi*np.pi*tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)-20.0*np.pi*np.pi*tf.sin(20*np.pi*x)*tf.sin(20*np.pi*y)*tf.sin(20*np.pi*z)
        Aepsx = -0.25*np.pi*tf.sin(np.pi*x)*tf.cos(np.pi*y)*tf.cos(np.pi*z) - 5.0*np.pi*tf.sin(20*np.pi*x)*tf.cos(20*np.pi*y)*tf.cos(20*np.pi*z)
        Aepsy = -0.25*np.pi*tf.cos(np.pi*x)*tf.sin(np.pi*y)*tf.cos(np.pi*z) - 5.0*np.pi*tf.cos(20*np.pi*x)*tf.sin(20*np.pi*y)*tf.cos(20*np.pi*z)
        Aepsz = -0.25*np.pi*tf.cos(np.pi*x)*tf.cos(np.pi*y)*tf.sin(np.pi*z) - 5.0*np.pi*tf.cos(20*np.pi*x)*tf.cos(20*np.pi*y)*tf.sin(20*np.pi*z)
        fside = -1.0 * (Aepsx * ux + Aepsy * uy + Aepsz * uz + Aeps * (uxx + uyy + uzz))
        return fside
    elif equa_name == 'multi_scale3D_7':
        # Reference solution (unused here):
        # u = sin(pi x)sin(pi y)sin(pi z) + 0.05 sin(10pi x)sin(25pi y)sin(50pi z)
        Aeps = 0.25*(2.0+tf.cos(np.pi*x)*tf.cos(np.pi*y)*tf.cos(np.pi*z)+tf.cos(10.0*np.pi*x)*tf.cos(25.0*np.pi*y)*tf.cos(50.0*np.pi*z))
        ux = np.pi*tf.cos(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)+0.05*10*np.pi*tf.cos(10*np.pi*x)*tf.sin(25*np.pi*y)*tf.sin(50*np.pi*z)
        uy = np.pi*tf.sin(np.pi*x)*tf.cos(np.pi*y)*tf.sin(np.pi*z)+0.05*25*np.pi*tf.sin(10*np.pi*x)*tf.cos(25*np.pi*y)*tf.sin(50*np.pi*z)
        uz = np.pi*tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.cos(np.pi*z)+0.05*50*np.pi*tf.sin(10*np.pi*x)*tf.sin(25*np.pi*y)*tf.cos(50*np.pi*z)
        uxx = -1.0*np.pi*np.pi*tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)-0.05*100*np.pi*np.pi*tf.sin(10*np.pi*x)*tf.sin(25*np.pi*y)*tf.sin(50*np.pi*z)
        uyy = -1.0*np.pi*np.pi*tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)-0.05*625*np.pi*np.pi*tf.sin(10*np.pi*x)*tf.sin(25*np.pi*y)*tf.sin(50*np.pi*z)
        uzz = -1.0*np.pi*np.pi*tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)-0.05*2500*np.pi*np.pi*tf.sin(10*np.pi*x)*tf.sin(25*np.pi*y)*tf.sin(50*np.pi*z)
        Aepsx = -0.25*np.pi*tf.sin(np.pi*x)*tf.cos(np.pi*y)*tf.cos(np.pi*z) - 0.25*10*np.pi*tf.sin(10*np.pi*x)*tf.cos(25*np.pi*y)*tf.cos(50*np.pi*z)
        Aepsy = -0.25*np.pi*tf.cos(np.pi*x)*tf.sin(np.pi*y)*tf.cos(np.pi*z) - 0.25*25*np.pi*tf.cos(10*np.pi*x)*tf.sin(25*np.pi*y)*tf.cos(50*np.pi*z)
        Aepsz = -0.25*np.pi*tf.cos(np.pi*x)*tf.cos(np.pi*y)*tf.sin(np.pi*z) - 0.25*50*np.pi*tf.cos(10*np.pi*x)*tf.cos(25*np.pi*y)*tf.sin(50*np.pi*z)
        fside = -1.0 * (Aepsx * ux + Aepsy * uy + Aepsz * uz + Aeps * (uxx + uyy + uzz))
        return fside
    else:
        raise ValueError('Unknown equa_name for 3D force: {}'.format(equa_name))
def get_infos2pLaplace_4D(input_dim=1, out_dim=1, mesh_number=2, intervalL=0.0, intervalR=1.0, equa_name=None):
    """Collect problem data for a 4D multiscale elliptic example on [intervalL, intervalR]^4.

    Returns:
        (u_true, fside, A_eps, u_00, u_01, u_10, u_11, u_20, u_21, u_30, u_31):
        manufactured solution, right-hand side, diffusion coefficient, and the
        Dirichlet data on the two faces of each coordinate direction, in the
        order x, y, z, s.

    Raises:
        ValueError: if *equa_name* is not a supported example name (the
            original code silently returned None).
    """
    if equa_name == 'multi_scale4D_2':
        fside = lambda x, y, z, s: tf.ones_like(s)
        u_true = lambda x, y, z, s: tf.sin(5.0*np.pi*x)*tf.sin(10.0*np.pi*y)*tf.sin(10.0*np.pi*z)*tf.sin(5.0*np.pi*s)
        A_eps = lambda x, y, z, s: 0.25*(1.0+tf.cos(5.0*np.pi*x)*tf.cos(10.0*np.pi*y)*tf.cos(10.0*np.pi*z)*tf.cos(5.0*np.pi*s))
        u_00 = lambda x, y, z, s: tf.sin(5*np.pi * intervalL) * tf.sin(10*np.pi * y) * tf.sin(10*np.pi * z) * tf.sin(5*np.pi * s)
        u_01 = lambda x, y, z, s: tf.sin(5*np.pi * intervalR) * tf.sin(10*np.pi * y) * tf.sin(10*np.pi * z) * tf.sin(5*np.pi * s)
        u_10 = lambda x, y, z, s: tf.sin(5*np.pi * x) * tf.sin(10*np.pi * intervalL) * tf.sin(10*np.pi * z) * tf.sin(5*np.pi * s)
        u_11 = lambda x, y, z, s: tf.sin(5*np.pi * x) * tf.sin(10*np.pi * intervalR) * tf.sin(10*np.pi * z) * tf.sin(5*np.pi * s)
        u_20 = lambda x, y, z, s: tf.sin(5*np.pi * x) * tf.sin(10*np.pi * y) * tf.sin(10*np.pi * intervalL) * tf.sin(5*np.pi * s)
        u_21 = lambda x, y, z, s: tf.sin(5*np.pi * x) * tf.sin(10*np.pi * y) * tf.sin(10*np.pi * intervalR) * tf.sin(5*np.pi * s)
        u_30 = lambda x, y, z, s: tf.sin(5 * np.pi * x) * tf.sin(10 * np.pi * y) * tf.sin(10 * np.pi * z) * tf.sin(5 * np.pi * intervalL)
        u_31 = lambda x, y, z, s: tf.sin(5 * np.pi * x) * tf.sin(10 * np.pi * y) * tf.sin(10 * np.pi * z) * tf.sin(5 * np.pi * intervalR)
        return u_true, fside, A_eps, u_00, u_01, u_10, u_11, u_20, u_21, u_30, u_31
    elif equa_name == 'multi_scale4D_3':
        fside = lambda x, y, z, s: tf.ones_like(s)
        u_true = lambda x, y, z, s: tf.sin(5.0*np.pi*x)*tf.sin(10.0*np.pi*y)*tf.sin(10.0*np.pi*z)*tf.sin(5.0*np.pi*s)
        A_eps = lambda x, y, z, s: 0.25*(1.0+tf.cos(5.0*np.pi*x)*tf.cos(20.0*np.pi*y)*tf.cos(20.0*np.pi*z)*tf.cos(5.0*np.pi*s))
        u_00 = lambda x, y, z, s: tf.sin(5*np.pi * intervalL) * tf.sin(10*np.pi * y) * tf.sin(10*np.pi * z) * tf.sin(5*np.pi * s)
        u_01 = lambda x, y, z, s: tf.sin(5*np.pi * intervalR) * tf.sin(10*np.pi * y) * tf.sin(10*np.pi * z) * tf.sin(5*np.pi * s)
        u_10 = lambda x, y, z, s: tf.sin(5*np.pi * x) * tf.sin(10*np.pi * intervalL) * tf.sin(10*np.pi * z) * tf.sin(5*np.pi * s)
        u_11 = lambda x, y, z, s: tf.sin(5*np.pi * x) * tf.sin(10*np.pi * intervalR) * tf.sin(10*np.pi * z) * tf.sin(5*np.pi * s)
        u_20 = lambda x, y, z, s: tf.sin(5*np.pi * x) * tf.sin(10*np.pi * y) * tf.sin(10*np.pi * intervalL) * tf.sin(5*np.pi * s)
        u_21 = lambda x, y, z, s: tf.sin(5*np.pi * x) * tf.sin(10*np.pi * y) * tf.sin(10*np.pi * intervalR) * tf.sin(5*np.pi * s)
        u_30 = lambda x, y, z, s: tf.sin(5 * np.pi * x) * tf.sin(10 * np.pi * y) * tf.sin(10 * np.pi * z) * tf.sin(5 * np.pi * intervalL)
        u_31 = lambda x, y, z, s: tf.sin(5 * np.pi * x) * tf.sin(10 * np.pi * y) * tf.sin(10 * np.pi * z) * tf.sin(5 * np.pi * intervalR)
        return u_true, fside, A_eps, u_00, u_01, u_10, u_11, u_20, u_21, u_30, u_31
    elif equa_name == 'multi_scale4D_4':
        fside = lambda x, y, z, s: tf.ones_like(s)
        u_true = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)*tf.sin(np.pi*s) + \
                                    0.25*tf.sin(5.0*np.pi*x)*tf.sin(5.0*np.pi*y)*tf.sin(5.0*np.pi*z)*tf.sin(5.0*np.pi*s)
        A_eps = lambda x, y, z, s: 1.0
        # BUG FIX: the boundary data below restricts this branch's u_true to
        # each face; the original was a copy-paste of the 4D_2 boundary data.
        u_00 = lambda x, y, z, s: tf.sin(np.pi*intervalL)*tf.sin(np.pi*y)*tf.sin(np.pi*z)*tf.sin(np.pi*s) + \
                                  0.25*tf.sin(5.0*np.pi*intervalL)*tf.sin(5.0*np.pi*y)*tf.sin(5.0*np.pi*z)*tf.sin(5.0*np.pi*s)
        u_01 = lambda x, y, z, s: tf.sin(np.pi*intervalR)*tf.sin(np.pi*y)*tf.sin(np.pi*z)*tf.sin(np.pi*s) + \
                                  0.25*tf.sin(5.0*np.pi*intervalR)*tf.sin(5.0*np.pi*y)*tf.sin(5.0*np.pi*z)*tf.sin(5.0*np.pi*s)
        u_10 = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*intervalL)*tf.sin(np.pi*z)*tf.sin(np.pi*s) + \
                                  0.25*tf.sin(5.0*np.pi*x)*tf.sin(5.0*np.pi*intervalL)*tf.sin(5.0*np.pi*z)*tf.sin(5.0*np.pi*s)
        u_11 = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*intervalR)*tf.sin(np.pi*z)*tf.sin(np.pi*s) + \
                                  0.25*tf.sin(5.0*np.pi*x)*tf.sin(5.0*np.pi*intervalR)*tf.sin(5.0*np.pi*z)*tf.sin(5.0*np.pi*s)
        u_20 = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*intervalL)*tf.sin(np.pi*s) + \
                                  0.25*tf.sin(5.0*np.pi*x)*tf.sin(5.0*np.pi*y)*tf.sin(5.0*np.pi*intervalL)*tf.sin(5.0*np.pi*s)
        u_21 = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*intervalR)*tf.sin(np.pi*s) + \
                                  0.25*tf.sin(5.0*np.pi*x)*tf.sin(5.0*np.pi*y)*tf.sin(5.0*np.pi*intervalR)*tf.sin(5.0*np.pi*s)
        u_30 = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)*tf.sin(np.pi*intervalL) + \
                                  0.25*tf.sin(5.0*np.pi*x)*tf.sin(5.0*np.pi*y)*tf.sin(5.0*np.pi*z)*tf.sin(5.0*np.pi*intervalL)
        u_31 = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)*tf.sin(np.pi*intervalR) + \
                                  0.25*tf.sin(5.0*np.pi*x)*tf.sin(5.0*np.pi*y)*tf.sin(5.0*np.pi*z)*tf.sin(5.0*np.pi*intervalR)
        return u_true, fside, A_eps, u_00, u_01, u_10, u_11, u_20, u_21, u_30, u_31
    elif equa_name == 'multi_scale4D_5':
        fside = lambda x, y, z, s: tf.ones_like(s)
        u_true = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)*tf.sin(np.pi*s)+\
                                    0.05*tf.sin(5.0*np.pi*x)*tf.sin(10.0*np.pi*y)*tf.sin(10.0*np.pi*z)*tf.sin(5.0*np.pi*s)
        A_eps = lambda x, y, z, s: 0.25*(2.0+tf.cos(np.pi*x)*tf.cos(np.pi*y)*tf.cos(np.pi*z)*tf.cos(np.pi*s)+ \
                                         tf.cos(5.0*np.pi*x)*tf.cos(10.0*np.pi*y)*tf.cos(10.0*np.pi*z)*tf.cos(5.0*np.pi*s))
        # BUG FIX: the original boundary data omitted u_true's 0.05 amplitude
        # on the fine-scale term; each face is now exactly u_true restricted.
        u_00 = lambda x, y, z, s: tf.sin(np.pi*intervalL)*tf.sin(np.pi*y)*tf.sin(np.pi*z)*tf.sin(np.pi*s)+\
                                  0.05*tf.sin(5.0*np.pi*intervalL)*tf.sin(10.0*np.pi*y)*tf.sin(10.0*np.pi*z)*tf.sin(5.0*np.pi*s)
        u_01 = lambda x, y, z, s: tf.sin(np.pi*intervalR)*tf.sin(np.pi*y)*tf.sin(np.pi*z)*tf.sin(np.pi*s)+\
                                  0.05*tf.sin(5.0*np.pi*intervalR)*tf.sin(10.0*np.pi*y)*tf.sin(10.0*np.pi*z)*tf.sin(5.0*np.pi*s)
        u_10 = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*intervalL)*tf.sin(np.pi*z)*tf.sin(np.pi*s)+\
                                  0.05*tf.sin(5.0*np.pi*x)*tf.sin(10.0*np.pi*intervalL)*tf.sin(10.0*np.pi*z)*tf.sin(5.0*np.pi*s)
        u_11 = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*intervalR)*tf.sin(np.pi*z)*tf.sin(np.pi*s)+\
                                  0.05*tf.sin(5.0*np.pi*x)*tf.sin(10.0*np.pi*intervalR)*tf.sin(10.0*np.pi*z)*tf.sin(5.0*np.pi*s)
        u_20 = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*intervalL)*tf.sin(np.pi*s)+\
                                  0.05*tf.sin(5.0*np.pi*x)*tf.sin(10.0*np.pi*y)*tf.sin(10.0*np.pi*intervalL)*tf.sin(5.0*np.pi*s)
        u_21 = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*intervalR)*tf.sin(np.pi*s)+\
                                  0.05*tf.sin(5.0*np.pi*x)*tf.sin(10.0*np.pi*y)*tf.sin(10.0*np.pi*intervalR)*tf.sin(5.0*np.pi*s)
        u_30 = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)*tf.sin(np.pi*intervalL)+\
                                  0.05*tf.sin(5.0*np.pi*x)*tf.sin(10.0*np.pi*y)*tf.sin(10.0*np.pi*z)*tf.sin(5.0*np.pi*intervalL)
        u_31 = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)*tf.sin(np.pi*intervalR)+\
                                  0.05*tf.sin(5.0*np.pi*x)*tf.sin(10.0*np.pi*y)*tf.sin(10.0*np.pi*z)*tf.sin(5.0*np.pi*intervalR)
        return u_true, fside, A_eps, u_00, u_01, u_10, u_11, u_20, u_21, u_30, u_31
    elif equa_name == 'multi_scale4D_6':
        fside = lambda x, y, z, s: tf.ones_like(s)
        u_true = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)*tf.sin(np.pi*s)+\
                                    0.05*tf.sin(5.0*np.pi*x)*tf.sin(10.0*np.pi*y)*tf.sin(15.0*np.pi*z)*tf.sin(20.0*np.pi*s)
        A_eps = lambda x, y, z, s: 0.25*(1.0 + tf.cos(10.0*np.pi*x)*tf.cos(20.0*np.pi*y)*tf.cos(20.0*np.pi*z)*tf.cos(10.0*np.pi*s))
        # BUG FIX: the original boundary data reused the 4D_2 frequencies and
        # dropped the 0.05 amplitude; each face is now u_true restricted.
        u_00 = lambda x, y, z, s: tf.sin(np.pi*intervalL)*tf.sin(np.pi*y)*tf.sin(np.pi*z)*tf.sin(np.pi*s)+\
                                  0.05*tf.sin(5.0*np.pi*intervalL)*tf.sin(10.0*np.pi*y)*tf.sin(15.0*np.pi*z)*tf.sin(20.0*np.pi*s)
        u_01 = lambda x, y, z, s: tf.sin(np.pi*intervalR)*tf.sin(np.pi*y)*tf.sin(np.pi*z)*tf.sin(np.pi*s)+\
                                  0.05*tf.sin(5.0*np.pi*intervalR)*tf.sin(10.0*np.pi*y)*tf.sin(15.0*np.pi*z)*tf.sin(20.0*np.pi*s)
        u_10 = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*intervalL)*tf.sin(np.pi*z)*tf.sin(np.pi*s)+\
                                  0.05*tf.sin(5.0*np.pi*x)*tf.sin(10.0*np.pi*intervalL)*tf.sin(15.0*np.pi*z)*tf.sin(20.0*np.pi*s)
        u_11 = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*intervalR)*tf.sin(np.pi*z)*tf.sin(np.pi*s)+\
                                  0.05*tf.sin(5.0*np.pi*x)*tf.sin(10.0*np.pi*intervalR)*tf.sin(15.0*np.pi*z)*tf.sin(20.0*np.pi*s)
        u_20 = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*intervalL)*tf.sin(np.pi*s)+\
                                  0.05*tf.sin(5.0*np.pi*x)*tf.sin(10.0*np.pi*y)*tf.sin(15.0*np.pi*intervalL)*tf.sin(20.0*np.pi*s)
        u_21 = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*intervalR)*tf.sin(np.pi*s)+\
                                  0.05*tf.sin(5.0*np.pi*x)*tf.sin(10.0*np.pi*y)*tf.sin(15.0*np.pi*intervalR)*tf.sin(20.0*np.pi*s)
        u_30 = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)*tf.sin(np.pi*intervalL)+\
                                  0.05*tf.sin(5.0*np.pi*x)*tf.sin(10.0*np.pi*y)*tf.sin(15.0*np.pi*z)*tf.sin(20.0*np.pi*intervalL)
        u_31 = lambda x, y, z, s: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)*tf.sin(np.pi*intervalR)+\
                                  0.05*tf.sin(5.0*np.pi*x)*tf.sin(10.0*np.pi*y)*tf.sin(15.0*np.pi*z)*tf.sin(20.0*np.pi*intervalR)
        return u_true, fside, A_eps, u_00, u_01, u_10, u_11, u_20, u_21, u_30, u_31
    else:
        raise ValueError('Unknown equa_name for 4D examples: {}'.format(equa_name))
def get_force2pLaplace_4D(x=None, y=None, z=None, s=None, equa_name=None):
    """
    Analytic source term f = -div(A*grad(u)) of the 4D multiscale elliptic
    equation, evaluated at sample points (x, y, z, s).

    Parameters
    ----------
    x, y, z, s :
        Coordinate tensors (mutually broadcast-compatible).
    equa_name : str
        Test-problem name: 'multi_scale4D_2', 'multi_scale4D_4',
        'multi_scale4D_5' or 'multi_scale4D_6'.

    Returns
    -------
    Tensor with the force side f, or None when equa_name is not recognized.
    """
    pi = np.pi
    if equa_name == 'multi_scale4D_2':
        # A = 0.25*(1 + cos(5πx)cos(10πy)cos(10πz)cos(5πs)) and its gradient.
        A = 0.25*(1.0 + tf.cos(5.0*pi*x)*tf.cos(10.0*pi*y)*tf.cos(10.0*pi*z)*tf.cos(5.0*pi*s))
        Ax = -0.25*5.0*pi*tf.sin(5.0*pi*x)*tf.cos(10.0*pi*y)*tf.cos(10.0*pi*z)*tf.cos(5.0*pi*s)
        Ay = -0.25*10.0*pi*tf.cos(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.cos(10.0*pi*z)*tf.cos(5.0*pi*s)
        Az = -0.25*10.0*pi*tf.cos(5.0*pi*x)*tf.cos(10.0*pi*y)*tf.sin(10.0*pi*z)*tf.cos(5.0*pi*s)
        As = -0.25*5.0*pi*tf.cos(5.0*pi*x)*tf.cos(10.0*pi*y)*tf.cos(10.0*pi*z)*tf.sin(5.0*pi*s)
        # u = sin(5πx)sin(10πy)sin(10πz)sin(5πs); -Δu = (25+100+100+25)π²u = 250π²u.
        U = tf.sin(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.sin(10.0*pi*z)*tf.sin(5.0*pi*s)
        Ux = 5.0*pi*tf.cos(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.sin(10.0*pi*z)*tf.sin(5.0*pi*s)
        Uy = 10.0*pi*tf.sin(5.0*pi*x)*tf.cos(10.0*pi*y)*tf.sin(10.0*pi*z)*tf.sin(5.0*pi*s)
        Uz = 10.0*pi*tf.sin(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.cos(10.0*pi*z)*tf.sin(5.0*pi*s)
        Us = 5.0*pi*tf.sin(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.sin(10.0*pi*z)*tf.cos(5.0*pi*s)
        # f = -grad(A)·grad(u) - A*Δu
        fside = -1.0*(Ax*Ux + Ay*Uy + Az*Uz + As*Us) + 250.0*A*pi*pi*U
        return fside
    elif equa_name == 'multi_scale4D_4':
        # Constant coefficient: A ≡ 1, hence grad(A) = 0 and f = -Δu.
        # BUG FIX: the derivatives of the constant A were wrongly set to 1.0.
        A = 1.0
        Ax = 0.0
        Ay = 0.0
        Az = 0.0
        As = 0.0
        # u = sin(πx)sin(πy)sin(πz)sin(πs) + 0.25*sin(5πx)sin(5πy)sin(5πz)sin(5πs)
        U = tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s) + \
            0.25*tf.sin(5.0*pi*x)*tf.sin(5.0*pi*y)*tf.sin(5.0*pi*z)*tf.sin(5.0*pi*s)
        Ux = pi*tf.cos(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s) + \
             0.25*5.0*pi*tf.cos(5.0*pi*x)*tf.sin(5.0*pi*y)*tf.sin(5.0*pi*z)*tf.sin(5.0*pi*s)
        # BUG FIX: the first term of Uy was missing its chain-rule factor π.
        Uy = pi*tf.sin(pi*x)*tf.cos(pi*y)*tf.sin(pi*z)*tf.sin(pi*s) + \
             0.25*5.0*pi*tf.sin(5.0*pi*x)*tf.cos(5.0*pi*y)*tf.sin(5.0*pi*z)*tf.sin(5.0*pi*s)
        Uz = pi*tf.sin(pi*x)*tf.sin(pi*y)*tf.cos(pi*z)*tf.sin(pi*s) + \
             0.25*5.0*pi*tf.sin(5.0*pi*x)*tf.sin(5.0*pi*y)*tf.cos(5.0*pi*z)*tf.sin(5.0*pi*s)
        Us = pi*tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.cos(pi*s) + \
             0.25*5.0*pi*tf.sin(5.0*pi*x)*tf.sin(5.0*pi*y)*tf.sin(5.0*pi*z)*tf.cos(5.0*pi*s)
        # -Δu: each of the four directions of the 5π term contributes 25π².
        # BUG FIX: the fine-scale coefficient was 20π² with a spurious cos(5πs);
        # the correct value is 100π² with sin(5πs).
        fside = -1.0*(Ax*Ux + Ay*Uy + Az*Uz + As*Us) + \
                4.0*pi*pi*tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s) + \
                0.25*100.0*pi*pi*tf.sin(5.0*pi*x)*tf.sin(5.0*pi*y)*tf.sin(5.0*pi*z)*tf.sin(5.0*pi*s)
        return fside
    elif equa_name == 'multi_scale4D_5':
        # A = 0.25*(2 + cos(πx)cos(πy)cos(πz)cos(πs)
        #             + cos(5πx)cos(10πy)cos(10πz)cos(5πs)) and its gradient.
        A = 0.25*(2.0 + tf.cos(pi*x)*tf.cos(pi*y)*tf.cos(pi*z)*tf.cos(pi*s) +
                  tf.cos(5.0*pi*x)*tf.cos(10.0*pi*y)*tf.cos(10.0*pi*z)*tf.cos(5.0*pi*s))
        Ax = -0.25*pi*tf.sin(pi*x)*tf.cos(pi*y)*tf.cos(pi*z)*tf.cos(pi*s) - \
             0.25*5.0*pi*tf.sin(5.0*pi*x)*tf.cos(10.0*pi*y)*tf.cos(10.0*pi*z)*tf.cos(5.0*pi*s)
        Ay = -0.25*pi*tf.cos(pi*x)*tf.sin(pi*y)*tf.cos(pi*z)*tf.cos(pi*s) - \
             0.25*10.0*pi*tf.cos(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.cos(10.0*pi*z)*tf.cos(5.0*pi*s)
        Az = -0.25*pi*tf.cos(pi*x)*tf.cos(pi*y)*tf.sin(pi*z)*tf.cos(pi*s) - \
             0.25*10.0*pi*tf.cos(5.0*pi*x)*tf.cos(10.0*pi*y)*tf.sin(10.0*pi*z)*tf.cos(5.0*pi*s)
        As = -0.25*pi*tf.cos(pi*x)*tf.cos(pi*y)*tf.cos(pi*z)*tf.sin(pi*s) - \
             0.25*5.0*pi*tf.cos(5.0*pi*x)*tf.cos(10.0*pi*y)*tf.cos(10.0*pi*z)*tf.sin(5.0*pi*s)
        # u = sin(πx)sin(πy)sin(πz)sin(πs) + 0.05*sin(5πx)sin(10πy)sin(10πz)sin(5πs)
        # BUG FIX: U's fine-scale x-factor was sin(10πx), inconsistent with all
        # of its derivatives below, which use 5πx.
        U = tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s) + \
            0.05*tf.sin(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.sin(10.0*pi*z)*tf.sin(5.0*pi*s)
        Ux = pi*tf.cos(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s) + \
             0.05*5.0*pi*tf.cos(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.sin(10.0*pi*z)*tf.sin(5.0*pi*s)
        Uy = pi*tf.sin(pi*x)*tf.cos(pi*y)*tf.sin(pi*z)*tf.sin(pi*s) + \
             0.05*10.0*pi*tf.sin(5.0*pi*x)*tf.cos(10.0*pi*y)*tf.sin(10.0*pi*z)*tf.sin(5.0*pi*s)
        Uz = pi*tf.sin(pi*x)*tf.sin(pi*y)*tf.cos(pi*z)*tf.sin(pi*s) + \
             0.05*10.0*pi*tf.sin(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.cos(10.0*pi*z)*tf.sin(5.0*pi*s)
        # BUG FIX: the first term of Us was missing its chain-rule factor π.
        Us = pi*tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.cos(pi*s) + \
             0.05*5.0*pi*tf.sin(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.sin(10.0*pi*z)*tf.cos(5.0*pi*s)
        Uxx = -1.0*pi*pi*tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s) - \
              0.05*25.0*pi*pi*tf.sin(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.sin(10.0*pi*z)*tf.sin(5.0*pi*s)
        Uyy = -1.0*pi*pi*tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s) - \
              0.05*100.0*pi*pi*tf.sin(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.sin(10.0*pi*z)*tf.sin(5.0*pi*s)
        Uzz = -1.0*pi*pi*tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s) - \
              0.05*100.0*pi*pi*tf.sin(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.sin(10.0*pi*z)*tf.sin(5.0*pi*s)
        Uss = -1.0*pi*pi*tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s) - \
              0.05*25.0*pi*pi*tf.sin(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.sin(10.0*pi*z)*tf.sin(5.0*pi*s)
        # f = -grad(A)·grad(u) - A*Δu
        fside = -1.0*(Ax*Ux + Ay*Uy + Az*Uz + As*Us) - 1.0*A*(Uxx + Uyy + Uzz + Uss)
        return fside
    elif equa_name == 'multi_scale4D_6':
        # A = 0.25*(1 + cos(10πx)cos(20πy)cos(20πz)cos(10πs)) and its gradient.
        A = 0.25*(1.0 + tf.cos(10.0*pi*x)*tf.cos(20.0*pi*y)*tf.cos(20.0*pi*z)*tf.cos(10.0*pi*s))
        Ax = -0.25*10.0*pi*tf.sin(10.0*pi*x)*tf.cos(20.0*pi*y)*tf.cos(20.0*pi*z)*tf.cos(10.0*pi*s)
        Ay = -0.25*20.0*pi*tf.cos(10.0*pi*x)*tf.sin(20.0*pi*y)*tf.cos(20.0*pi*z)*tf.cos(10.0*pi*s)
        Az = -0.25*20.0*pi*tf.cos(10.0*pi*x)*tf.cos(20.0*pi*y)*tf.sin(20.0*pi*z)*tf.cos(10.0*pi*s)
        As = -0.25*10.0*pi*tf.cos(10.0*pi*x)*tf.cos(20.0*pi*y)*tf.cos(20.0*pi*z)*tf.sin(10.0*pi*s)
        # u = sin(πx)sin(πy)sin(πz)sin(πs) + 0.05*sin(5πx)sin(10πy)sin(15πz)sin(20πs)
        U = tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s) + \
            0.05*tf.sin(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.sin(15.0*pi*z)*tf.sin(20.0*pi*s)
        Ux = pi*tf.cos(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s) + \
             0.05*5.0*pi*tf.cos(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.sin(15.0*pi*z)*tf.sin(20.0*pi*s)
        Uy = pi*tf.sin(pi*x)*tf.cos(pi*y)*tf.sin(pi*z)*tf.sin(pi*s) + \
             0.05*10.0*pi*tf.sin(5.0*pi*x)*tf.cos(10.0*pi*y)*tf.sin(15.0*pi*z)*tf.sin(20.0*pi*s)
        Uz = pi*tf.sin(pi*x)*tf.sin(pi*y)*tf.cos(pi*z)*tf.sin(pi*s) + \
             0.05*15.0*pi*tf.sin(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.cos(15.0*pi*z)*tf.sin(20.0*pi*s)
        # BUG FIX: the first term of Us was missing its chain-rule factor π.
        Us = pi*tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.cos(pi*s) + \
             0.05*20.0*pi*tf.sin(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.sin(15.0*pi*z)*tf.cos(20.0*pi*s)
        Uxx = -1.0*pi*pi*tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s) - \
              0.05*25.0*pi*pi*tf.sin(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.sin(15.0*pi*z)*tf.sin(20.0*pi*s)
        Uyy = -1.0*pi*pi*tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s) - \
              0.05*100.0*pi*pi*tf.sin(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.sin(15.0*pi*z)*tf.sin(20.0*pi*s)
        # BUG FIX: (15π)² = 225π², not 125π².
        Uzz = -1.0*pi*pi*tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s) - \
              0.05*225.0*pi*pi*tf.sin(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.sin(15.0*pi*z)*tf.sin(20.0*pi*s)
        Uss = -1.0*pi*pi*tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s) - \
              0.05*400.0*pi*pi*tf.sin(5.0*pi*x)*tf.sin(10.0*pi*y)*tf.sin(15.0*pi*z)*tf.sin(20.0*pi*s)
        # f = -grad(A)·grad(u) - A*Δu
        fside = -1.0*(Ax*Ux + Ay*Uy + Az*Uz + As*Us) - 1.0*A*(Uxx + Uyy + Uzz + Uss)
        return fside
def get_infos2pLaplace_5D(input_dim=1, out_dim=1, mesh_number=2, intervalL=0.0, intervalR=1.0, equa_name=None):
    """
    Exact solution, force side, variable coefficient and Dirichlet boundary
    data for the 5D multiscale elliptic problem -div(A*grad(u)) = f on the
    cube [intervalL, intervalR]^5.

    Parameters
    ----------
    input_dim, out_dim, mesh_number :
        Kept for interface compatibility with the other get_infos* helpers;
        not used inside this function.
    intervalL, intervalR : float
        Endpoints of the cubic domain in every coordinate direction.
    equa_name : str
        Test-problem name, 'multi_scale5D_1' ... 'multi_scale5D_9'.

    Returns
    -------
    (u_true, fside, A_eps, u_00, u_01, u_10, u_11, u_20, u_21, u_30, u_31,
     u_40, u_41) — each a lambda of (x, y, z, s, t). u_i0/u_i1 are the
    restrictions of u_true to the lower/upper face in the i-th coordinate.
    Returns None when equa_name is not recognized.

    Note
    ----
    The boundary lambdas are derived uniformly from u_true by endpoint
    substitution. This fixes several copy-paste inconsistencies of the
    original hand-expanded boundary data (u_41 of 'multi_scale5D_3' used
    sin(πs) instead of sin(5πs); u_20 of 'multi_scale5D_6' used amplitude
    0.5 instead of 0.1; 'multi_scale5D_5' never substituted the endpoints;
    'multi_scale5D_8'/'_9' reused boundary data from a different solution).
    """
    pi = np.pi
    if equa_name == 'multi_scale5D_1':
        # -Δu = 5π²u for the plain product of sines; A ≡ 1.
        u_true = lambda x, y, z, s, t: tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s)*tf.sin(pi*t)
        fside = lambda x, y, z, s, t: 5.0*(pi**2)*tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s)*tf.sin(pi*t)
        A_eps = lambda x, y, z, s, t: 1.0
    elif equa_name == 'multi_scale5D_2':
        u_true = lambda x, y, z, s, t: tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s)*tf.sin(pi*t)
        # f = A*(-Δu) - grad(A)·grad(u), expanded analytically term by term.
        fside = lambda x, y, z, s, t: \
            5.0*(pi**2)*tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s)*tf.sin(pi*t) * \
            (1.0 + tf.cos(pi*x)*tf.cos(2*pi*y)*tf.cos(3*pi*z)*tf.cos(2*pi*s)*tf.cos(pi*t)) + \
            (pi**2)*tf.sin(pi*x)*tf.cos(2*pi*y)*tf.cos(3*pi*z)*tf.cos(2*pi*s)*tf.cos(pi*t) * \
            tf.cos(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s)*tf.sin(pi*t) + \
            (2.0*(pi**2))*tf.cos(pi*x)*tf.sin(2*pi*y)*tf.cos(3*pi*z)*tf.cos(2*pi*s)*tf.cos(pi*t) * \
            tf.sin(pi*x)*tf.cos(pi*y)*tf.sin(pi*z)*tf.sin(pi*s)*tf.sin(pi*t) + \
            (3.0*(pi**2))*tf.cos(pi*x)*tf.cos(2*pi*y)*tf.sin(3*pi*z)*tf.cos(2*pi*s)*tf.cos(pi*t) * \
            tf.sin(pi*x)*tf.sin(pi*y)*tf.cos(pi*z)*tf.sin(pi*s)*tf.sin(pi*t) + \
            (2.0*(pi**2))*tf.cos(pi*x)*tf.cos(2*pi*y)*tf.cos(3*pi*z)*tf.sin(2*pi*s)*tf.cos(pi*t) * \
            tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.cos(pi*s)*tf.sin(pi*t) + \
            (pi**2)*tf.cos(pi*x)*tf.cos(2*pi*y)*tf.cos(3*pi*z)*tf.cos(2*pi*s)*tf.sin(pi*t) * \
            tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s)*tf.cos(pi*t)
        A_eps = lambda x, y, z, s, t: 1.0 + tf.cos(pi*x)*tf.cos(2*pi*y)*tf.cos(3*pi*z)*tf.cos(2*pi*s)*tf.cos(pi*t)
    elif equa_name == 'multi_scale5D_3':
        u_true = lambda x, y, z, s, t: 0.5*tf.sin(pi*x)*tf.sin(5*pi*y)*tf.sin(10*pi*z)*tf.sin(5*pi*s)*tf.sin(pi*t)
        # f = A*(-Δu) - grad(A)·grad(u); with -Δu = 152π²u the leading factor
        # is 0.25*152π²*0.5 = 19π², and the cross terms carry 0.125/6.25/25.
        fside = lambda x, y, z, s, t: \
            19.0*(pi**2)*tf.sin(pi*x)*tf.sin(5*pi*y)*tf.sin(10*pi*z)*tf.sin(5*pi*s)*tf.sin(pi*t) * \
            (1.0 + tf.cos(pi*x)*tf.cos(10*pi*y)*tf.cos(20*pi*z)*tf.cos(10*pi*s)*tf.cos(pi*t)) + \
            0.125*(pi**2)*tf.sin(pi*x)*tf.cos(10*pi*y)*tf.cos(20*pi*z)*tf.cos(10*pi*s)*tf.cos(pi*t) * \
            tf.cos(pi*x)*tf.sin(5*pi*y)*tf.sin(10*pi*z)*tf.sin(5*pi*s)*tf.sin(pi*t) + \
            6.25*(pi**2)*tf.cos(pi*x)*tf.sin(10*pi*y)*tf.cos(20*pi*z)*tf.cos(10*pi*s)*tf.cos(pi*t) * \
            tf.sin(pi*x)*tf.cos(5*pi*y)*tf.sin(10*pi*z)*tf.sin(5*pi*s)*tf.sin(pi*t) + \
            25.0*(pi**2)*tf.cos(pi*x)*tf.cos(10*pi*y)*tf.sin(20*pi*z)*tf.cos(10*pi*s)*tf.cos(pi*t) * \
            tf.sin(pi*x)*tf.sin(5*pi*y)*tf.cos(10*pi*z)*tf.sin(5*pi*s)*tf.sin(pi*t) + \
            6.25*(pi**2)*tf.cos(pi*x)*tf.cos(10*pi*y)*tf.cos(20*pi*z)*tf.sin(10*pi*s)*tf.cos(pi*t) * \
            tf.sin(pi*x)*tf.sin(5*pi*y)*tf.sin(10*pi*z)*tf.cos(5*pi*s)*tf.sin(pi*t) + \
            0.125*(pi**2)*tf.cos(pi*x)*tf.cos(10*pi*y)*tf.cos(20*pi*z)*tf.cos(10*pi*s)*tf.sin(pi*t) * \
            tf.sin(pi*x)*tf.sin(5*pi*y)*tf.sin(10*pi*z)*tf.sin(5*pi*s)*tf.cos(pi*t)
        A_eps = lambda x, y, z, s, t: 0.25*(1.0 + tf.cos(pi*x)*tf.cos(10*pi*y)*tf.cos(20*pi*z)*tf.cos(10*pi*s)*tf.cos(pi*t))
    elif equa_name == 'multi_scale5D_4':
        # Two-scale solution; force side is a placeholder (ones), A ≡ 1.
        u_true = lambda x, y, z, s, t: tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s)*tf.sin(pi*t) + \
            0.05*tf.sin(10*pi*x)*tf.sin(10*pi*y)*tf.sin(10*pi*z)*tf.sin(10*pi*s)*tf.sin(10*pi*t)
        fside = lambda x, y, z, s, t: tf.ones_like(x)
        A_eps = lambda x, y, z, s, t: tf.ones_like(x)
    elif equa_name == 'multi_scale5D_5':
        u_true = lambda x, y, z, s, t: tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s)*tf.sin(pi*t) + \
            0.5*tf.sin(5*pi*x)*tf.sin(5*pi*y)*tf.sin(5*pi*z)*tf.sin(5*pi*s)*tf.sin(5*pi*t)
        fside = lambda x, y, z, s, t: tf.ones_like(x)
        A_eps = lambda x, y, z, s, t: tf.ones_like(x)
    elif equa_name == 'multi_scale5D_6':
        u_true = lambda x, y, z, s, t: tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s)*tf.sin(pi*t) + \
            0.1*tf.sin(5*pi*x)*tf.sin(5*pi*y)*tf.sin(5*pi*z)*tf.sin(5*pi*s)*tf.sin(5*pi*t)
        fside = lambda x, y, z, s, t: tf.ones_like(x)
        A_eps = lambda x, y, z, s, t: tf.ones_like(x)
    elif equa_name == 'multi_scale5D_7':
        u_true = lambda x, y, z, s, t: tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s)*tf.sin(pi*t) + \
            0.05*tf.sin(10*pi*x)*tf.sin(10*pi*y)*tf.sin(10*pi*z)*tf.sin(10*pi*s)*tf.sin(10*pi*t)
        fside = lambda x, y, z, s, t: tf.ones_like(x)
        A_eps = lambda x, y, z, s, t: 0.5 + 0.5*tf.cos(10*pi*x)*tf.cos(10*pi*y)*tf.cos(10*pi*z)*tf.cos(10*pi*s)*tf.cos(10*pi*t)
    elif equa_name == 'multi_scale5D_8':
        # Three fine scales (25π, 50π, 100π); force side placeholder, A ≡ 1.
        u_true = lambda x, y, z, s, t: tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s)*tf.sin(pi*t) + \
            0.5*tf.sin(25*pi*x)*tf.sin(25*pi*y)*tf.sin(25*pi*z)*tf.sin(25*pi*s)*tf.sin(25*pi*t) + \
            0.05*tf.sin(50*pi*x)*tf.sin(50*pi*y)*tf.sin(50*pi*z)*tf.sin(50*pi*s)*tf.sin(50*pi*t) + \
            0.01*tf.sin(100*pi*x)*tf.sin(100*pi*y)*tf.sin(100*pi*z)*tf.sin(100*pi*s)*tf.sin(100*pi*t)
        fside = lambda x, y, z, s, t: tf.ones_like(x)
        A_eps = lambda x, y, z, s, t: 1.0
    elif equa_name == 'multi_scale5D_9':
        u_true = lambda x, y, z, s, t: tf.sin(pi*x)*tf.sin(pi*y)*tf.sin(pi*z)*tf.sin(pi*s)*tf.sin(pi*t) + \
            0.1*tf.sin(25*pi*x)*tf.sin(25*pi*y)*tf.sin(25*pi*z)*tf.sin(25*pi*s)*tf.sin(25*pi*t) + \
            0.01*tf.sin(50*pi*x)*tf.sin(50*pi*y)*tf.sin(50*pi*z)*tf.sin(50*pi*s)*tf.sin(50*pi*t)
        fside = lambda x, y, z, s, t: tf.ones_like(x)
        A_eps = lambda x, y, z, s, t: 1.0
    else:
        # Unknown problem name (the original fell through and returned None).
        return None
    # Dirichlet boundary data: restriction of u_true to each of the ten faces.
    # The face argument (a Python float) broadcasts against the tensor inputs
    # exactly as the original hand-expanded expressions did.
    u_00 = lambda x, y, z, s, t: u_true(intervalL, y, z, s, t)
    u_01 = lambda x, y, z, s, t: u_true(intervalR, y, z, s, t)
    u_10 = lambda x, y, z, s, t: u_true(x, intervalL, z, s, t)
    u_11 = lambda x, y, z, s, t: u_true(x, intervalR, z, s, t)
    u_20 = lambda x, y, z, s, t: u_true(x, y, intervalL, s, t)
    u_21 = lambda x, y, z, s, t: u_true(x, y, intervalR, s, t)
    u_30 = lambda x, y, z, s, t: u_true(x, y, z, intervalL, t)
    u_31 = lambda x, y, z, s, t: u_true(x, y, z, intervalR, t)
    u_40 = lambda x, y, z, s, t: u_true(x, y, z, s, intervalL)
    u_41 = lambda x, y, z, s, t: u_true(x, y, z, s, intervalR)
    return u_true, fside, A_eps, u_00, u_01, u_10, u_11, u_20, u_21, u_30, u_31, u_40, u_41
def _u_partials_5d(coords, terms):
    """Gradient components and Laplacian of a separable multiscale solution.

    The manufactured solution is
        u(coords) = sum over (k, c) in *terms* of  c * prod_i sin(k*pi*coords[i])
    with exactly five coordinate factors per term.

    Args:
        coords: 5-tuple (x, y, z, s, t) of tensors/arrays broadcastable
            against each other.
        terms: iterable of (frequency k, amplitude c) pairs.

    Returns:
        (grads, laplacian) where grads is a 5-element list holding du/dx_i
        and laplacian is uxx + uyy + uzz + uss + utt.
    """
    grads = [0.0, 0.0, 0.0, 0.0, 0.0]
    laplacian = 0.0
    for k, c in terms:
        kpi = k * np.pi
        sins = [tf.sin(kpi * v) for v in coords]
        coss = [tf.cos(kpi * v) for v in coords]
        prod_all = sins[0] * sins[1] * sins[2] * sins[3] * sins[4]
        for i in range(5):
            # d/dx_i of one term: the i-th sine factor becomes a cosine.
            partial = c * kpi * coss[i]
            for j in range(5):
                if j != i:
                    partial = partial * sins[j]
            grads[i] = grads[i] + partial
        # Each second derivative multiplies the term by -(k*pi)^2; summed over
        # the five coordinates that is -5*(k*pi)^2 times the term.
        laplacian = laplacian - 5.0 * c * kpi * kpi * prod_all
    return grads, laplacian


def get_forceSide2pLaplace5D(x=None, y=None, z=None, s=None, t=None, equa_name='multi_scale5D_5'):
    """Right-hand side f of the 5D multiscale elliptic test problems,
        f = -sum_i dA/dx_i * du/dx_i - A * Laplace(u),
    for a family of manufactured solutions built from products of sines.

    Args:
        x, y, z, s, t: the five coordinates (tensors/arrays broadcastable
            against each other).
        equa_name: one of 'multi_scale5D_4' ... 'multi_scale5D_9'.

    Returns:
        fside for the selected case, or None for an unknown case name (the
        original if/elif chain also fell through to an implicit None).

    Bug fixes relative to the original: the 'multi_scale5D_8' branch used
    `np.pi**tf.sin(...)` (exponentiation) instead of `np.pi*tf.sin(...)` in
    uy, and both the '_8' and '_9' branches contained `np.pitf.sin(...)`
    (missing '*'), which raised AttributeError at runtime.
    """
    coords = (x, y, z, s, t)
    # (frequency, amplitude) pairs of the separable-sine exact solution for
    # each constant-coefficient (A(x) = 1) test case; verified against the
    # original per-branch derivative coefficients.
    const_coeff_terms = {
        'multi_scale5D_4': ((1.0, 1.0), (10.0, 0.05)),
        'multi_scale5D_5': ((1.0, 1.0), (5.0, 0.5)),
        'multi_scale5D_6': ((1.0, 1.0), (5.0, 0.1)),
        'multi_scale5D_8': ((1.0, 1.0), (25.0, 0.5), (50.0, 0.05), (100.0, 0.01)),
        'multi_scale5D_9': ((1.0, 1.0), (25.0, 0.1), (50.0, 0.01)),
    }
    if equa_name in const_coeff_terms:
        grads, laplacian = _u_partials_5d(coords, const_coeff_terms[equa_name])
        # NOTE(review): the original multiplies every first-order term by
        # Aeps_i = 1.0 even though A(x) is the constant 1 (whose derivatives
        # are 0), i.e. f = -(ux+uy+uz+us+ut) - Laplace(u).  That convention is
        # preserved unchanged here; confirm whether the advection part is
        # intentional.
        grad_sum = grads[0] + grads[1] + grads[2] + grads[3] + grads[4]
        return -1.0 * grad_sum - 1.0 * laplacian
    elif equa_name == 'multi_scale5D_7':
        # Variable coefficient A(x) = 0.5 + 0.5 * prod_i cos(10*pi*x_i) with
        # u = prod_i sin(pi*x_i) + 0.05 * prod_i sin(10*pi*x_i).
        grads, laplacian = _u_partials_5d(coords, ((1.0, 1.0), (10.0, 0.05)))
        ten_pi = 10.0 * np.pi
        coss = [tf.cos(ten_pi * v) for v in coords]
        sins = [tf.sin(ten_pi * v) for v in coords]
        A_eps = 0.5 + 0.5 * coss[0] * coss[1] * coss[2] * coss[3] * coss[4]
        # sum_i (dA/dx_i) * (du/dx_i): differentiating A w.r.t. x_i turns the
        # i-th cosine factor into -10*pi*sin.
        advection = 0.0
        for i in range(5):
            dA_i = -0.5 * ten_pi * sins[i]
            for j in range(5):
                if j != i:
                    dA_i = dA_i * coss[j]
            advection = advection + dA_i * grads[i]
        return -1.0 * advection - 1.0 * A_eps * laplacian
    # Unknown equa_name: fall through and return None, matching the original.
def get_infos2pLaplace_10D(input_dim=1, out_dim=1, mesh_number=2, intervalL=0.0, intervalR=1.0, equa_name=None):
    """Return (u_true, fside, A_eps) callables for the 10D multiscale case.

    NOTE(review): despite the "10D" name, the returned callables take only
    (x, y, z) — presumably a placeholder; confirm against the callers.
    Returns None for any other *equa_name*.
    """
    if equa_name == 'multi_scale10D_1':
        def u_true(x, y, z):
            # Product-of-sines exact solution.
            return tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * z)

        def fside(x, y, z):
            return 10.0 * ((np.pi) ** 2) * (tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * z))

        def A_eps(x, y, z):
            # Constant unit coefficient.
            return 1.0

        return u_true, fside, A_eps
| 76.355479
| 158
| 0.508347
| 25,538
| 111,479
| 2.174133
| 0.008301
| 0.257263
| 0.150532
| 0.193541
| 0.974443
| 0.968536
| 0.964141
| 0.960413
| 0.953119
| 0.940745
| 0
| 0.075979
| 0.239085
| 111,479
| 1,460
| 159
| 76.355479
| 0.578573
| 0.018865
| 0
| 0.626496
| 0
| 0
| 0.00788
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035897
| false
| 0
| 0.002564
| 0
| 0.098291
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e864ea0796f0d1552b8c81c0428c23680ccb90d5
| 9,278
|
py
|
Python
|
saleor/graphql/product/tests/test_variant_query.py
|
ecompw/saleor
|
d94093e64a9012f02332968f65d6465c597ecd79
|
[
"CC-BY-4.0"
] | 1
|
2020-09-02T00:14:04.000Z
|
2020-09-02T00:14:04.000Z
|
saleor/graphql/product/tests/test_variant_query.py
|
tracymelody/etang-api
|
79b78792204d81a43c7ceedc604ca221c747dd2d
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/graphql/product/tests/test_variant_query.py
|
tracymelody/etang-api
|
79b78792204d81a43c7ceedc604ca221c747dd2d
|
[
"CC-BY-4.0"
] | null | null | null |
import graphene
import pytest
from ...tests.utils import assert_graphql_error_with_message, get_graphql_content
VARIANT_QUERY = """
query variant($id: ID, $sku: String){
productVariant(id:$id, sku:$sku){
sku
}
}
"""
def test_get_variant_without_id_and_sku(staff_api_client, permission_manage_products):
    """Querying productVariant with neither id nor sku yields a GraphQL error."""
    # when: no lookup argument is supplied at all
    response = staff_api_client.post_graphql(
        VARIANT_QUERY,
        check_no_permissions=False,
        permissions=[permission_manage_products],
    )

    # then: the API reports that one of the two arguments is mandatory
    assert_graphql_error_with_message(
        response, "Either 'id' or 'sku' argument is required"
    )
def test_get_variant_with_id_and_sku(staff_api_client, permission_manage_products):
    """Supplying both id and sku at once is rejected with an explicit error."""
    # given: both mutually exclusive lookup arguments
    variables = {"id": "ID", "sku": "sku"}

    # when
    response = staff_api_client.post_graphql(
        VARIANT_QUERY,
        variables,
        check_no_permissions=False,
        permissions=[permission_manage_products],
    )

    # then
    assert_graphql_error_with_message(
        response, "Argument 'id' cannot be combined with 'sku'"
    )
def test_get_unpublished_variant_by_id_as_staff(
    staff_api_client, permission_manage_products, unavailable_product_with_variant
):
    """Staff with manage-products permission can see an unpublished variant."""
    # given: the relay global id of a variant of an unpublished product
    variant = unavailable_product_with_variant.variants.first()
    variables = {
        "id": graphene.Node.to_global_id("ProductVariant", variant.pk)
    }

    # when
    response = staff_api_client.post_graphql(
        VARIANT_QUERY,
        variables,
        check_no_permissions=False,
        permissions=[permission_manage_products],
    )

    # then: the variant is returned despite being unpublished
    payload = get_graphql_content(response)["data"]["productVariant"]
    assert payload["sku"] == variant.sku
@pytest.mark.skip(reason="Issue #5845")
def test_get_unpublished_variant_by_id_as_app(
    app_api_client, permission_manage_products, unavailable_product_with_variant
):
    """App with manage-products permission can see an unpublished variant."""
    # given
    variant = unavailable_product_with_variant.variants.first()
    variables = {
        "id": graphene.Node.to_global_id("ProductVariant", variant.pk)
    }

    # when
    response = app_api_client.post_graphql(
        VARIANT_QUERY,
        variables,
        check_no_permissions=False,
        permissions=[permission_manage_products],
    )

    # then
    payload = get_graphql_content(response)["data"]["productVariant"]
    assert payload["sku"] == variant.sku
def test_get_unpublished_variant_by_id_as_customer(
    user_api_client, unavailable_product_with_variant
):
    """A regular customer must not see a variant of an unpublished product."""
    # given
    variant = unavailable_product_with_variant.variants.first()
    variables = {
        "id": graphene.Node.to_global_id("ProductVariant", variant.pk)
    }

    # when
    response = user_api_client.post_graphql(
        VARIANT_QUERY, variables, check_no_permissions=False,
    )

    # then: nothing is returned
    payload = get_graphql_content(response)["data"]["productVariant"]
    assert not payload
def test_get_unpublished_variant_by_id_as_anonymous_user(
    api_client, unavailable_product_with_variant
):
    """An anonymous visitor must not see a variant of an unpublished product."""
    # given
    variant = unavailable_product_with_variant.variants.first()
    variables = {
        "id": graphene.Node.to_global_id("ProductVariant", variant.pk)
    }

    # when
    response = api_client.post_graphql(
        VARIANT_QUERY, variables, check_no_permissions=False,
    )

    # then: nothing is returned
    payload = get_graphql_content(response)["data"]["productVariant"]
    assert not payload
def test_get_variant_by_id_as_staff(
    staff_api_client, permission_manage_products, variant
):
    """Staff can fetch a published variant by its global id."""
    # given
    variables = {
        "id": graphene.Node.to_global_id("ProductVariant", variant.pk)
    }

    # when
    response = staff_api_client.post_graphql(
        VARIANT_QUERY,
        variables,
        check_no_permissions=False,
        permissions=[permission_manage_products],
    )

    # then
    payload = get_graphql_content(response)["data"]["productVariant"]
    assert payload["sku"] == variant.sku
def test_get_variant_by_id_as_app(app_api_client, permission_manage_products, variant):
    """App with manage-products permission can fetch a variant by global id."""
    # given
    variables = {
        "id": graphene.Node.to_global_id("ProductVariant", variant.pk)
    }

    # when
    response = app_api_client.post_graphql(
        VARIANT_QUERY,
        variables,
        check_no_permissions=False,
        permissions=[permission_manage_products],
    )

    # then
    payload = get_graphql_content(response)["data"]["productVariant"]
    assert payload["sku"] == variant.sku
def test_get_variant_by_id_as_customer(user_api_client, variant):
    """A customer can fetch a published variant by its global id."""
    # given
    variables = {
        "id": graphene.Node.to_global_id("ProductVariant", variant.pk)
    }

    # when
    response = user_api_client.post_graphql(
        VARIANT_QUERY, variables, check_no_permissions=False,
    )

    # then
    payload = get_graphql_content(response)["data"]["productVariant"]
    assert payload["sku"] == variant.sku
def test_get_variant_by_id_as_anonymous_user(api_client, variant):
    """An anonymous visitor can fetch a published variant by its global id."""
    # given
    variables = {
        "id": graphene.Node.to_global_id("ProductVariant", variant.pk)
    }

    # when
    response = api_client.post_graphql(
        VARIANT_QUERY, variables, check_no_permissions=False,
    )

    # then
    payload = get_graphql_content(response)["data"]["productVariant"]
    assert payload["sku"] == variant.sku
def test_get_unpublished_variant_by_sku_as_staff(
    staff_api_client, permission_manage_products, unavailable_product_with_variant
):
    """Staff with manage-products permission can see an unpublished variant by SKU."""
    # given
    variant = unavailable_product_with_variant.variants.first()
    variables = {"sku": variant.sku}

    # when
    response = staff_api_client.post_graphql(
        VARIANT_QUERY,
        variables,
        check_no_permissions=False,
        permissions=[permission_manage_products],
    )

    # then
    payload = get_graphql_content(response)["data"]["productVariant"]
    assert payload["sku"] == variant.sku
@pytest.mark.skip(reason="Issue #5845")
def test_get_unpublished_variant_by_sku_as_app(
    app_api_client, permission_manage_products, unavailable_product_with_variant
):
    """App with manage-products permission can see an unpublished variant by SKU."""
    # given
    variant = unavailable_product_with_variant.variants.first()
    variables = {"sku": variant.sku}

    # when
    response = app_api_client.post_graphql(
        VARIANT_QUERY,
        variables,
        check_no_permissions=False,
        permissions=[permission_manage_products],
    )

    # then
    payload = get_graphql_content(response)["data"]["productVariant"]
    assert payload["sku"] == variant.sku
def test_get_unpublished_variant_by_sku_as_customer(
    user_api_client, unavailable_product_with_variant
):
    """A customer must not see a variant of an unpublished product by SKU."""
    # given
    variant = unavailable_product_with_variant.variants.first()
    variables = {"sku": variant.sku}

    # when
    response = user_api_client.post_graphql(
        VARIANT_QUERY, variables, check_no_permissions=False,
    )

    # then: nothing is returned
    payload = get_graphql_content(response)["data"]["productVariant"]
    assert not payload
def test_get_unpublished_variant_by_sku_as_anonymous_user(
    api_client, unavailable_product_with_variant
):
    """An anonymous visitor must not see an unpublished product's variant by SKU."""
    # given
    variant = unavailable_product_with_variant.variants.first()
    variables = {"sku": variant.sku}

    # when
    response = api_client.post_graphql(
        VARIANT_QUERY, variables, check_no_permissions=False,
    )

    # then: nothing is returned
    payload = get_graphql_content(response)["data"]["productVariant"]
    assert not payload
def test_get_variant_by_sku_as_staff(
    staff_api_client, permission_manage_products, variant
):
    """Staff can fetch a published variant by its SKU."""
    # given
    variables = {"sku": variant.sku}

    # when
    response = staff_api_client.post_graphql(
        VARIANT_QUERY,
        variables,
        check_no_permissions=False,
        permissions=[permission_manage_products],
    )

    # then
    payload = get_graphql_content(response)["data"]["productVariant"]
    assert payload["sku"] == variant.sku
def test_get_variant_by_sku_as_app(app_api_client, permission_manage_products, variant):
    """App with manage-products permission can fetch a variant by SKU."""
    # given
    variables = {"sku": variant.sku}

    # when
    response = app_api_client.post_graphql(
        VARIANT_QUERY,
        variables,
        check_no_permissions=False,
        permissions=[permission_manage_products],
    )

    # then
    payload = get_graphql_content(response)["data"]["productVariant"]
    assert payload["sku"] == variant.sku
def test_get_variant_by_sku_as_customer(user_api_client, variant):
    """A customer can fetch a published variant by its SKU."""
    # given
    variables = {"sku": variant.sku}

    # when
    response = user_api_client.post_graphql(
        VARIANT_QUERY, variables, check_no_permissions=False,
    )

    # then
    payload = get_graphql_content(response)["data"]["productVariant"]
    assert payload["sku"] == variant.sku
def test_get_variant_by_sku_as_anonymous_user(api_client, variant):
    """An anonymous visitor can fetch a published variant by its SKU."""
    # given
    variables = {"sku": variant.sku}

    # when
    response = api_client.post_graphql(
        VARIANT_QUERY, variables, check_no_permissions=False,
    )

    # then
    payload = get_graphql_content(response)["data"]["productVariant"]
    assert payload["sku"] == variant.sku
| 26.892754
| 88
| 0.705324
| 1,080
| 9,278
| 5.667593
| 0.063889
| 0.052933
| 0.078419
| 0.058814
| 0.962588
| 0.954583
| 0.954583
| 0.954583
| 0.948211
| 0.935958
| 0
| 0.001077
| 0.199289
| 9,278
| 344
| 89
| 26.97093
| 0.822856
| 0.030933
| 0
| 0.763889
| 0
| 0
| 0.077216
| 0.002462
| 0
| 0
| 0
| 0
| 0.087963
| 1
| 0.083333
| false
| 0
| 0.013889
| 0
| 0.097222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e894bd0a0f19b58bb34347861bd8ee7b5009a5fa
| 162
|
py
|
Python
|
nn_meter/utils/path.py
|
Andyyoung0507/nn-Meter
|
d5d4cd92a2c81a9f6f5806965a80b9217ffc73cc
|
[
"MIT"
] | null | null | null |
nn_meter/utils/path.py
|
Andyyoung0507/nn-Meter
|
d5d4cd92a2c81a9f6f5806965a80b9217ffc73cc
|
[
"MIT"
] | null | null | null |
nn_meter/utils/path.py
|
Andyyoung0507/nn-Meter
|
d5d4cd92a2c81a9f6f5806965a80b9217ffc73cc
|
[
"MIT"
] | null | null | null |
import os
def get_filename(path):
    """Return the final path component of *path* (the file name)."""
    # os.path.basename(p) is defined as os.path.split(p)[1].
    _, tail = os.path.split(path)
    return tail
def get_filename_without_ext(path):
    """Return the file name of *path* with its final extension removed."""
    name = os.path.split(path)[1]
    root, _ext = os.path.splitext(name)
    return root
| 18
| 54
| 0.746914
| 26
| 162
| 4.5
| 0.461538
| 0.153846
| 0.239316
| 0.273504
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007092
| 0.12963
| 162
| 8
| 55
| 20.25
| 0.822695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
e89529597681dc725cfe573f4ef47d3584109724
| 157
|
py
|
Python
|
erede/service/__init__.py
|
collabo-br/erede-python
|
695ac91cd886d899440fe222f0bb72cd062874e9
|
[
"MIT"
] | null | null | null |
erede/service/__init__.py
|
collabo-br/erede-python
|
695ac91cd886d899440fe222f0bb72cd062874e9
|
[
"MIT"
] | null | null | null |
erede/service/__init__.py
|
collabo-br/erede-python
|
695ac91cd886d899440fe222f0bb72cd062874e9
|
[
"MIT"
] | 1
|
2020-05-26T14:30:03.000Z
|
2020-05-26T14:30:03.000Z
|
from .CancelTransactionService import *
from .CaptureTransactionService import *
from .CreateTransactionService import *
from .GetTransactionService import *
| 39.25
| 40
| 0.853503
| 12
| 157
| 11.166667
| 0.5
| 0.223881
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095541
| 157
| 4
| 41
| 39.25
| 0.943662
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e8cb80376188e2d83043d145f595e78f28f4ef15
| 88,519
|
py
|
Python
|
code/python/FactSetESG/v1/fds/sdk/FactSetESG/model/sasb_spotlights_categories.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 6
|
2022-02-07T16:34:18.000Z
|
2022-03-30T08:04:57.000Z
|
code/python/FactSetESG/v1/fds/sdk/FactSetESG/model/sasb_spotlights_categories.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 2
|
2022-02-07T05:25:57.000Z
|
2022-03-07T14:18:04.000Z
|
code/python/FactSetESG/v1/fds/sdk/FactSetESG/model/sasb_spotlights_categories.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | null | null | null |
"""
FactSet ESG API
FactSet ESG (powered by FactSet Truvalue Labs) applies machine learning to uncover risks and opportunities from companies' Environmental, Social and Governance (ESG) behavior, which are aggregated and categorized into continuously updated, material ESG scores. The service focuses on company ESG behavior from external sources and includes both positive and negative events that go beyond traditional sources of ESG risk data.<p> FactSet ESG extracts, analyzes, and generates scores from millions of documents each month collected from more than 100,000 data sources in over 13 languages. Sources include news, trade journals, NGOs, watchdog groups, trade blogs, industry reports and social media. Products deliver investable insights by revealing value and risk factors from unstructured data at the speed of current events.</p> # noqa: E501
The version of the OpenAPI document: 1.3.0
Contact: api@factset.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.FactSetESG.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.FactSetESG.exceptions import ApiAttributeError
class SasbSpotlightsCategories(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'value': ([str],),
}
@cached_property
def discriminator():
return None
attribute_map = {}
read_only_vars = set()
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""SasbSpotlightsCategories - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] ([str]): The SASB Categories requested for the respective scoreType(s). The default value is **ALLCATEGORIES**, which represents all 26 categories in a single overall score. To request a specifc category or list of categories, simply input the category names below. ### SASB Categories |**SASB Category Inputs**|**Description**| |---|---| |**ALLCATEGORIES**|**All Categories** - this category represents a company's overall SASB Score for the specific 'scoreType'. This value is equal to the cumulative average of all 26 SASB categories for the specific 'scoreType'.*Note that category is not available for the Dynamic Materiality 'scoreType'. |**ACCESSANDAFFORDABILITY**|**Access and Affordability** - The category addresses a company's ability to ensure broad access to its products and services, specifically in the context of underserved markets and/or population groups. It includes the management of issues related to universal needs, such as the accessibility and affordability of health care, financial services, utilities , education, and telecommunications.| |**AIRQUALITY**|**Air Quality** - the category addresses management of air quality impacts resulting from stationary (e.g., factories, power plants) and mobile sources (e.g., trucks, delivery vehicles, planes) as well as industrial emissions. Relevant airborne pollutants include, but are not limited to, oxides of nitrogen (NOx), oxides of sulfur (SOx), volatile organic compounds (VOCs), heavy metals, particulate matter, and chlorofluorocarbons. The category does not include GHG emissions, which are addressed in a separate category.| |**BUSINESSETHICS**|**Business Ethics** - the category addresses the company's approach to managing risks and opportunities surrounding ethical conduct of business, including fraud, corruption, bribery and facilitation payments, fiduciary responsibilities, and other behavior that may have an ethical component. 
This includes sensitivity to business norms and standards as they shift over time, jurisdiction, and culture. It addresses the company's ability to provide services that satisfy the highest professional and ethical standards of the industry, which means to avoid conflicts of interest, misrepresentation, bias, and negligence through training employees adequately and implementing policies and procedures to ensure employees provide services free from bias and error.| |**BUSMODELRESILIENCE**|**Business Model Resilience** - the category addresses an industry's capacity to manage risks and opportunities associated with incorporating social, environmental, and political transitions into long-term business model planning. This includes responsiveness to the transition to a low-carbon and climate-constrained economy, as well as growth and creation of new markets among unserved and underserved socioeconomic populations. The category highlights industries in which evolving environmental and social realities may challenge companies to fundamentally adapt or may put their business models at risk.| |**COMPETITIVEBEHAVIOR**|**Competitive Behavior** - the category covers social issues associated with existence of monopolies, which may include, but are not limited to, excessive prices, poor quality of service, and inefficiencies. It addresses a company's management of legal and social expectation around monopolistic and anti-competitive practices, including issues related to bargaining power, collusion, price fixing or manipulation, and protection of patents and intellectual property (IP).| |**CRITINCIDENTRISKMGT**|**Critical Incident Risk Management** - the category addresses the company's use of management systems and scenario planning to identify, understand, and prevent or minimize the occurrence of low-probability, high-impact accidents and emergencies with significant potential environmental and social externalities. 
It relates to the culture of safety at a company, its relevant safety management systems and technological controls, the potential human, environmental, and social implications of such events occurring, and the long-term effects to an organization, its workers, and society should these events occur.| |**CUSTOMERPRIVACY**|**Customer Privacy** - the category addresses management of risks related to the use of personally identifiable information (PII) and other customer or user data for secondary purposes including but not limited to marketing through affiliates and non-affiliates. The scope of the category includes social issues that may arise from a company's approach to collecting data, obtaining consent (e.g., opt-in policies), managing user and customer expectations regarding how their data is used, and managing evolving regulation. It excludes social issues arising from cybersecurity risks, which are covered in a separate category.| |**CUSTWELFARE**|**Customer Welfare** - the category addresses customer welfare concerns over issues including, but not limited to, health and nutrition of foods and beverages, antibiotic use in animal production, and management of controlled substances. The category addresses the company's ability to provide consumers with manufactured products and services that are aligned with societal expectations. It does not include issues directly related to quality and safety malfunctions of manufactured products and services, but instead addresses qualities inherent to the design and delivery of products and services where customer welfare may be in question. The scope of the category also captures companies' ability to prevent counterfeit products.| |**DATASECURITY**|**Data Security** - the category addresses management of risks related to collection, retention, and use of sensitive, confidential, and/or proprietary customer or user data. 
It includes social issues that may arise from incidents such as data breaches in which personally identifiable information (PII) and other user or customer data may be exposed. It addresses a company's strategy, policies, and practices related to IT infrastructure, staff training, record keeping, cooperation with law enforcement, and other mechanisms used to ensure security of customer or user data.| |**ECOLOGICALIMPACTS**|**Ecological Impacts** - the category addresses management of the company's impacts on ecosystems and biodiversity through activities including, but not limited to, land use for exploration, natural resource extraction, and cultivation, as well as project development, construction, and siting. The impacts include, but are not limited to, biodiversity loss, habitat destruction, and deforestation at all stages – planning, land acquisition, permitting, development, operations, and site remediation. The category does not cover impacts of climate change on ecosystems and biodiversity.| |**EMPENGDIVANDINC**|**Employee Engagement Diversity and Inclusion** - the category addresses a company's ability to ensure that its culture and hiring and promotion practices embrace the building of a diverse and inclusive workforce that reflects the makeup of local talent pools and its customer base. It addresses the issues of discriminatory practices on the bases of race, gender, ethnicity, religion, sexual orientation, and other factors.| |**EMPHEALTHANDSAFETY**|**Employee Health and Safety** - the category addresses a company's ability to create and maintain a safe and healthy workplace environment that is free of injuries, fatalities, and illness (both chronic and acute). It is traditionally accomplished through implementing safety management plans, developing training requirements for employees and contractors, and conducting regular audits of their own practices as well as those of their subcontractors. 
The category further captures how companies ensure physical and mental health of workforce through technology, training, corporate culture, regulatory compliance, monitoring and testing, and personal protective equipment.| |**ENERGYMGT**|**Energy Management** - the category addresses environmental impacts associated with energy consumption. It addresses the company's management of energy in manufacturing and/or for provision of products and services derived from utility providers (grid energy) not owned or controlled by the company. More specifically, it includes management of energy efficiency and intensity, energy mix, as well as grid reliance. Upstream (e.g., suppliers) and downstream (e.g., product use) energy use is not included in the scope.| |**GHGEMISSIONS**|**Greenhouse Gas Emissions** - the category addresses direct (Scope 1) greenhouse gas (GHG) emissions that a company generates through its operations. This includes GHG emissions from stationary (e.g., factories, power plants) and mobile sources (e.g., trucks, delivery vehicles, planes), whether a result of combustion of fuel or non-combusted direct releases during activities such as natural resource extraction, power generation, land use, or biogenic processes. The category further includes management of regulatory risks, environmental compliance, and reputational risks and opportunities, as they related to direct GHG emissions. The seven GHGs covered under the Kyoto Protocol are included within the category— carbon dioxide (CO2 ), methane (CH4), nitrous oxide (N2O), hydrofluorocarbons (HFCs), perfluorocarbons (PFCs), sulfur hexafluoride (SF6), and nitrogen trifluoride (NF3).| |**HUMANRIGHTSANDCOMRELS**|**Human Rights and Community Relations** - the category addresses management of the relationship between businesses and the communities in which they operate, including, but not limited to, management of direct and indirect impacts on core human rights and the treatment of indigenous peoples. 
More specifically, such management may cover socio-economic community impacts, community engagement, environmental justice, cultivation of local workforces, impact on local businesses, license to operate, and environmental/social impact assessments. The category does not include environmental impacts such as air pollution or waste which, although they may impact the health and safety of members of local communities, are addressed in separate categories.| |**LABORPRACTICES**|**Labor Practices** - the category addresses the company's ability to uphold commonly accepted labor standards in the workplace, including compliance with labor laws and internationally accepted norms and standards. This includes, but is not limited to, ensuring basic human rights related to child labor, forced or bonded labor, exploitative labor, fair wages and overtime pay, and other basic workers' rights. It also includes minimum wage policies and provision of benefits, which may influence how a workforce is attracted, retained, and motivated. The category further addresses a company's relationship with organized labor and freedom of association.| |**MGTOFLEGALANDREGENV**|**Management of the Legal and Regulatory Environment** - the category addresses a company's approach to engaging with regulators in cases where conflicting corporate and public interests may have the potential for long-term adverse direct or indirect environmental and social impacts. The category addresses a company's level of reliance upon regulatory policy or monetary incentives (such as subsidies and taxes), actions to influence industry policy (such as through lobbying), overall reliance on a favorable regulatory environment for business competitiveness, and ability to comply with relevant regulations. 
It may relate to the alignment of management and investor views of regulatory engagement and compliance at large.| |**MATSOURCINGANDEFF**|**Materials Sourcing and Efficiency** - the category addresses issues related to the resilience of materials supply chains to impacts of climate change and other external environmental and social factors. It captures the impacts of such external factors on operational activity of suppliers, which can further affect availability and pricing of key resources. It addresses a company's ability to manage these risks through product design, manufacturing, and end-of-life management, such as by using of recycled and renewable materials, reducing the use of key materials (dematerialization), maximizing resource efficiency in manufacturing, and making R&D investments in substitute materials. Additionally, companies can manage these issues by screening, selection, monitoring, and engagement with suppliers to ensure their resilience to external risks. It does not address issues associated with environmental and social externalities created by operational activity of individual suppliers, which is covered in a separate category.| |**MATERIALITY**|**Materiality** - this category represents a composite score of all 'material' SASB categories for the given entity. For more information on SASB's Materiality Map, visit [materiality.sasb.org](https://materiality.sasb.org/)| |**PHYIMPACTSOFCLIMATECHG**|**Physical Impacts of Climate Change** - the category addresses the company's ability to manage risks and opportunities associated with direct exposure of its owned or controlled assets and operations to actual or potential physical impacts of climate change. It captures environmental and social issues that may arise from operational disruptions due to physical impacts of climate change. 
It further captures socioeconomic issues resulting from companies failing to incorporate climate change consideration in products and services sold, such as insurance policies and mortgages. The category relates to the company's ability to adapt to increased frequency and severity of extreme weather, shifting climate, sea level risk, and other expected physical impacts of climate change. Management may involve enhancing resiliency of physical assets and/or surrounding infrastructure as well as incorporation of climate change-related considerations into key business activities (e.g., mortgage and insurance underwriting, planning and development of real estate projects).| |**PDANDLIFECYCLEMGT**|**Product Design and Lifecycle Management** - the category addresses incorporation of environmental, social, and governance (ESG) considerations in characteristics of products and services provided or sold by the company. It includes, but is not limited to, managing the lifecycle impacts of products and services, such as those related to packaging, distribution, use-phase resource intensity, and other environmental and social externalities that may occur during their use-phase or at the end of life. The category captures a company's ability to address customer and societal demand for more sustainable products and services as well as to meet evolving environmental and social regulation. It does not address direct environmental or social impacts of the company's operations nor does it address health and safety risks to consumers from product use, which are covered in other categories.| |**PRODQUALITYANDSFTY**|**Product Quality and Safety** - the category addresses issues involving unintended characteristics of products sold or services provided that may create health or safety risks to end-users. It addresses a company's ability to offer manufactured products and/or services that meet customer expectations with respect to their health and safety characteristics. 
It includes, but is not limited to, issues involving liability, management of recalls and market withdrawals, product testing, and chemicals/content/ ingredient management in products.| |**SELLPRACANDPRODLABEL**|**Selling Practices and Product Labeling** - the category addresses social issues that may arise from a failure to manage the transparency, accuracy, and comprehensibility of marketing statements, advertising, and labeling of products and services. It includes, but is not limited to, advertising standards and regulations, ethical and responsible marketing practices, misleading or deceptive labeling, as well as discriminatory or predatory selling and lending practices. This may include deceptive or aggressive selling practices in which incentive structures for employees could encourage the sale of products or services that are not in the best interest of customers or clients.| |**SUPPLYCHAINMGT**|**Supply Chain Management** - the category addresses management of environmental, social, and governance (ESG) risks within a company's supply chain. It addresses issues associated with environmental and social externalities created by suppliers through their operational activities. Such issues include, but are not limited to, environmental responsibility, human rights, labor practices, and ethics and corruption. Management may involve screening, selection, monitoring, and engagement with suppliers on their environmental and social impacts. The category does not address the impacts of external factors – such as climate change and other environmental and social factors – on suppliers' operations and/or on the availability and pricing of key resources, which is covered in a separate category.| |**SYSTEMICRISKMGT**|**Systemic Risk Management** - the category addresses the company's contributions to, or management of systemic risks resulting from large-scale weakening or collapse of systems upon which the economy and society depend. 
This includes financial systems, natural resource systems, and technological systems. It addresses the mechanisms a company has in place to reduce its contributions to systemic risks and to improve safeguards that may mitigate the impacts of systemic failure. For financial institutions, the category also captures the company's ability to absorb shocks arising from financial and economic stress and meet stricter regulatory requirements related to the complexity and interconnectedness of companies in the industry.| |**WASTEANDHZRDMATSMGT**|**Waste and Hazardous Materials Management** - the category addresses environmental issues associated with hazardous and non-hazardous waste generated by companies. It addresses a company's management of solid wastes in manufacturing, agriculture, and other industrial processes. It covers treatment, handling, storage, disposal, and regulatory compliance. The category does not cover emissions to air or wastewater, nor does it cover waste from end-of-life of products, which are addressed in separate categories.| |**WATERANDWASTEWATERMGT**|**Water and Wastewater Management** - the category addresses a company's water use, water consumption, wastewater generation, and other impacts of operations on water resources, which may be influenced by regional differences in the availability and quality of and competition for water resources. More specifically, it addresses management strategies including, but not limited to, water efficiency, intensity, and recycling. Lastly, the category also addresses management of wastewater treatment and discharge, including groundwater and aquifer pollution.| ### Helper Input **ALL** = Simply gives the ability to request all categories in a single request without having to explicitly list out all 26 in the request. *This is not the same as \"ALLCATEGORIES\" which is a single overall score for the company and returned as default.* . if omitted defaults to ["ALLCATEGORIES"] # noqa: E501
Keyword Args:
value ([str]): The SASB Categories requested for the respective scoreType(s). The default value is **ALLCATEGORIES**, which represents all 26 categories in a single overall score. To request a specifc category or list of categories, simply input the category names below. ### SASB Categories |**SASB Category Inputs**|**Description**| |---|---| |**ALLCATEGORIES**|**All Categories** - this category represents a company's overall SASB Score for the specific 'scoreType'. This value is equal to the cumulative average of all 26 SASB categories for the specific 'scoreType'.*Note that category is not available for the Dynamic Materiality 'scoreType'. |**ACCESSANDAFFORDABILITY**|**Access and Affordability** - The category addresses a company's ability to ensure broad access to its products and services, specifically in the context of underserved markets and/or population groups. It includes the management of issues related to universal needs, such as the accessibility and affordability of health care, financial services, utilities , education, and telecommunications.| |**AIRQUALITY**|**Air Quality** - the category addresses management of air quality impacts resulting from stationary (e.g., factories, power plants) and mobile sources (e.g., trucks, delivery vehicles, planes) as well as industrial emissions. Relevant airborne pollutants include, but are not limited to, oxides of nitrogen (NOx), oxides of sulfur (SOx), volatile organic compounds (VOCs), heavy metals, particulate matter, and chlorofluorocarbons. The category does not include GHG emissions, which are addressed in a separate category.| |**BUSINESSETHICS**|**Business Ethics** - the category addresses the company's approach to managing risks and opportunities surrounding ethical conduct of business, including fraud, corruption, bribery and facilitation payments, fiduciary responsibilities, and other behavior that may have an ethical component. 
This includes sensitivity to business norms and standards as they shift over time, jurisdiction, and culture. It addresses the company's ability to provide services that satisfy the highest professional and ethical standards of the industry, which means to avoid conflicts of interest, misrepresentation, bias, and negligence through training employees adequately and implementing policies and procedures to ensure employees provide services free from bias and error.| |**BUSMODELRESILIENCE**|**Business Model Resilience** - the category addresses an industry's capacity to manage risks and opportunities associated with incorporating social, environmental, and political transitions into long-term business model planning. This includes responsiveness to the transition to a low-carbon and climate-constrained economy, as well as growth and creation of new markets among unserved and underserved socioeconomic populations. The category highlights industries in which evolving environmental and social realities may challenge companies to fundamentally adapt or may put their business models at risk.| |**COMPETITIVEBEHAVIOR**|**Competitive Behavior** - the category covers social issues associated with existence of monopolies, which may include, but are not limited to, excessive prices, poor quality of service, and inefficiencies. It addresses a company's management of legal and social expectation around monopolistic and anti-competitive practices, including issues related to bargaining power, collusion, price fixing or manipulation, and protection of patents and intellectual property (IP).| |**CRITINCIDENTRISKMGT**|**Critical Incident Risk Management** - the category addresses the company's use of management systems and scenario planning to identify, understand, and prevent or minimize the occurrence of low-probability, high-impact accidents and emergencies with significant potential environmental and social externalities. 
It relates to the culture of safety at a company, its relevant safety management systems and technological controls, the potential human, environmental, and social implications of such events occurring, and the long-term effects to an organization, its workers, and society should these events occur.| |**CUSTOMERPRIVACY**|**Customer Privacy** - the category addresses management of risks related to the use of personally identifiable information (PII) and other customer or user data for secondary purposes including but not limited to marketing through affiliates and non-affiliates. The scope of the category includes social issues that may arise from a company's approach to collecting data, obtaining consent (e.g., opt-in policies), managing user and customer expectations regarding how their data is used, and managing evolving regulation. It excludes social issues arising from cybersecurity risks, which are covered in a separate category.| |**CUSTWELFARE**|**Customer Welfare** - the category addresses customer welfare concerns over issues including, but not limited to, health and nutrition of foods and beverages, antibiotic use in animal production, and management of controlled substances. The category addresses the company's ability to provide consumers with manufactured products and services that are aligned with societal expectations. It does not include issues directly related to quality and safety malfunctions of manufactured products and services, but instead addresses qualities inherent to the design and delivery of products and services where customer welfare may be in question. The scope of the category also captures companies' ability to prevent counterfeit products.| |**DATASECURITY**|**Data Security** - the category addresses management of risks related to collection, retention, and use of sensitive, confidential, and/or proprietary customer or user data. 
It includes social issues that may arise from incidents such as data breaches in which personally identifiable information (PII) and other user or customer data may be exposed. It addresses a company's strategy, policies, and practices related to IT infrastructure, staff training, record keeping, cooperation with law enforcement, and other mechanisms used to ensure security of customer or user data.| |**ECOLOGICALIMPACTS**|**Ecological Impacts** - the category addresses management of the company's impacts on ecosystems and biodiversity through activities including, but not limited to, land use for exploration, natural resource extraction, and cultivation, as well as project development, construction, and siting. The impacts include, but are not limited to, biodiversity loss, habitat destruction, and deforestation at all stages – planning, land acquisition, permitting, development, operations, and site remediation. The category does not cover impacts of climate change on ecosystems and biodiversity.| |**EMPENGDIVANDINC**|**Employee Engagement Diversity and Inclusion** - the category addresses a company's ability to ensure that its culture and hiring and promotion practices embrace the building of a diverse and inclusive workforce that reflects the makeup of local talent pools and its customer base. It addresses the issues of discriminatory practices on the bases of race, gender, ethnicity, religion, sexual orientation, and other factors.| |**EMPHEALTHANDSAFETY**|**Employee Health and Safety** - the category addresses a company's ability to create and maintain a safe and healthy workplace environment that is free of injuries, fatalities, and illness (both chronic and acute). It is traditionally accomplished through implementing safety management plans, developing training requirements for employees and contractors, and conducting regular audits of their own practices as well as those of their subcontractors. 
The category further captures how companies ensure physical and mental health of workforce through technology, training, corporate culture, regulatory compliance, monitoring and testing, and personal protective equipment.| |**ENERGYMGT**|**Energy Management** - the category addresses environmental impacts associated with energy consumption. It addresses the company's management of energy in manufacturing and/or for provision of products and services derived from utility providers (grid energy) not owned or controlled by the company. More specifically, it includes management of energy efficiency and intensity, energy mix, as well as grid reliance. Upstream (e.g., suppliers) and downstream (e.g., product use) energy use is not included in the scope.| |**GHGEMISSIONS**|**Greenhouse Gas Emissions** - the category addresses direct (Scope 1) greenhouse gas (GHG) emissions that a company generates through its operations. This includes GHG emissions from stationary (e.g., factories, power plants) and mobile sources (e.g., trucks, delivery vehicles, planes), whether a result of combustion of fuel or non-combusted direct releases during activities such as natural resource extraction, power generation, land use, or biogenic processes. The category further includes management of regulatory risks, environmental compliance, and reputational risks and opportunities, as they related to direct GHG emissions. The seven GHGs covered under the Kyoto Protocol are included within the category— carbon dioxide (CO2 ), methane (CH4), nitrous oxide (N2O), hydrofluorocarbons (HFCs), perfluorocarbons (PFCs), sulfur hexafluoride (SF6), and nitrogen trifluoride (NF3).| |**HUMANRIGHTSANDCOMRELS**|**Human Rights and Community Relations** - the category addresses management of the relationship between businesses and the communities in which they operate, including, but not limited to, management of direct and indirect impacts on core human rights and the treatment of indigenous peoples. 
More specifically, such management may cover socio-economic community impacts, community engagement, environmental justice, cultivation of local workforces, impact on local businesses, license to operate, and environmental/social impact assessments. The category does not include environmental impacts such as air pollution or waste which, although they may impact the health and safety of members of local communities, are addressed in separate categories.| |**LABORPRACTICES**|**Labor Practices** - the category addresses the company's ability to uphold commonly accepted labor standards in the workplace, including compliance with labor laws and internationally accepted norms and standards. This includes, but is not limited to, ensuring basic human rights related to child labor, forced or bonded labor, exploitative labor, fair wages and overtime pay, and other basic workers' rights. It also includes minimum wage policies and provision of benefits, which may influence how a workforce is attracted, retained, and motivated. The category further addresses a company's relationship with organized labor and freedom of association.| |**MGTOFLEGALANDREGENV**|**Management of the Legal and Regulatory Environment** - the category addresses a company's approach to engaging with regulators in cases where conflicting corporate and public interests may have the potential for long-term adverse direct or indirect environmental and social impacts. The category addresses a company's level of reliance upon regulatory policy or monetary incentives (such as subsidies and taxes), actions to influence industry policy (such as through lobbying), overall reliance on a favorable regulatory environment for business competitiveness, and ability to comply with relevant regulations. 
It may relate to the alignment of management and investor views of regulatory engagement and compliance at large.| |**MATSOURCINGANDEFF**|**Materials Sourcing and Efficiency** - the category addresses issues related to the resilience of materials supply chains to impacts of climate change and other external environmental and social factors. It captures the impacts of such external factors on operational activity of suppliers, which can further affect availability and pricing of key resources. It addresses a company's ability to manage these risks through product design, manufacturing, and end-of-life management, such as by using of recycled and renewable materials, reducing the use of key materials (dematerialization), maximizing resource efficiency in manufacturing, and making R&D investments in substitute materials. Additionally, companies can manage these issues by screening, selection, monitoring, and engagement with suppliers to ensure their resilience to external risks. It does not address issues associated with environmental and social externalities created by operational activity of individual suppliers, which is covered in a separate category.| |**MATERIALITY**|**Materiality** - this category represents a composite score of all 'material' SASB categories for the given entity. For more information on SASB's Materiality Map, visit [materiality.sasb.org](https://materiality.sasb.org/)| |**PHYIMPACTSOFCLIMATECHG**|**Physical Impacts of Climate Change** - the category addresses the company's ability to manage risks and opportunities associated with direct exposure of its owned or controlled assets and operations to actual or potential physical impacts of climate change. It captures environmental and social issues that may arise from operational disruptions due to physical impacts of climate change. 
It further captures socioeconomic issues resulting from companies failing to incorporate climate change consideration in products and services sold, such as insurance policies and mortgages. The category relates to the company's ability to adapt to increased frequency and severity of extreme weather, shifting climate, sea level risk, and other expected physical impacts of climate change. Management may involve enhancing resiliency of physical assets and/or surrounding infrastructure as well as incorporation of climate change-related considerations into key business activities (e.g., mortgage and insurance underwriting, planning and development of real estate projects).| |**PDANDLIFECYCLEMGT**|**Product Design and Lifecycle Management** - the category addresses incorporation of environmental, social, and governance (ESG) considerations in characteristics of products and services provided or sold by the company. It includes, but is not limited to, managing the lifecycle impacts of products and services, such as those related to packaging, distribution, use-phase resource intensity, and other environmental and social externalities that may occur during their use-phase or at the end of life. The category captures a company's ability to address customer and societal demand for more sustainable products and services as well as to meet evolving environmental and social regulation. It does not address direct environmental or social impacts of the company's operations nor does it address health and safety risks to consumers from product use, which are covered in other categories.| |**PRODQUALITYANDSFTY**|**Product Quality and Safety** - the category addresses issues involving unintended characteristics of products sold or services provided that may create health or safety risks to end-users. It addresses a company's ability to offer manufactured products and/or services that meet customer expectations with respect to their health and safety characteristics. 
It includes, but is not limited to, issues involving liability, management of recalls and market withdrawals, product testing, and chemicals/content/ ingredient management in products.| |**SELLPRACANDPRODLABEL**|**Selling Practices and Product Labeling** - the category addresses social issues that may arise from a failure to manage the transparency, accuracy, and comprehensibility of marketing statements, advertising, and labeling of products and services. It includes, but is not limited to, advertising standards and regulations, ethical and responsible marketing practices, misleading or deceptive labeling, as well as discriminatory or predatory selling and lending practices. This may include deceptive or aggressive selling practices in which incentive structures for employees could encourage the sale of products or services that are not in the best interest of customers or clients.| |**SUPPLYCHAINMGT**|**Supply Chain Management** - the category addresses management of environmental, social, and governance (ESG) risks within a company's supply chain. It addresses issues associated with environmental and social externalities created by suppliers through their operational activities. Such issues include, but are not limited to, environmental responsibility, human rights, labor practices, and ethics and corruption. Management may involve screening, selection, monitoring, and engagement with suppliers on their environmental and social impacts. The category does not address the impacts of external factors – such as climate change and other environmental and social factors – on suppliers' operations and/or on the availability and pricing of key resources, which is covered in a separate category.| |**SYSTEMICRISKMGT**|**Systemic Risk Management** - the category addresses the company's contributions to, or management of systemic risks resulting from large-scale weakening or collapse of systems upon which the economy and society depend. 
This includes financial systems, natural resource systems, and technological systems. It addresses the mechanisms a company has in place to reduce its contributions to systemic risks and to improve safeguards that may mitigate the impacts of systemic failure. For financial institutions, the category also captures the company's ability to absorb shocks arising from financial and economic stress and meet stricter regulatory requirements related to the complexity and interconnectedness of companies in the industry.| |**WASTEANDHZRDMATSMGT**|**Waste and Hazardous Materials Management** - the category addresses environmental issues associated with hazardous and non-hazardous waste generated by companies. It addresses a company's management of solid wastes in manufacturing, agriculture, and other industrial processes. It covers treatment, handling, storage, disposal, and regulatory compliance. The category does not cover emissions to air or wastewater, nor does it cover waste from end-of-life of products, which are addressed in separate categories.| |**WATERANDWASTEWATERMGT**|**Water and Wastewater Management** - the category addresses a company's water use, water consumption, wastewater generation, and other impacts of operations on water resources, which may be influenced by regional differences in the availability and quality of and competition for water resources. More specifically, it addresses management strategies including, but not limited to, water efficiency, intensity, and recycling. Lastly, the category also addresses management of wastewater treatment and discharge, including groundwater and aquifer pollution.| ### Helper Input **ALL** = Simply gives the ability to request all categories in a single request without having to explicitly list out all 26 in the request. *This is not the same as \"ALLCATEGORIES\" which is a single overall score for the company and returned as default.* If omitted, defaults to ["ALLCATEGORIES"] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
value = ["ALLCATEGORIES"]
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
"""SasbSpotlightsCategories - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] ([str]): The SASB Categories requested for the respective scoreType(s). The default value is **ALLCATEGORIES**, which represents all 26 categories in a single overall score. To request a specifc category or list of categories, simply input the category names below. ### SASB Categories |**SASB Category Inputs**|**Description**| |---|---| |**ALLCATEGORIES**|**All Categories** - this category represents a company's overall SASB Score for the specific 'scoreType'. This value is equal to the cumulative average of all 26 SASB categories for the specific 'scoreType'.*Note that category is not available for the Dynamic Materiality 'scoreType'. |**ACCESSANDAFFORDABILITY**|**Access and Affordability** - The category addresses a company's ability to ensure broad access to its products and services, specifically in the context of underserved markets and/or population groups. It includes the management of issues related to universal needs, such as the accessibility and affordability of health care, financial services, utilities , education, and telecommunications.| |**AIRQUALITY**|**Air Quality** - the category addresses management of air quality impacts resulting from stationary (e.g., factories, power plants) and mobile sources (e.g., trucks, delivery vehicles, planes) as well as industrial emissions. Relevant airborne pollutants include, but are not limited to, oxides of nitrogen (NOx), oxides of sulfur (SOx), volatile organic compounds (VOCs), heavy metals, particulate matter, and chlorofluorocarbons. The category does not include GHG emissions, which are addressed in a separate category.| |**BUSINESSETHICS**|**Business Ethics** - the category addresses the company's approach to managing risks and opportunities surrounding ethical conduct of business, including fraud, corruption, bribery and facilitation payments, fiduciary responsibilities, and other behavior that may have an ethical component. 
This includes sensitivity to business norms and standards as they shift over time, jurisdiction, and culture. It addresses the company's ability to provide services that satisfy the highest professional and ethical standards of the industry, which means to avoid conflicts of interest, misrepresentation, bias, and negligence through training employees adequately and implementing policies and procedures to ensure employees provide services free from bias and error.| |**BUSMODELRESILIENCE**|**Business Model Resilience** - the category addresses an industry's capacity to manage risks and opportunities associated with incorporating social, environmental, and political transitions into long-term business model planning. This includes responsiveness to the transition to a low-carbon and climate-constrained economy, as well as growth and creation of new markets among unserved and underserved socioeconomic populations. The category highlights industries in which evolving environmental and social realities may challenge companies to fundamentally adapt or may put their business models at risk.| |**COMPETITIVEBEHAVIOR**|**Competitive Behavior** - the category covers social issues associated with existence of monopolies, which may include, but are not limited to, excessive prices, poor quality of service, and inefficiencies. It addresses a company's management of legal and social expectation around monopolistic and anti-competitive practices, including issues related to bargaining power, collusion, price fixing or manipulation, and protection of patents and intellectual property (IP).| |**CRITINCIDENTRISKMGT**|**Critical Incident Risk Management** - the category addresses the company's use of management systems and scenario planning to identify, understand, and prevent or minimize the occurrence of low-probability, high-impact accidents and emergencies with significant potential environmental and social externalities. 
It relates to the culture of safety at a company, its relevant safety management systems and technological controls, the potential human, environmental, and social implications of such events occurring, and the long-term effects to an organization, its workers, and society should these events occur.| |**CUSTOMERPRIVACY**|**Customer Privacy** - the category addresses management of risks related to the use of personally identifiable information (PII) and other customer or user data for secondary purposes including but not limited to marketing through affiliates and non-affiliates. The scope of the category includes social issues that may arise from a company's approach to collecting data, obtaining consent (e.g., opt-in policies), managing user and customer expectations regarding how their data is used, and managing evolving regulation. It excludes social issues arising from cybersecurity risks, which are covered in a separate category.| |**CUSTWELFARE**|**Customer Welfare** - the category addresses customer welfare concerns over issues including, but not limited to, health and nutrition of foods and beverages, antibiotic use in animal production, and management of controlled substances. The category addresses the company's ability to provide consumers with manufactured products and services that are aligned with societal expectations. It does not include issues directly related to quality and safety malfunctions of manufactured products and services, but instead addresses qualities inherent to the design and delivery of products and services where customer welfare may be in question. The scope of the category also captures companies' ability to prevent counterfeit products.| |**DATASECURITY**|**Data Security** - the category addresses management of risks related to collection, retention, and use of sensitive, confidential, and/or proprietary customer or user data. 
It includes social issues that may arise from incidents such as data breaches in which personally identifiable information (PII) and other user or customer data may be exposed. It addresses a company's strategy, policies, and practices related to IT infrastructure, staff training, record keeping, cooperation with law enforcement, and other mechanisms used to ensure security of customer or user data.| |**ECOLOGICALIMPACTS**|**Ecological Impacts** - the category addresses management of the company's impacts on ecosystems and biodiversity through activities including, but not limited to, land use for exploration, natural resource extraction, and cultivation, as well as project development, construction, and siting. The impacts include, but are not limited to, biodiversity loss, habitat destruction, and deforestation at all stages – planning, land acquisition, permitting, development, operations, and site remediation. The category does not cover impacts of climate change on ecosystems and biodiversity.| |**EMPENGDIVANDINC**|**Employee Engagement Diversity and Inclusion** - the category addresses a company's ability to ensure that its culture and hiring and promotion practices embrace the building of a diverse and inclusive workforce that reflects the makeup of local talent pools and its customer base. It addresses the issues of discriminatory practices on the bases of race, gender, ethnicity, religion, sexual orientation, and other factors.| |**EMPHEALTHANDSAFETY**|**Employee Health and Safety** - the category addresses a company's ability to create and maintain a safe and healthy workplace environment that is free of injuries, fatalities, and illness (both chronic and acute). It is traditionally accomplished through implementing safety management plans, developing training requirements for employees and contractors, and conducting regular audits of their own practices as well as those of their subcontractors. 
The category further captures how companies ensure physical and mental health of workforce through technology, training, corporate culture, regulatory compliance, monitoring and testing, and personal protective equipment.| |**ENERGYMGT**|**Energy Management** - the category addresses environmental impacts associated with energy consumption. It addresses the company's management of energy in manufacturing and/or for provision of products and services derived from utility providers (grid energy) not owned or controlled by the company. More specifically, it includes management of energy efficiency and intensity, energy mix, as well as grid reliance. Upstream (e.g., suppliers) and downstream (e.g., product use) energy use is not included in the scope.| |**GHGEMISSIONS**|**Greenhouse Gas Emissions** - the category addresses direct (Scope 1) greenhouse gas (GHG) emissions that a company generates through its operations. This includes GHG emissions from stationary (e.g., factories, power plants) and mobile sources (e.g., trucks, delivery vehicles, planes), whether a result of combustion of fuel or non-combusted direct releases during activities such as natural resource extraction, power generation, land use, or biogenic processes. The category further includes management of regulatory risks, environmental compliance, and reputational risks and opportunities, as they related to direct GHG emissions. The seven GHGs covered under the Kyoto Protocol are included within the category— carbon dioxide (CO2 ), methane (CH4), nitrous oxide (N2O), hydrofluorocarbons (HFCs), perfluorocarbons (PFCs), sulfur hexafluoride (SF6), and nitrogen trifluoride (NF3).| |**HUMANRIGHTSANDCOMRELS**|**Human Rights and Community Relations** - the category addresses management of the relationship between businesses and the communities in which they operate, including, but not limited to, management of direct and indirect impacts on core human rights and the treatment of indigenous peoples. 
More specifically, such management may cover socio-economic community impacts, community engagement, environmental justice, cultivation of local workforces, impact on local businesses, license to operate, and environmental/social impact assessments. The category does not include environmental impacts such as air pollution or waste which, although they may impact the health and safety of members of local communities, are addressed in separate categories.| |**LABORPRACTICES**|**Labor Practices** - the category addresses the company's ability to uphold commonly accepted labor standards in the workplace, including compliance with labor laws and internationally accepted norms and standards. This includes, but is not limited to, ensuring basic human rights related to child labor, forced or bonded labor, exploitative labor, fair wages and overtime pay, and other basic workers' rights. It also includes minimum wage policies and provision of benefits, which may influence how a workforce is attracted, retained, and motivated. The category further addresses a company's relationship with organized labor and freedom of association.| |**MGTOFLEGALANDREGENV**|**Management of the Legal and Regulatory Environment** - the category addresses a company's approach to engaging with regulators in cases where conflicting corporate and public interests may have the potential for long-term adverse direct or indirect environmental and social impacts. The category addresses a company's level of reliance upon regulatory policy or monetary incentives (such as subsidies and taxes), actions to influence industry policy (such as through lobbying), overall reliance on a favorable regulatory environment for business competitiveness, and ability to comply with relevant regulations. 
It may relate to the alignment of management and investor views of regulatory engagement and compliance at large.| |**MATSOURCINGANDEFF**|**Materials Sourcing and Efficiency** - the category addresses issues related to the resilience of materials supply chains to impacts of climate change and other external environmental and social factors. It captures the impacts of such external factors on operational activity of suppliers, which can further affect availability and pricing of key resources. It addresses a company's ability to manage these risks through product design, manufacturing, and end-of-life management, such as by using of recycled and renewable materials, reducing the use of key materials (dematerialization), maximizing resource efficiency in manufacturing, and making R&D investments in substitute materials. Additionally, companies can manage these issues by screening, selection, monitoring, and engagement with suppliers to ensure their resilience to external risks. It does not address issues associated with environmental and social externalities created by operational activity of individual suppliers, which is covered in a separate category.| |**MATERIALITY**|**Materiality** - this category represents a composite score of all 'material' SASB categories for the given entity. For more information on SASB's Materiality Map, visit [materiality.sasb.org](https://materiality.sasb.org/)| |**PHYIMPACTSOFCLIMATECHG**|**Physical Impacts of Climate Change** - the category addresses the company's ability to manage risks and opportunities associated with direct exposure of its owned or controlled assets and operations to actual or potential physical impacts of climate change. It captures environmental and social issues that may arise from operational disruptions due to physical impacts of climate change. 
It further captures socioeconomic issues resulting from companies failing to incorporate climate change consideration in products and services sold, such as insurance policies and mortgages. The category relates to the company's ability to adapt to increased frequency and severity of extreme weather, shifting climate, sea level risk, and other expected physical impacts of climate change. Management may involve enhancing resiliency of physical assets and/or surrounding infrastructure as well as incorporation of climate change-related considerations into key business activities (e.g., mortgage and insurance underwriting, planning and development of real estate projects).| |**PDANDLIFECYCLEMGT**|**Product Design and Lifecycle Management** - the category addresses incorporation of environmental, social, and governance (ESG) considerations in characteristics of products and services provided or sold by the company. It includes, but is not limited to, managing the lifecycle impacts of products and services, such as those related to packaging, distribution, use-phase resource intensity, and other environmental and social externalities that may occur during their use-phase or at the end of life. The category captures a company's ability to address customer and societal demand for more sustainable products and services as well as to meet evolving environmental and social regulation. It does not address direct environmental or social impacts of the company's operations nor does it address health and safety risks to consumers from product use, which are covered in other categories.| |**PRODQUALITYANDSFTY**|**Product Quality and Safety** - the category addresses issues involving unintended characteristics of products sold or services provided that may create health or safety risks to end-users. It addresses a company's ability to offer manufactured products and/or services that meet customer expectations with respect to their health and safety characteristics. 
It includes, but is not limited to, issues involving liability, management of recalls and market withdrawals, product testing, and chemicals/content/ ingredient management in products.| |**SELLPRACANDPRODLABEL**|**Selling Practices and Product Labeling** - the category addresses social issues that may arise from a failure to manage the transparency, accuracy, and comprehensibility of marketing statements, advertising, and labeling of products and services. It includes, but is not limited to, advertising standards and regulations, ethical and responsible marketing practices, misleading or deceptive labeling, as well as discriminatory or predatory selling and lending practices. This may include deceptive or aggressive selling practices in which incentive structures for employees could encourage the sale of products or services that are not in the best interest of customers or clients.| |**SUPPLYCHAINMGT**|**Supply Chain Management** - the category addresses management of environmental, social, and governance (ESG) risks within a company's supply chain. It addresses issues associated with environmental and social externalities created by suppliers through their operational activities. Such issues include, but are not limited to, environmental responsibility, human rights, labor practices, and ethics and corruption. Management may involve screening, selection, monitoring, and engagement with suppliers on their environmental and social impacts. The category does not address the impacts of external factors – such as climate change and other environmental and social factors – on suppliers' operations and/or on the availability and pricing of key resources, which is covered in a separate category.| |**SYSTEMICRISKMGT**|**Systemic Risk Management** - the category addresses the company's contributions to, or management of systemic risks resulting from large-scale weakening or collapse of systems upon which the economy and society depend. 
This includes financial systems, natural resource systems, and technological systems. It addresses the mechanisms a company has in place to reduce its contributions to systemic risks and to improve safeguards that may mitigate the impacts of systemic failure. For financial institutions, the category also captures the company's ability to absorb shocks arising from financial and economic stress and meet stricter regulatory requirements related to the complexity and interconnectedness of companies in the industry.| |**WASTEANDHZRDMATSMGT**|**Waste and Hazardous Materials Management** - the category addresses environmental issues associated with hazardous and non-hazardous waste generated by companies. It addresses a company's management of solid wastes in manufacturing, agriculture, and other industrial processes. It covers treatment, handling, storage, disposal, and regulatory compliance. The category does not cover emissions to air or wastewater, nor does it cover waste from end-of-life of products, which are addressed in separate categories.| |**WATERANDWASTEWATERMGT**|**Water and Wastewater Management** - the category addresses a company's water use, water consumption, wastewater generation, and other impacts of operations on water resources, which may be influenced by regional differences in the availability and quality of and competition for water resources. More specifically, it addresses management strategies including, but not limited to, water efficiency, intensity, and recycling. Lastly, the category also addresses management of wastewater treatment and discharge, including groundwater and aquifer pollution.| ### Helper Input **ALL** = Simply gives the ability to request all categories in a single request without having to explicitly list out all 26 in the request. *This is not the same as \"ALLCATEGORIES\" which is a single overall score for the company and returned as default.* . if omitted defaults to ["ALLCATEGORIES"] # noqa: E501
Keyword Args:
value ([str]): The SASB Categories requested for the respective scoreType(s). The default value is **ALLCATEGORIES**, which represents all 26 categories in a single overall score. To request a specifc category or list of categories, simply input the category names below. ### SASB Categories |**SASB Category Inputs**|**Description**| |---|---| |**ALLCATEGORIES**|**All Categories** - this category represents a company's overall SASB Score for the specific 'scoreType'. This value is equal to the cumulative average of all 26 SASB categories for the specific 'scoreType'.*Note that category is not available for the Dynamic Materiality 'scoreType'. |**ACCESSANDAFFORDABILITY**|**Access and Affordability** - The category addresses a company's ability to ensure broad access to its products and services, specifically in the context of underserved markets and/or population groups. It includes the management of issues related to universal needs, such as the accessibility and affordability of health care, financial services, utilities , education, and telecommunications.| |**AIRQUALITY**|**Air Quality** - the category addresses management of air quality impacts resulting from stationary (e.g., factories, power plants) and mobile sources (e.g., trucks, delivery vehicles, planes) as well as industrial emissions. Relevant airborne pollutants include, but are not limited to, oxides of nitrogen (NOx), oxides of sulfur (SOx), volatile organic compounds (VOCs), heavy metals, particulate matter, and chlorofluorocarbons. The category does not include GHG emissions, which are addressed in a separate category.| |**BUSINESSETHICS**|**Business Ethics** - the category addresses the company's approach to managing risks and opportunities surrounding ethical conduct of business, including fraud, corruption, bribery and facilitation payments, fiduciary responsibilities, and other behavior that may have an ethical component. 
This includes sensitivity to business norms and standards as they shift over time, jurisdiction, and culture. It addresses the company's ability to provide services that satisfy the highest professional and ethical standards of the industry, which means to avoid conflicts of interest, misrepresentation, bias, and negligence through training employees adequately and implementing policies and procedures to ensure employees provide services free from bias and error.| |**BUSMODELRESILIENCE**|**Business Model Resilience** - the category addresses an industry's capacity to manage risks and opportunities associated with incorporating social, environmental, and political transitions into long-term business model planning. This includes responsiveness to the transition to a low-carbon and climate-constrained economy, as well as growth and creation of new markets among unserved and underserved socioeconomic populations. The category highlights industries in which evolving environmental and social realities may challenge companies to fundamentally adapt or may put their business models at risk.| |**COMPETITIVEBEHAVIOR**|**Competitive Behavior** - the category covers social issues associated with existence of monopolies, which may include, but are not limited to, excessive prices, poor quality of service, and inefficiencies. It addresses a company's management of legal and social expectation around monopolistic and anti-competitive practices, including issues related to bargaining power, collusion, price fixing or manipulation, and protection of patents and intellectual property (IP).| |**CRITINCIDENTRISKMGT**|**Critical Incident Risk Management** - the category addresses the company's use of management systems and scenario planning to identify, understand, and prevent or minimize the occurrence of low-probability, high-impact accidents and emergencies with significant potential environmental and social externalities. 
It relates to the culture of safety at a company, its relevant safety management systems and technological controls, the potential human, environmental, and social implications of such events occurring, and the long-term effects to an organization, its workers, and society should these events occur.| |**CUSTOMERPRIVACY**|**Customer Privacy** - the category addresses management of risks related to the use of personally identifiable information (PII) and other customer or user data for secondary purposes including but not limited to marketing through affiliates and non-affiliates. The scope of the category includes social issues that may arise from a company's approach to collecting data, obtaining consent (e.g., opt-in policies), managing user and customer expectations regarding how their data is used, and managing evolving regulation. It excludes social issues arising from cybersecurity risks, which are covered in a separate category.| |**CUSTWELFARE**|**Customer Welfare** - the category addresses customer welfare concerns over issues including, but not limited to, health and nutrition of foods and beverages, antibiotic use in animal production, and management of controlled substances. The category addresses the company's ability to provide consumers with manufactured products and services that are aligned with societal expectations. It does not include issues directly related to quality and safety malfunctions of manufactured products and services, but instead addresses qualities inherent to the design and delivery of products and services where customer welfare may be in question. The scope of the category also captures companies' ability to prevent counterfeit products.| |**DATASECURITY**|**Data Security** - the category addresses management of risks related to collection, retention, and use of sensitive, confidential, and/or proprietary customer or user data. 
It includes social issues that may arise from incidents such as data breaches in which personally identifiable information (PII) and other user or customer data may be exposed. It addresses a company's strategy, policies, and practices related to IT infrastructure, staff training, record keeping, cooperation with law enforcement, and other mechanisms used to ensure security of customer or user data.| |**ECOLOGICALIMPACTS**|**Ecological Impacts** - the category addresses management of the company's impacts on ecosystems and biodiversity through activities including, but not limited to, land use for exploration, natural resource extraction, and cultivation, as well as project development, construction, and siting. The impacts include, but are not limited to, biodiversity loss, habitat destruction, and deforestation at all stages – planning, land acquisition, permitting, development, operations, and site remediation. The category does not cover impacts of climate change on ecosystems and biodiversity.| |**EMPENGDIVANDINC**|**Employee Engagement Diversity and Inclusion** - the category addresses a company's ability to ensure that its culture and hiring and promotion practices embrace the building of a diverse and inclusive workforce that reflects the makeup of local talent pools and its customer base. It addresses the issues of discriminatory practices on the bases of race, gender, ethnicity, religion, sexual orientation, and other factors.| |**EMPHEALTHANDSAFETY**|**Employee Health and Safety** - the category addresses a company's ability to create and maintain a safe and healthy workplace environment that is free of injuries, fatalities, and illness (both chronic and acute). It is traditionally accomplished through implementing safety management plans, developing training requirements for employees and contractors, and conducting regular audits of their own practices as well as those of their subcontractors. 
The category further captures how companies ensure physical and mental health of workforce through technology, training, corporate culture, regulatory compliance, monitoring and testing, and personal protective equipment.| |**ENERGYMGT**|**Energy Management** - the category addresses environmental impacts associated with energy consumption. It addresses the company's management of energy in manufacturing and/or for provision of products and services derived from utility providers (grid energy) not owned or controlled by the company. More specifically, it includes management of energy efficiency and intensity, energy mix, as well as grid reliance. Upstream (e.g., suppliers) and downstream (e.g., product use) energy use is not included in the scope.| |**GHGEMISSIONS**|**Greenhouse Gas Emissions** - the category addresses direct (Scope 1) greenhouse gas (GHG) emissions that a company generates through its operations. This includes GHG emissions from stationary (e.g., factories, power plants) and mobile sources (e.g., trucks, delivery vehicles, planes), whether a result of combustion of fuel or non-combusted direct releases during activities such as natural resource extraction, power generation, land use, or biogenic processes. The category further includes management of regulatory risks, environmental compliance, and reputational risks and opportunities, as they related to direct GHG emissions. The seven GHGs covered under the Kyoto Protocol are included within the category— carbon dioxide (CO2 ), methane (CH4), nitrous oxide (N2O), hydrofluorocarbons (HFCs), perfluorocarbons (PFCs), sulfur hexafluoride (SF6), and nitrogen trifluoride (NF3).| |**HUMANRIGHTSANDCOMRELS**|**Human Rights and Community Relations** - the category addresses management of the relationship between businesses and the communities in which they operate, including, but not limited to, management of direct and indirect impacts on core human rights and the treatment of indigenous peoples. 
More specifically, such management may cover socio-economic community impacts, community engagement, environmental justice, cultivation of local workforces, impact on local businesses, license to operate, and environmental/social impact assessments. The category does not include environmental impacts such as air pollution or waste which, although they may impact the health and safety of members of local communities, are addressed in separate categories.| |**LABORPRACTICES**|**Labor Practices** - the category addresses the company's ability to uphold commonly accepted labor standards in the workplace, including compliance with labor laws and internationally accepted norms and standards. This includes, but is not limited to, ensuring basic human rights related to child labor, forced or bonded labor, exploitative labor, fair wages and overtime pay, and other basic workers' rights. It also includes minimum wage policies and provision of benefits, which may influence how a workforce is attracted, retained, and motivated. The category further addresses a company's relationship with organized labor and freedom of association.| |**MGTOFLEGALANDREGENV**|**Management of the Legal and Regulatory Environment** - the category addresses a company's approach to engaging with regulators in cases where conflicting corporate and public interests may have the potential for long-term adverse direct or indirect environmental and social impacts. The category addresses a company's level of reliance upon regulatory policy or monetary incentives (such as subsidies and taxes), actions to influence industry policy (such as through lobbying), overall reliance on a favorable regulatory environment for business competitiveness, and ability to comply with relevant regulations. 
It may relate to the alignment of management and investor views of regulatory engagement and compliance at large.| |**MATSOURCINGANDEFF**|**Materials Sourcing and Efficiency** - the category addresses issues related to the resilience of materials supply chains to impacts of climate change and other external environmental and social factors. It captures the impacts of such external factors on operational activity of suppliers, which can further affect availability and pricing of key resources. It addresses a company's ability to manage these risks through product design, manufacturing, and end-of-life management, such as by using of recycled and renewable materials, reducing the use of key materials (dematerialization), maximizing resource efficiency in manufacturing, and making R&D investments in substitute materials. Additionally, companies can manage these issues by screening, selection, monitoring, and engagement with suppliers to ensure their resilience to external risks. It does not address issues associated with environmental and social externalities created by operational activity of individual suppliers, which is covered in a separate category.| |**MATERIALITY**|**Materiality** - this category represents a composite score of all 'material' SASB categories for the given entity. For more information on SASB's Materiality Map, visit [materiality.sasb.org](https://materiality.sasb.org/)| |**PHYIMPACTSOFCLIMATECHG**|**Physical Impacts of Climate Change** - the category addresses the company's ability to manage risks and opportunities associated with direct exposure of its owned or controlled assets and operations to actual or potential physical impacts of climate change. It captures environmental and social issues that may arise from operational disruptions due to physical impacts of climate change. 
It further captures socioeconomic issues resulting from companies failing to incorporate climate change consideration in products and services sold, such as insurance policies and mortgages. The category relates to the company's ability to adapt to increased frequency and severity of extreme weather, shifting climate, sea level risk, and other expected physical impacts of climate change. Management may involve enhancing resiliency of physical assets and/or surrounding infrastructure as well as incorporation of climate change-related considerations into key business activities (e.g., mortgage and insurance underwriting, planning and development of real estate projects).| |**PDANDLIFECYCLEMGT**|**Product Design and Lifecycle Management** - the category addresses incorporation of environmental, social, and governance (ESG) considerations in characteristics of products and services provided or sold by the company. It includes, but is not limited to, managing the lifecycle impacts of products and services, such as those related to packaging, distribution, use-phase resource intensity, and other environmental and social externalities that may occur during their use-phase or at the end of life. The category captures a company's ability to address customer and societal demand for more sustainable products and services as well as to meet evolving environmental and social regulation. It does not address direct environmental or social impacts of the company's operations nor does it address health and safety risks to consumers from product use, which are covered in other categories.| |**PRODQUALITYANDSFTY**|**Product Quality and Safety** - the category addresses issues involving unintended characteristics of products sold or services provided that may create health or safety risks to end-users. It addresses a company's ability to offer manufactured products and/or services that meet customer expectations with respect to their health and safety characteristics. 
It includes, but is not limited to, issues involving liability, management of recalls and market withdrawals, product testing, and chemicals/content/ ingredient management in products.| |**SELLPRACANDPRODLABEL**|**Selling Practices and Product Labeling** - the category addresses social issues that may arise from a failure to manage the transparency, accuracy, and comprehensibility of marketing statements, advertising, and labeling of products and services. It includes, but is not limited to, advertising standards and regulations, ethical and responsible marketing practices, misleading or deceptive labeling, as well as discriminatory or predatory selling and lending practices. This may include deceptive or aggressive selling practices in which incentive structures for employees could encourage the sale of products or services that are not in the best interest of customers or clients.| |**SUPPLYCHAINMGT**|**Supply Chain Management** - the category addresses management of environmental, social, and governance (ESG) risks within a company's supply chain. It addresses issues associated with environmental and social externalities created by suppliers through their operational activities. Such issues include, but are not limited to, environmental responsibility, human rights, labor practices, and ethics and corruption. Management may involve screening, selection, monitoring, and engagement with suppliers on their environmental and social impacts. The category does not address the impacts of external factors – such as climate change and other environmental and social factors – on suppliers' operations and/or on the availability and pricing of key resources, which is covered in a separate category.| |**SYSTEMICRISKMGT**|**Systemic Risk Management** - the category addresses the company's contributions to, or management of systemic risks resulting from large-scale weakening or collapse of systems upon which the economy and society depend. 
This includes financial systems, natural resource systems, and technological systems. It addresses the mechanisms a company has in place to reduce its contributions to systemic risks and to improve safeguards that may mitigate the impacts of systemic failure. For financial institutions, the category also captures the company's ability to absorb shocks arising from financial and economic stress and meet stricter regulatory requirements related to the complexity and interconnectedness of companies in the industry.| |**WASTEANDHZRDMATSMGT**|**Waste and Hazardous Materials Management** - the category addresses environmental issues associated with hazardous and non-hazardous waste generated by companies. It addresses a company's management of solid wastes in manufacturing, agriculture, and other industrial processes. It covers treatment, handling, storage, disposal, and regulatory compliance. The category does not cover emissions to air or wastewater, nor does it cover waste from end-of-life of products, which are addressed in separate categories.| |**WATERANDWASTEWATERMGT**|**Water and Wastewater Management** - the category addresses a company's water use, water consumption, wastewater generation, and other impacts of operations on water resources, which may be influenced by regional differences in the availability and quality of and competition for water resources. More specifically, it addresses management strategies including, but not limited to, water efficiency, intensity, and recycling. Lastly, the category also addresses management of wastewater treatment and discharge, including groundwater and aquifer pollution.| ### Helper Input **ALL** = Simply gives the ability to request all categories in a single request without having to explicitly list out all 26 in the request. *This is not the same as \"ALLCATEGORIES\" which is a single overall score for the company and returned as default.* . if omitted defaults to ["ALLCATEGORIES"] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
self = super(OpenApiModel, cls).__new__(cls)
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
value = ["ALLCATEGORIES"]
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
return self
| 325.4375
| 19,242
| 0.787661
| 11,895
| 88,519
| 5.837999
| 0.082388
| 0.028513
| 0.031105
| 0.012442
| 0.970436
| 0.969932
| 0.969572
| 0.968766
| 0.968766
| 0.968766
| 0
| 0.001178
| 0.165818
| 88,519
| 271
| 19,243
| 326.638376
| 0.939045
| 0.940984
| 0
| 0.592
| 0
| 0
| 0.144358
| 0.030838
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032
| false
| 0.032
| 0.032
| 0.008
| 0.16
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
2cfb765c776af52e45f1680f2d3a099e58b06987
| 144
|
py
|
Python
|
images/tests/__init__.py
|
DevangS/CoralNet
|
7c56d4ec95a771718175bd94c3ef51c4095082e3
|
[
"BSD-2-Clause"
] | 4
|
2015-12-23T05:14:35.000Z
|
2019-07-09T03:27:10.000Z
|
images/tests/__init__.py
|
DevangS/CoralNet
|
7c56d4ec95a771718175bd94c3ef51c4095082e3
|
[
"BSD-2-Clause"
] | 3
|
2015-04-07T02:45:15.000Z
|
2015-07-01T19:25:10.000Z
|
images/tests/__init__.py
|
DevangS/CoralNet
|
7c56d4ec95a771718175bd94c3ef51c4095082e3
|
[
"BSD-2-Clause"
] | 2
|
2016-01-21T17:25:48.000Z
|
2019-08-29T18:42:14.000Z
|
# Import tests from each test file.
# e.g. if we have tests/sources.py, then "from sources import *"
from sources import *
from tasks import *
| 24
| 64
| 0.722222
| 24
| 144
| 4.333333
| 0.625
| 0.211538
| 0.326923
| 0.403846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 144
| 5
| 65
| 28.8
| 0.888889
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
ad874b470a5024c44f3bc75186f62d05dfa6d4fe
| 11,092
|
py
|
Python
|
sdk/python/pulumi_akamai/config/outputs.py
|
pulumi/pulumi-akamai
|
85f933ccf2f61738b3074a13fa718132280f8364
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2021-01-21T15:22:12.000Z
|
2021-08-25T14:15:29.000Z
|
sdk/python/pulumi_akamai/config/outputs.py
|
pulumi/pulumi-akamai
|
85f933ccf2f61738b3074a13fa718132280f8364
|
[
"ECL-2.0",
"Apache-2.0"
] | 59
|
2020-08-13T14:39:36.000Z
|
2022-03-31T15:19:48.000Z
|
sdk/python/pulumi_akamai/config/outputs.py
|
pulumi/pulumi-akamai
|
85f933ccf2f61738b3074a13fa718132280f8364
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'Appsecs',
'Config',
'Dns',
'Gtm',
'Networks',
'Property',
]
@pulumi.output_type
class Appsecs(dict):
def __init__(__self__, *,
access_token: Optional[str] = None,
account_key: Optional[str] = None,
client_secret: Optional[str] = None,
client_token: Optional[str] = None,
host: Optional[str] = None,
max_body: Optional[int] = None):
if access_token is not None:
pulumi.set(__self__, "access_token", access_token)
if account_key is not None:
pulumi.set(__self__, "account_key", account_key)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if client_token is not None:
pulumi.set(__self__, "client_token", client_token)
if host is not None:
pulumi.set(__self__, "host", host)
if max_body is not None:
pulumi.set(__self__, "max_body", max_body)
@property
@pulumi.getter(name="accessToken")
def access_token(self) -> Optional[str]:
return pulumi.get(self, "access_token")
@property
@pulumi.getter(name="accountKey")
def account_key(self) -> Optional[str]:
return pulumi.get(self, "account_key")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[str]:
return pulumi.get(self, "client_secret")
@property
@pulumi.getter(name="clientToken")
def client_token(self) -> Optional[str]:
return pulumi.get(self, "client_token")
@property
@pulumi.getter
def host(self) -> Optional[str]:
return pulumi.get(self, "host")
@property
@pulumi.getter(name="maxBody")
def max_body(self) -> Optional[int]:
return pulumi.get(self, "max_body")
@pulumi.output_type
class Config(dict):
def __init__(__self__, *,
access_token: Optional[str] = None,
account_key: Optional[str] = None,
client_secret: Optional[str] = None,
client_token: Optional[str] = None,
host: Optional[str] = None,
max_body: Optional[int] = None):
if access_token is not None:
pulumi.set(__self__, "access_token", access_token)
if account_key is not None:
pulumi.set(__self__, "account_key", account_key)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if client_token is not None:
pulumi.set(__self__, "client_token", client_token)
if host is not None:
pulumi.set(__self__, "host", host)
if max_body is not None:
pulumi.set(__self__, "max_body", max_body)
@property
@pulumi.getter(name="accessToken")
def access_token(self) -> Optional[str]:
return pulumi.get(self, "access_token")
@property
@pulumi.getter(name="accountKey")
def account_key(self) -> Optional[str]:
return pulumi.get(self, "account_key")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[str]:
return pulumi.get(self, "client_secret")
@property
@pulumi.getter(name="clientToken")
def client_token(self) -> Optional[str]:
return pulumi.get(self, "client_token")
@property
@pulumi.getter
def host(self) -> Optional[str]:
return pulumi.get(self, "host")
@property
@pulumi.getter(name="maxBody")
def max_body(self) -> Optional[int]:
return pulumi.get(self, "max_body")
@pulumi.output_type
class Dns(dict):
def __init__(__self__, *,
access_token: Optional[str] = None,
account_key: Optional[str] = None,
client_secret: Optional[str] = None,
client_token: Optional[str] = None,
host: Optional[str] = None,
max_body: Optional[int] = None):
if access_token is not None:
pulumi.set(__self__, "access_token", access_token)
if account_key is not None:
pulumi.set(__self__, "account_key", account_key)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if client_token is not None:
pulumi.set(__self__, "client_token", client_token)
if host is not None:
pulumi.set(__self__, "host", host)
if max_body is not None:
pulumi.set(__self__, "max_body", max_body)
@property
@pulumi.getter(name="accessToken")
def access_token(self) -> Optional[str]:
return pulumi.get(self, "access_token")
@property
@pulumi.getter(name="accountKey")
def account_key(self) -> Optional[str]:
return pulumi.get(self, "account_key")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[str]:
return pulumi.get(self, "client_secret")
@property
@pulumi.getter(name="clientToken")
def client_token(self) -> Optional[str]:
return pulumi.get(self, "client_token")
@property
@pulumi.getter
def host(self) -> Optional[str]:
return pulumi.get(self, "host")
@property
@pulumi.getter(name="maxBody")
def max_body(self) -> Optional[int]:
return pulumi.get(self, "max_body")
@pulumi.output_type
class Gtm(dict):
def __init__(__self__, *,
access_token: Optional[str] = None,
account_key: Optional[str] = None,
client_secret: Optional[str] = None,
client_token: Optional[str] = None,
host: Optional[str] = None,
max_body: Optional[int] = None):
if access_token is not None:
pulumi.set(__self__, "access_token", access_token)
if account_key is not None:
pulumi.set(__self__, "account_key", account_key)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if client_token is not None:
pulumi.set(__self__, "client_token", client_token)
if host is not None:
pulumi.set(__self__, "host", host)
if max_body is not None:
pulumi.set(__self__, "max_body", max_body)
@property
@pulumi.getter(name="accessToken")
def access_token(self) -> Optional[str]:
return pulumi.get(self, "access_token")
@property
@pulumi.getter(name="accountKey")
def account_key(self) -> Optional[str]:
return pulumi.get(self, "account_key")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[str]:
return pulumi.get(self, "client_secret")
@property
@pulumi.getter(name="clientToken")
def client_token(self) -> Optional[str]:
return pulumi.get(self, "client_token")
@property
@pulumi.getter
def host(self) -> Optional[str]:
return pulumi.get(self, "host")
@property
@pulumi.getter(name="maxBody")
def max_body(self) -> Optional[int]:
return pulumi.get(self, "max_body")
@pulumi.output_type
class Networks(dict):
def __init__(__self__, *,
access_token: Optional[str] = None,
account_key: Optional[str] = None,
client_secret: Optional[str] = None,
client_token: Optional[str] = None,
host: Optional[str] = None,
max_body: Optional[int] = None):
if access_token is not None:
pulumi.set(__self__, "access_token", access_token)
if account_key is not None:
pulumi.set(__self__, "account_key", account_key)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if client_token is not None:
pulumi.set(__self__, "client_token", client_token)
if host is not None:
pulumi.set(__self__, "host", host)
if max_body is not None:
pulumi.set(__self__, "max_body", max_body)
@property
@pulumi.getter(name="accessToken")
def access_token(self) -> Optional[str]:
return pulumi.get(self, "access_token")
@property
@pulumi.getter(name="accountKey")
def account_key(self) -> Optional[str]:
return pulumi.get(self, "account_key")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[str]:
return pulumi.get(self, "client_secret")
@property
@pulumi.getter(name="clientToken")
def client_token(self) -> Optional[str]:
return pulumi.get(self, "client_token")
@property
@pulumi.getter
def host(self) -> Optional[str]:
return pulumi.get(self, "host")
@property
@pulumi.getter(name="maxBody")
def max_body(self) -> Optional[int]:
return pulumi.get(self, "max_body")
@pulumi.output_type
class Property(dict):
def __init__(__self__, *,
access_token: Optional[str] = None,
account_key: Optional[str] = None,
client_secret: Optional[str] = None,
client_token: Optional[str] = None,
host: Optional[str] = None,
max_body: Optional[int] = None):
if access_token is not None:
pulumi.set(__self__, "access_token", access_token)
if account_key is not None:
pulumi.set(__self__, "account_key", account_key)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if client_token is not None:
pulumi.set(__self__, "client_token", client_token)
if host is not None:
pulumi.set(__self__, "host", host)
if max_body is not None:
pulumi.set(__self__, "max_body", max_body)
@property
@pulumi.getter(name="accessToken")
def access_token(self) -> Optional[str]:
return pulumi.get(self, "access_token")
@property
@pulumi.getter(name="accountKey")
def account_key(self) -> Optional[str]:
return pulumi.get(self, "account_key")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[str]:
return pulumi.get(self, "client_secret")
@property
@pulumi.getter(name="clientToken")
def client_token(self) -> Optional[str]:
return pulumi.get(self, "client_token")
@property
@pulumi.getter
def host(self) -> Optional[str]:
return pulumi.get(self, "host")
@property
@pulumi.getter(name="maxBody")
def max_body(self) -> Optional[int]:
return pulumi.get(self, "max_body")
| 32.816568
| 87
| 0.611251
| 1,322
| 11,092
| 4.83888
| 0.058245
| 0.103173
| 0.050649
| 0.084415
| 0.94685
| 0.94685
| 0.94685
| 0.94685
| 0.94685
| 0.94685
| 0
| 0.000124
| 0.271006
| 11,092
| 337
| 88
| 32.913947
| 0.790997
| 0.015957
| 0
| 0.932862
| 1
| 0
| 0.097259
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.14841
| false
| 0
| 0.017668
| 0.127208
| 0.314488
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 9
|
ad8f4b4dea2337cb7ee666c66320dde953c1797f
| 148
|
py
|
Python
|
mysite/dipapp/admin.py
|
Shamsuzzaman321/django_rest
|
80e8d046595bc9ed3d0c5ca44696730c8827f931
|
[
"MIT"
] | null | null | null |
mysite/dipapp/admin.py
|
Shamsuzzaman321/django_rest
|
80e8d046595bc9ed3d0c5ca44696730c8827f931
|
[
"MIT"
] | 5
|
2021-03-19T04:50:40.000Z
|
2021-09-22T19:19:22.000Z
|
mysite/dipapp/admin.py
|
Shamsuzzaman321/django_rest
|
80e8d046595bc9ed3d0c5ca44696730c8827f931
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
# from django.contrib import admin
from .models import Hero
admin.site.register(Hero)
| 24.666667
| 34
| 0.804054
| 22
| 148
| 5.409091
| 0.5
| 0.168067
| 0.285714
| 0.386555
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128378
| 148
| 6
| 35
| 24.666667
| 0.922481
| 0.398649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a8dcbacdcaf1349939843379f56867c9a67b2c93
| 3,027
|
py
|
Python
|
farmer/farmer.py
|
Nabeel965/AgriTechies
|
75826fbc88c4c41bd4a8a1ec64e19ae593559e78
|
[
"Apache-2.0"
] | null | null | null |
farmer/farmer.py
|
Nabeel965/AgriTechies
|
75826fbc88c4c41bd4a8a1ec64e19ae593559e78
|
[
"Apache-2.0"
] | null | null | null |
farmer/farmer.py
|
Nabeel965/AgriTechies
|
75826fbc88c4c41bd4a8a1ec64e19ae593559e78
|
[
"Apache-2.0"
] | null | null | null |
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Crop
# class CropRegisterForm(UserCreationForm):
# #email = forms.EmailField()
# #model = Crop
# first_name = forms.CharField(max_length=30, required=False, help_text='Optional.')
# last_name = forms.CharField(max_length=30, required=False, help_text='Optional.')
# email = forms.EmailField(max_length=254, help_text='Required. Inform a valid email address.')
# product_type = forms.CharField(max_length=30, required=False, help_text='Optional.')
# product_name= forms.CharField(max_length=30, required=False, help_text='Optional.')
# crop_stage = forms.CharField(max_length=30, required=False, help_text='Optional.')
# total_crop_quantity = forms.CharField(max_length=30, required=False, help_text='Optional.')
# daily_loading = forms.CharField(max_length=30, required=False, help_text='Optional.')
# required_temperature = forms.CharField(max_length=30, required=False, help_text='Optional.')
# required_relative_humidity = forms.CharField(max_length=30, required=False, help_text='Optional.')
# duration_of_storage = forms.CharField(max_length=30, required=False, help_text='Optional.')
# pick_up_location = forms.CharField(max_length=30, required=False, help_text='Optional.')
# drop_off_location = forms.CharField(max_length=30, required=False, help_text='Optional.')
#
#
# class Meta:
# model = User
# fields = ['username', 'first_name', 'last_name', "email", "product_type", "product_name","crop_stage", "total_crop_quantity", "daily_loading", "required_temperature", "required_relative_humidity", "duration_of_storage", "pick_up_location", "drop_off_location"]
class CropRegisterForm(forms.ModelForm):
product_type = forms.CharField(max_length=30, required=False, help_text='Optional.')
product_name= forms.CharField(max_length=30, required=False, help_text='Optional.')
crop_stage = forms.CharField(max_length=30, required=False, help_text='Optional.')
total_crop_quantity = forms.CharField(max_length=30, required=False, help_text='Optional.')
daily_loading = forms.CharField(max_length=30, required=False, help_text='Optional.')
required_temperature = forms.CharField(max_length=30, required=False, help_text='Optional.')
required_relative_humidity = forms.CharField(max_length=30, required=False, help_text='Optional.')
duration_of_storage = forms.CharField(max_length=30, required=False, help_text='Optional.')
pick_up_location = forms.CharField(max_length=30, required=False, help_text='Optional.')
drop_off_location = forms.CharField(max_length=30, required=False, help_text='Optional.')
class Meta:
model = Crop
fields = ["product_type", "product_name","crop_stage", "total_crop_quantity", "daily_loading", "required_temperature", "required_relative_humidity", "duration_of_storage", "pick_up_location", "drop_off_location"]
| 72.071429
| 270
| 0.755864
| 388
| 3,027
| 5.613402
| 0.139175
| 0.095041
| 0.171717
| 0.232323
| 0.835629
| 0.835629
| 0.835629
| 0.835629
| 0.835629
| 0.835629
| 0
| 0.017505
| 0.112983
| 3,027
| 41
| 271
| 73.829268
| 0.793669
| 0.528246
| 0
| 0
| 0
| 0
| 0.180912
| 0.018519
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.888889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
d14d11753aa3c76d68bfaa6fbf2c4c40690891e9
| 19,916
|
py
|
Python
|
sdk/python/pulumi_alicloud/cen/route_service.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 42
|
2019-03-18T06:34:37.000Z
|
2022-03-24T07:08:57.000Z
|
sdk/python/pulumi_alicloud/cen/route_service.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 152
|
2019-04-15T21:03:44.000Z
|
2022-03-29T18:00:57.000Z
|
sdk/python/pulumi_alicloud/cen/route_service.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2020-08-26T17:30:07.000Z
|
2021-07-05T01:37:45.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['RouteServiceArgs', 'RouteService']
@pulumi.input_type
class RouteServiceArgs:
def __init__(__self__, *,
access_region_id: pulumi.Input[str],
cen_id: pulumi.Input[str],
host: pulumi.Input[str],
host_region_id: pulumi.Input[str],
host_vpc_id: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a RouteService resource.
:param pulumi.Input[str] access_region_id: The region of the network instances that access the cloud services.
:param pulumi.Input[str] cen_id: The ID of the CEN instance.
:param pulumi.Input[str] host: The domain name or IP address of the cloud service.
:param pulumi.Input[str] host_region_id: The region of the cloud service.
:param pulumi.Input[str] host_vpc_id: The VPC associated with the cloud service.
:param pulumi.Input[str] description: The description of the cloud service.
"""
pulumi.set(__self__, "access_region_id", access_region_id)
pulumi.set(__self__, "cen_id", cen_id)
pulumi.set(__self__, "host", host)
pulumi.set(__self__, "host_region_id", host_region_id)
pulumi.set(__self__, "host_vpc_id", host_vpc_id)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter(name="accessRegionId")
def access_region_id(self) -> pulumi.Input[str]:
"""
The region of the network instances that access the cloud services.
"""
return pulumi.get(self, "access_region_id")
@access_region_id.setter
def access_region_id(self, value: pulumi.Input[str]):
pulumi.set(self, "access_region_id", value)
@property
@pulumi.getter(name="cenId")
def cen_id(self) -> pulumi.Input[str]:
"""
The ID of the CEN instance.
"""
return pulumi.get(self, "cen_id")
@cen_id.setter
def cen_id(self, value: pulumi.Input[str]):
pulumi.set(self, "cen_id", value)
@property
@pulumi.getter
def host(self) -> pulumi.Input[str]:
"""
The domain name or IP address of the cloud service.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: pulumi.Input[str]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="hostRegionId")
def host_region_id(self) -> pulumi.Input[str]:
"""
The region of the cloud service.
"""
return pulumi.get(self, "host_region_id")
@host_region_id.setter
def host_region_id(self, value: pulumi.Input[str]):
pulumi.set(self, "host_region_id", value)
@property
@pulumi.getter(name="hostVpcId")
def host_vpc_id(self) -> pulumi.Input[str]:
"""
The VPC associated with the cloud service.
"""
return pulumi.get(self, "host_vpc_id")
@host_vpc_id.setter
def host_vpc_id(self, value: pulumi.Input[str]):
pulumi.set(self, "host_vpc_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the cloud service.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@pulumi.input_type
class _RouteServiceState:
def __init__(__self__, *,
access_region_id: Optional[pulumi.Input[str]] = None,
cen_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
host: Optional[pulumi.Input[str]] = None,
host_region_id: Optional[pulumi.Input[str]] = None,
host_vpc_id: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering RouteService resources.
:param pulumi.Input[str] access_region_id: The region of the network instances that access the cloud services.
:param pulumi.Input[str] cen_id: The ID of the CEN instance.
:param pulumi.Input[str] description: The description of the cloud service.
:param pulumi.Input[str] host: The domain name or IP address of the cloud service.
:param pulumi.Input[str] host_region_id: The region of the cloud service.
:param pulumi.Input[str] host_vpc_id: The VPC associated with the cloud service.
:param pulumi.Input[str] status: The status of the cloud service.
"""
if access_region_id is not None:
pulumi.set(__self__, "access_region_id", access_region_id)
if cen_id is not None:
pulumi.set(__self__, "cen_id", cen_id)
if description is not None:
pulumi.set(__self__, "description", description)
if host is not None:
pulumi.set(__self__, "host", host)
if host_region_id is not None:
pulumi.set(__self__, "host_region_id", host_region_id)
if host_vpc_id is not None:
pulumi.set(__self__, "host_vpc_id", host_vpc_id)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="accessRegionId")
def access_region_id(self) -> Optional[pulumi.Input[str]]:
"""
The region of the network instances that access the cloud services.
"""
return pulumi.get(self, "access_region_id")
@access_region_id.setter
def access_region_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "access_region_id", value)
@property
@pulumi.getter(name="cenId")
def cen_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the CEN instance.
"""
return pulumi.get(self, "cen_id")
@cen_id.setter
def cen_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cen_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the cloud service.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
The domain name or IP address of the cloud service.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
    @property
    @pulumi.getter(name="hostRegionId")
    def host_region_id(self) -> Optional[pulumi.Input[str]]:
        """
        The region of the cloud service.
        """
        return pulumi.get(self, "host_region_id")
    @host_region_id.setter
    def host_region_id(self, value: Optional[pulumi.Input[str]]):
        """Set the host region id in pulumi's internal property table."""
        pulumi.set(self, "host_region_id", value)
    @property
    @pulumi.getter(name="hostVpcId")
    def host_vpc_id(self) -> Optional[pulumi.Input[str]]:
        """
        The VPC associated with the cloud service.
        """
        return pulumi.get(self, "host_vpc_id")
    @host_vpc_id.setter
    def host_vpc_id(self, value: Optional[pulumi.Input[str]]):
        """Set the host VPC id in pulumi's internal property table."""
        pulumi.set(self, "host_vpc_id", value)
    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        The status of the cloud service.
        """
        return pulumi.get(self, "status")
    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        """Set the status in pulumi's internal property table."""
        pulumi.set(self, "status", value)
class RouteService(pulumi.CustomResource):
    # NOTE: auto-generated pulumi resource class. The two @overload __init__
    # signatures below exist only for type checkers; the real constructor
    # dispatches to _internal_init at runtime.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 access_region_id: Optional[pulumi.Input[str]] = None,
                 cen_id: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 host: Optional[pulumi.Input[str]] = None,
                 host_region_id: Optional[pulumi.Input[str]] = None,
                 host_vpc_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a CEN Route Service resource. The virtual border routers (VBRs) and Cloud Connect Network (CCN) instances attached to Cloud Enterprise Network (CEN) instances can access the cloud services deployed in VPCs through the CEN instances.
        For information about CEN Route Service and how to use it, see [What is Route Service](https://www.alibabacloud.com/help/en/doc-detail/106671.htm).
        > **NOTE:** Available in v1.99.0+.
        > **NOTE:** Ensure that at least one VPC in the selected region is attached to the CEN instance.
        ## Example Usage
        Basic Usage
        ```python
        import pulumi
        import pulumi_alicloud as alicloud
        config = pulumi.Config()
        name = config.get("name")
        if name is None:
            name = "tf-test"
        example_networks = alicloud.vpc.get_networks(is_default=True)
        example_instance = alicloud.cen.Instance("exampleInstance")
        vpc = alicloud.cen.InstanceAttachment("vpc",
            instance_id=example_instance.id,
            child_instance_id=example_networks.vpcs[0].id,
            child_instance_type="VPC",
            child_instance_region_id=example_networks.vpcs[0].region_id)
        this = alicloud.cen.RouteService("this",
            access_region_id=example_networks.vpcs[0].region_id,
            host_region_id=example_networks.vpcs[0].region_id,
            host_vpc_id=example_networks.vpcs[0].id,
            cen_id=vpc.instance_id,
            host="100.118.28.52/32")
        ```
        ## Import
        CEN Route Service can be imported using the id, e.g.
        ```sh
         $ pulumi import alicloud:cen/routeService:RouteService example cen-ahixm0efqh********:cn-shanghai:100.118.28.52/32:cn-shanghai
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] access_region_id: The region of the network instances that access the cloud services.
        :param pulumi.Input[str] cen_id: The ID of the CEN instance.
        :param pulumi.Input[str] description: The description of the cloud service.
        :param pulumi.Input[str] host: The domain name or IP address of the cloud service.
        :param pulumi.Input[str] host_region_id: The region of the cloud service.
        :param pulumi.Input[str] host_vpc_id: The VPC associated with the cloud service.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: RouteServiceArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a CEN Route Service resource. The virtual border routers (VBRs) and Cloud Connect Network (CCN) instances attached to Cloud Enterprise Network (CEN) instances can access the cloud services deployed in VPCs through the CEN instances.
        For information about CEN Route Service and how to use it, see [What is Route Service](https://www.alibabacloud.com/help/en/doc-detail/106671.htm).
        > **NOTE:** Available in v1.99.0+.
        > **NOTE:** Ensure that at least one VPC in the selected region is attached to the CEN instance.
        ## Example Usage
        Basic Usage
        ```python
        import pulumi
        import pulumi_alicloud as alicloud
        config = pulumi.Config()
        name = config.get("name")
        if name is None:
            name = "tf-test"
        example_networks = alicloud.vpc.get_networks(is_default=True)
        example_instance = alicloud.cen.Instance("exampleInstance")
        vpc = alicloud.cen.InstanceAttachment("vpc",
            instance_id=example_instance.id,
            child_instance_id=example_networks.vpcs[0].id,
            child_instance_type="VPC",
            child_instance_region_id=example_networks.vpcs[0].region_id)
        this = alicloud.cen.RouteService("this",
            access_region_id=example_networks.vpcs[0].region_id,
            host_region_id=example_networks.vpcs[0].region_id,
            host_vpc_id=example_networks.vpcs[0].id,
            cen_id=vpc.instance_id,
            host="100.118.28.52/32")
        ```
        ## Import
        CEN Route Service can be imported using the id, e.g.
        ```sh
         $ pulumi import alicloud:cen/routeService:RouteService example cen-ahixm0efqh********:cn-shanghai:100.118.28.52/32:cn-shanghai
        ```
        :param str resource_name: The name of the resource.
        :param RouteServiceArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the two overloads above: either a bundled
        # RouteServiceArgs object or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(RouteServiceArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       access_region_id: Optional[pulumi.Input[str]] = None,
                       cen_id: Optional[pulumi.Input[str]] = None,
                       description: Optional[pulumi.Input[str]] = None,
                       host: Optional[pulumi.Input[str]] = None,
                       host_region_id: Optional[pulumi.Input[str]] = None,
                       host_vpc_id: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Shared constructor implementation backing both __init__ overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (no provider id supplied): build the
            # property bag from the keyword arguments.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = RouteServiceArgs.__new__(RouteServiceArgs)
            # Required inputs may only be omitted when rehydrating from a URN.
            if access_region_id is None and not opts.urn:
                raise TypeError("Missing required property 'access_region_id'")
            __props__.__dict__["access_region_id"] = access_region_id
            if cen_id is None and not opts.urn:
                raise TypeError("Missing required property 'cen_id'")
            __props__.__dict__["cen_id"] = cen_id
            __props__.__dict__["description"] = description
            if host is None and not opts.urn:
                raise TypeError("Missing required property 'host'")
            __props__.__dict__["host"] = host
            if host_region_id is None and not opts.urn:
                raise TypeError("Missing required property 'host_region_id'")
            __props__.__dict__["host_region_id"] = host_region_id
            if host_vpc_id is None and not opts.urn:
                raise TypeError("Missing required property 'host_vpc_id'")
            __props__.__dict__["host_vpc_id"] = host_vpc_id
            # status is an output-only property; the provider fills it in.
            __props__.__dict__["status"] = None
        super(RouteService, __self__).__init__(
            'alicloud:cen/routeService:RouteService',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            access_region_id: Optional[pulumi.Input[str]] = None,
            cen_id: Optional[pulumi.Input[str]] = None,
            description: Optional[pulumi.Input[str]] = None,
            host: Optional[pulumi.Input[str]] = None,
            host_region_id: Optional[pulumi.Input[str]] = None,
            host_vpc_id: Optional[pulumi.Input[str]] = None,
            status: Optional[pulumi.Input[str]] = None) -> 'RouteService':
        """
        Get an existing RouteService resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] access_region_id: The region of the network instances that access the cloud services.
        :param pulumi.Input[str] cen_id: The ID of the CEN instance.
        :param pulumi.Input[str] description: The description of the cloud service.
        :param pulumi.Input[str] host: The domain name or IP address of the cloud service.
        :param pulumi.Input[str] host_region_id: The region of the cloud service.
        :param pulumi.Input[str] host_vpc_id: The VPC associated with the cloud service.
        :param pulumi.Input[str] status: The status of the cloud service.
        """
        # Merging the supplied id into the options tells the engine to look up
        # an existing resource instead of creating one.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _RouteServiceState.__new__(_RouteServiceState)
        __props__.__dict__["access_region_id"] = access_region_id
        __props__.__dict__["cen_id"] = cen_id
        __props__.__dict__["description"] = description
        __props__.__dict__["host"] = host
        __props__.__dict__["host_region_id"] = host_region_id
        __props__.__dict__["host_vpc_id"] = host_vpc_id
        __props__.__dict__["status"] = status
        return RouteService(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="accessRegionId")
    def access_region_id(self) -> pulumi.Output[str]:
        """
        The region of the network instances that access the cloud services.
        """
        return pulumi.get(self, "access_region_id")
    @property
    @pulumi.getter(name="cenId")
    def cen_id(self) -> pulumi.Output[str]:
        """
        The ID of the CEN instance.
        """
        return pulumi.get(self, "cen_id")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        The description of the cloud service.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def host(self) -> pulumi.Output[str]:
        """
        The domain name or IP address of the cloud service.
        """
        return pulumi.get(self, "host")
    @property
    @pulumi.getter(name="hostRegionId")
    def host_region_id(self) -> pulumi.Output[str]:
        """
        The region of the cloud service.
        """
        return pulumi.get(self, "host_region_id")
    @property
    @pulumi.getter(name="hostVpcId")
    def host_vpc_id(self) -> pulumi.Output[str]:
        """
        The VPC associated with the cloud service.
        """
        return pulumi.get(self, "host_vpc_id")
    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[str]:
        """
        The status of the cloud service.
        """
        return pulumi.get(self, "status")
| 39.991968
| 249
| 0.632507
| 2,472
| 19,916
| 4.855987
| 0.085356
| 0.08064
| 0.1003
| 0.078807
| 0.85988
| 0.838221
| 0.815561
| 0.792902
| 0.778241
| 0.765911
| 0
| 0.005526
| 0.264009
| 19,916
| 497
| 250
| 40.072435
| 0.813412
| 0.348715
| 0
| 0.616601
| 1
| 0
| 0.098141
| 0.003271
| 0
| 0
| 0
| 0
| 0
| 1
| 0.158103
| false
| 0.003953
| 0.019763
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
66f25bf31f61f762d820874dc9fa729638ca4f41
| 35,840
|
py
|
Python
|
tests/test_data.py
|
jscuds/rf-bert
|
6871809e986e418b9b34068260dc11a4531f79f7
|
[
"MIT"
] | null | null | null |
tests/test_data.py
|
jscuds/rf-bert
|
6871809e986e418b9b34068260dc11a4531f79f7
|
[
"MIT"
] | 14
|
2022-02-08T19:46:29.000Z
|
2022-03-13T23:00:50.000Z
|
tests/test_data.py
|
jscuds/rf-bert
|
6871809e986e418b9b34068260dc11a4531f79f7
|
[
"MIT"
] | null | null | null |
import sys
from pathlib import Path
import numpy as np
import torch
from dataloaders import ParaphraseDatasetBert, ParaphraseDatasetElmo
from mosestokenizer import MosesTokenizer
from dataloaders.helpers import load_rotten_tomatoes, load_qqp, load_sst2, train_test_split
# Instantiate ParaphraseDataset class variants for BERT and ELMo
class TestQuoraBert:
    """Regression tests pinning ParaphraseDatasetBert internals on the Quora split.

    All expected values are seed-dependent (seed=42) and were captured from a
    known-good run; they guard against silent changes in tokenization/pairing.
    """
    quora = ParaphraseDatasetBert(para_dataset = 'quora', model_name = 'bert-base-uncased', num_examples = 20000,
        max_length = 40, stop_words_file = './stop_words_en.txt', r1 =0.5, seed = 42)
    def test_id_to_sent(self):
        # BERT wordpiece ids for one sentence ([CLS]...[SEP]), padded with 0s to length 40
        sent = [101, 2129, 2079, 1045, 3857, 2026, 6337, 2005, 2327, 1038, 1011, 2816, 1029, 102] + ([0] * 26)
        assert (
            self.quora._id_to_sent[213590] == sent
        )
    def test_sent_to_id(self):
        # _sent_to_id must be the exact inverse of _id_to_sent for this example.
        idx = 213590
        tokenized_sentence_tuple = tuple(self.quora._id_to_sent[idx])
        # Fix: the original printed the *builtin* `id` function instead of the
        # local index (print('id:', id)).
        print('idx:', idx)
        assert self.quora._sent_to_id[tokenized_sentence_tuple] == idx
    def test_paraphrase_sets(self):
        # sorted() accepts any iterable; no need to materialize a list first.
        paraphrase_ids = sorted(self.quora._paraphrase_sets)[0]
        assert paraphrase_ids == (32, 1101) #[136860, 136861] #[67843, 105500]
        print(f'paraphrase_sets[0]:\t{paraphrase_ids}') # (136860, 136861) OR (105500, 67843) OR...because it's a set
    def test_para_tuples(self):
        assert self.quora._para_tuples[0] == (213590, 149174, 6, 6)
        assert len(self.quora._para_tuples) == 20_062 # TODO: CHECK; 17049 colab vs. 17029 local
    def test_neg_tuples(self):
        assert self.quora._neg_tuples[0] == (416022, 416023, 18, 4)
        assert len(self.quora._neg_tuples) == 23099 # TODO: CHECK; 19806 colab vs 19773 local
    def test_token_pair_to_neg_tuples(self):
        assert self.quora._token_pair_to_neg_tuples[(12952, 12952)] == {7712, 12931, 19173, 11, 21644, 15791, 11824, 20402, 2611, 19732}
        # token 12952 == '##pal'
        # TODO CHECK; {11, 2614, 7723, 11841, 12950, 15814, 19206, 19765, 20436, 21679}
    def test_token_to_sents(self):
        # len() works on the dict directly; .keys() is redundant.
        assert len(self.quora._token_to_sents) == 14_543  # TODO: check - 13848??
    def test_list_types_and_len(self):
        # Exact-type checks are intentional here (the dataset promises plain lists).
        assert type(self.quora._para_tuples) == list
        assert type(self.quora._neg_tuples) == list
        assert len(self.quora) == len(self.quora._para_tuples)
    def test_quora_example_types(self):
        # __getitem__ returns 4 tensors (two paraphrase sents, two negative
        # sents) followed by 4 int token indices.
        test_sent1, test_sent2, test_nsent1, test_nsent2, test_token1, test_token2, test_ntoken1, test_ntoken2 = self.quora[0]
        assert isinstance(test_sent1, torch.Tensor)
        assert isinstance(test_sent2, torch.Tensor)
        assert isinstance(test_nsent1, torch.Tensor)
        assert isinstance(test_nsent2, torch.Tensor)
        assert isinstance(test_token1, int)
        assert isinstance(test_token2, int)
        assert isinstance(test_ntoken1, int)
        assert isinstance(test_ntoken2, int)
class TestQuoraElmo:
    """Regression tests pinning ParaphraseDatasetElmo internals on the Quora train split.

    All expected values are seed-dependent (seed=42) and were captured from a
    known-good run.
    """
    quora = ParaphraseDatasetElmo(para_dataset = 'quora', model_name = 'elmo', num_examples = 20000,
        max_length = 40, stop_words_file = './stop_words_en.txt', r1 = 0.5, seed = 42, split = 'train') # all of these are seed dependent
    @staticmethod
    def _elmo_word(word):
        """Build the 50-char ELMo character-id tuple for one token.

        ELMo's character encoding (ELMoCharacterMapper, after its +1 offset):
        char id = ord(char) + 1, begin-of-word = 259, end-of-word = 260,
        padding = 261, each word padded to 50 character slots.
        """
        ids = (259,) + tuple(ord(c) + 1 for c in word) + (260,)
        return ids + (261,) * (50 - len(ids))
    def test_length(self):
        assert len(self.quora) == 16753
    def test_id_to_sent(self):
        # Sentence 70: build its expected char-id encoding programmatically
        # (20 tokens + all-zero rows padding the sentence to max_length=40)
        # instead of a ~2000-element hand-written literal.
        words = 'What is the best self help book you have read ? Why ? How did it change your life ?'.split()
        sent = tuple(self._elmo_word(w) for w in words) + ((0,) * 50,) * (40 - len(words))
        assert (
            self.quora._id_to_sent[70] == sent
        )
    def test_sent_to_id(self):
        # _sent_to_id must invert _id_to_sent for this example.
        idx = 70
        tokenized_sentence_tuple = tuple(self.quora._id_to_sent[idx])
        assert self.quora._sent_to_id[tokenized_sentence_tuple] == idx
    def test_paraphrase_sets(self):
        paraphrase_ids = sorted(self.quora._paraphrase_sets)[0]
        paraphrase_sets = self.quora._paraphrase_sets
        assert paraphrase_ids == (70, 71)
        assert (46311, 46310) in paraphrase_sets
        assert (4090, 4091) in paraphrase_sets
        print(f'sorted(paraphrase_sets)[0]:\t{paraphrase_ids}')
    def test_para_tuples(self):
        assert sorted(self.quora._para_tuples)[0] == (70, 71, 9, 9)
        assert (3540, 3541, 3, 5) in self.quora._para_tuples
        assert len(self.quora._para_tuples) == len(self.quora)
    def test_neg_tuples(self):
        assert self.quora._neg_tuples[0] == (2704640, 2704641, 3, 3)
        assert len(self.quora._neg_tuples) == 19_559
    def test_token_pair_to_neg_tuples(self):
        # character-id tuple representation of 'Zealand'
        Zealand = self._elmo_word('Zealand')
        assert self.quora._token_pair_to_neg_tuples[(Zealand, Zealand)] in [{8358, 8968, 19471}] #{0, 11654} # NOTE: this is system dependent (cluster -> 11653, local -> 11654); it references the index of the list self._neg_tuples; so the index changes based on how the list was created
    def test_token_to_sents(self):
        assert len(self.quora._token_to_sents) == 23_853
    def test_list_types_and_len(self):
        # Exact-type checks are intentional (the dataset promises plain lists).
        assert type(self.quora._para_tuples) == list
        assert type(self.quora._neg_tuples) == list
        assert len(self.quora) == len(self.quora._para_tuples)
    def test_quora_example_types(self):
        # __getitem__ returns 4 tensors followed by 4 int token indices.
        test_sent1, test_sent2, test_nsent1, test_nsent2, test_token1, test_token2, test_ntoken1, test_ntoken2 = self.quora[0]
        assert isinstance(test_sent1, torch.Tensor)
        assert isinstance(test_sent2, torch.Tensor)
        assert isinstance(test_nsent1, torch.Tensor)
        assert isinstance(test_nsent2, torch.Tensor)
        assert isinstance(test_token1, int)
        assert isinstance(test_token2, int)
        assert isinstance(test_ntoken1, int)
        assert isinstance(test_ntoken2, int)
class TestMrpcElmo:
mrpc = ParaphraseDatasetElmo(para_dataset = 'mrpc', model_name = 'elmo', num_examples = None,
max_length = 40, stop_words_file = './stop_words_en.txt', r1 = 0.5, seed = 42,
split = 'train', lowercase_inputs=False) # all of these are seed dependent
mrpc_lower = ParaphraseDatasetElmo(para_dataset = 'mrpc', model_name = 'elmo', num_examples = None,
max_length = 40, stop_words_file = './stop_words_en.txt', r1 = 0.5, seed = 42,
split = 'train', lowercase_inputs=True) # all of these are seed dependent
def test_length(self):
assert len(self.mrpc) == 10473
def test_id_to_sent(self):
# sentence tuple for "Amrozi accused his brother, whom he called "the witness", of deliberately distorting his evidence."
sent = ((259, 66, 110, 115, 112, 123, 106, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261), (259, 98, 100, 100, 118, 116, 102, 101, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261), (259, 105, 106, 116, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261), (259, 99, 115, 112, 117, 105, 102, 115, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261), (259, 45, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261), (259, 120, 105, 112, 110, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261), (259, 105, 102, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261), (259, 100, 98, 109, 109, 102, 101, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 
261, 261, 261, 261), (259, 35, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261), (259, 117, 105, 102, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261), (259, 120, 106, 117, 111, 102, 116, 116, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261), (259, 35, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261), (259, 45, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261), (259, 112, 103, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261), (259, 101, 102, 109, 106, 99, 102, 115, 98, 117, 102, 109, 122, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261), (259, 101, 106, 116, 117, 112, 115, 117, 106, 111, 104, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 
261, 261, 261, 261, 261, 261, 261), (259, 105, 106, 116, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261), (259, 102, 119, 106, 101, 102, 111, 100, 102, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261), (259, 47, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
assert (
self.mrpc._id_to_sent[0] == sent
)
def test_sent_to_id(self):
    """_sent_to_id inverts _id_to_sent: looking up a sentence's tuple key
    returns the id the sentence was fetched with."""
    sentence_id = 80
    sentence_key = tuple(self.mrpc._id_to_sent[sentence_id])
    assert self.mrpc._sent_to_id[sentence_key] == sentence_id
def test_paraphrase_sets(self):
    """Paraphrase id pairs are stored symmetrically in _paraphrase_sets."""
    pair_set = self.mrpc._paraphrase_sets
    # smallest pair (sorted(...)[0] is min for tuples)
    assert min(pair_set) == (0, 1)
    # both orderings of a paraphrase pair must be present
    assert (0, 1) in pair_set
    assert (1, 0) in pair_set
def test_para_tuples(self):
    """Spot-check contents and total size of the paraphrase tuple list."""
    para_tuples = self.mrpc._para_tuples
    # min(...) equals sorted(...)[0] for a list of tuples
    assert min(para_tuples) == (0, 1, 0, 10)
    assert (39190, 39191, 24, 25) in para_tuples
    assert len(para_tuples) == 10_473
def test_neg_tuples(self):
    """Spot-check the first negative tuple and the total count."""
    neg_tuples = self.mrpc._neg_tuples
    assert neg_tuples[0] == (39460, 39461, 14, 24)
    assert len(neg_tuples) == 3982
def test_token_pair_to_neg_tuples(self):
    """The (token, token) pair for 'intrusions' maps to the expected set of
    negative-tuple indices."""
    # Character-level tuple for 'intrusions': 259/260 bracket the word's
    # per-character codes and 261 pads the row out to a fixed width of 50.
    intrusions = (259, 106, 111, 117, 115, 118, 116, 106, 112, 111, 116, 260) + (261,) * 38
    # NOTE: this is system dependent (cluster -> 11653, local -> 11654); it
    # references the index of the list self._neg_tuples, so the index changes
    # based on how the list was created.
    assert self.mrpc._token_pair_to_neg_tuples[(intrusions, intrusions)] == {0}
def test_token_to_sents(self):
    """The token -> sentences index has the expected number of entries."""
    # len(d) counts keys directly; no need to materialize d.keys()
    assert len(self.mrpc._token_to_sents) == 10_660
def test_list_types_and_len(self):
    """_para_tuples/_neg_tuples are lists, and the dataset length equals the
    number of paraphrase tuples."""
    # isinstance() is the idiomatic type check; `type(x) == list` rejects
    # subclasses and is flagged by linters (E721).
    assert isinstance(self.mrpc._para_tuples, list)
    assert isinstance(self.mrpc._neg_tuples, list)
    assert len(self.mrpc) == len(self.mrpc._para_tuples)
def test_quora_example_types(self):
    """__getitem__ yields four tensors (two positive, two negative sentences)
    followed by four int token indices."""
    example = self.mrpc[0]
    sentence_parts, token_parts = example[:4], example[4:]
    for sentence in sentence_parts:
        assert isinstance(sentence, torch.Tensor)
    for token_index in token_parts:
        assert isinstance(token_index, int)
def test_lowercase(self):
    """The first sentence of the lowercased MRPC is stored fully lower-cased
    ('Amrozi' would normally be capitalized)."""
    text = ('amrozi accused his brother , whom he called " the witness " ,'
            ' of deliberately distorting his evidence .')
    words = text.split()

    # NOTE(review): encoding inferred from the stored tuples -- 259/260
    # bracket each word, every character maps to ord(char) + 1, 261 pads each
    # word row to width 50, and all-zero rows pad the sentence to 40 rows.
    # Confirm against the tokenizer if its constants ever change.
    def word_row(word):
        core = (259,) + tuple(ord(char) + 1 for char in word) + (260,)
        return core + (261,) * (50 - len(core))

    sent = tuple(word_row(word) for word in words)
    sent += ((0,) * 50,) * (40 - len(words))
    assert self.mrpc_lower._id_to_sent[0] == sent
class TestClassificationDatasets:
    """Smoke tests for the ELMo-character classification data loaders."""

    def test_rotten_tomatoes_elmo(self):
        """A truncated Rotten Tomatoes split yields batches of the requested shape."""
        batch_size, num_examples, max_length = 19, 100, 173
        train_dataloader, test_dataloader = load_rotten_tomatoes(
            batch_size=batch_size, max_length=max_length,
            num_examples=num_examples, drop_last=False
        )
        batch, labels = next(iter(train_dataloader))
        # ELMo character ids: 50 character slots per token
        assert batch.shape == (batch_size, max_length, 50)
        assert labels[0].item() in {0, 1}

    def test_rotten_tomatoes_elmo_full(self):
        """num_examples=None loads the full RT train/test splits; check lengths."""
        train_dataloader, test_dataloader = load_rotten_tomatoes(
            batch_size=1, max_length=16,
            num_examples=None, drop_last=False
        )
        assert len(train_dataloader) == 8530
        assert len(test_dataloader) == 1066

    def test_qqp_elmo(self):
        """QQP batches contain two sentence tensors plus binary labels."""
        batch_size, num_examples, max_length = 7, 19, 173
        train_dataloader, test_dataloader = load_qqp(
            batch_size=batch_size, max_length=max_length,
            num_examples=num_examples, drop_last=False
        )
        first_sents, second_sents, labels = next(iter(train_dataloader))
        # both sentence tensors have the proper shape
        for sentences in (first_sents, second_sents):
            assert sentences.shape == (batch_size, max_length, 50)
        # every label in the batch is 0 or 1
        assert torch.logical_or(labels == 0, labels == 1).all()

    def test_sst2_elmo(self):
        """SST-2 batches contain one sentence tensor plus binary labels."""
        batch_size, num_examples, max_length = 17, 50, 20
        train_dataloader, test_dataloader = load_sst2(
            batch_size=batch_size, max_length=max_length,
            num_examples=num_examples, drop_last=False
        )
        sentences, labels = next(iter(train_dataloader))
        assert sentences.shape == (batch_size, max_length, 50)
        assert torch.logical_or(labels == 0, labels == 1).all()
| 136.793893
| 8,083
| 0.545815
| 7,622
| 35,840
| 2.513776
| 0.042377
| 0.323382
| 0.484447
| 0.645303
| 0.896921
| 0.876722
| 0.856733
| 0.851827
| 0.836169
| 0.83356
| 0
| 0.470322
| 0.246931
| 35,840
| 262
| 8,084
| 136.793893
| 0.23957
| 0.043806
| 0
| 0.464646
| 0
| 0
| 0.006513
| 0.002395
| 0
| 0
| 0
| 0.003817
| 0.393939
| 1
| 0.171717
| false
| 0
| 0.035354
| 0
| 0.247475
| 0.015152
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
66fb839dc2ded8bc147d263448f2d1881bbe48a5
| 7,987
|
py
|
Python
|
model.py
|
Lucianod28/Temporal-MIL
|
93cab2cf66154a2cc7e3e0261c22f6ec0baea471
|
[
"MIT"
] | null | null | null |
model.py
|
Lucianod28/Temporal-MIL
|
93cab2cf66154a2cc7e3e0261c22f6ec0baea471
|
[
"MIT"
] | null | null | null |
model.py
|
Lucianod28/Temporal-MIL
|
93cab2cf66154a2cc7e3e0261c22f6ec0baea471
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
    """Attention-based deep multiple-instance learning model (Ilse et al. 2018).

    forward() consumes a bag shaped (1, N, 1, 28, 28) and returns
    (Y_prob, Y_hat, A): the bag probability, the thresholded 0/1 prediction,
    and the KxN attention weights over the N instances.
    """

    def __init__(self):
        super(Attention, self).__init__()
        self.L = 500  # instance-embedding size
        self.D = 128  # attention hidden size
        self.K = 1    # number of attention heads

        # (N, 1, 28, 28) -> (N, 50, 4, 4)
        self.feature_extractor_part1 = nn.Sequential(
            nn.Conv2d(1, 20, kernel_size=5),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(20, 50, kernel_size=5),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2)
        )
        # flattened conv features -> L-dim instance embedding
        self.feature_extractor_part2 = nn.Sequential(
            nn.Linear(50 * 4 * 4, self.L),
            nn.ReLU(),
            nn.Dropout(0.5)
        )
        # two-layer attention scorer: L -> D -> K
        self.attention = nn.Sequential(
            nn.Linear(self.L, self.D),
            nn.Tanh(),
            nn.Linear(self.D, self.K)
        )
        self.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(self.L * self.K, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        x = x.squeeze(0)  # drop the bag dimension -> (N, 1, 28, 28)
        H = self.feature_extractor_part1(x)
        H = H.view(-1, 50 * 4 * 4)
        H = self.feature_extractor_part2(H)  # NxL

        A = self.attention(H)  # NxK
        A = torch.transpose(A, 1, 0)  # KxN
        A = F.softmax(A, dim=1)  # softmax over N

        M = torch.mm(A, H)  # KxL attention-weighted bag embedding
        Y_prob = self.classifier(M)
        Y_hat = torch.ge(Y_prob, 0.5).float()
        return Y_prob, Y_hat, A

    # AUXILIARY METHODS
    def calculate_classification_error(self, X, Y):
        """Return (error, Y_hat): 0/1 misclassification error for bag X vs label Y."""
        Y = Y.float()
        _, Y_hat, _ = self.forward(X)
        # Fix: drop the deprecated `.data` accessor (legacy Variable API);
        # calling .item() on the mean directly is the supported form.
        error = 1. - Y_hat.eq(Y).cpu().float().mean().item()
        return error, Y_hat

    def calculate_objective(self, X, Y, z_tilde=None, unsupervised_weight=None, labeled=True):
        """Compute the training loss for one bag.

        Y_prob plays the role of `z` in temporal ensembling; when `z_tilde`
        is given, a consistency term weighted by `unsupervised_weight` is
        added to the supervised loss for labeled bags.
        Returns (loss, A, Y_prob, neg_log_likelihood, temporal_ensembling_loss).
        """
        Y_prob, _, A = self.forward(X)
        Y_prob = torch.clamp(Y_prob, min=1e-5, max=1. - 1e-5)  # avoid log(0)

        loss = 0
        neg_log_likelihood = 0
        temporal_ensembling_loss = 0
        if z_tilde is not None:  # use temporal ensembling for train
            temporal_ensembling_loss = unsupervised_weight * ((Y_prob - z_tilde) ** 2).mean().reshape((1, 1))
        if labeled:
            Y = Y.float()
            neg_log_likelihood = -1. * (Y * torch.log(Y_prob) + (1. - Y) * torch.log(1. - Y_prob))  # negative log bernoulli
            loss = temporal_ensembling_loss + neg_log_likelihood
        # NOTE(review): when labeled=False the returned loss stays 0 even if a
        # temporal-ensembling term was computed; callers appear to read that
        # term from the returned tuple instead -- confirm before changing.
        return loss, A, Y_prob, neg_log_likelihood, temporal_ensembling_loss

    def calculate_neg_log_objective(self, X, Y):
        """Return (negative log-likelihood, attention weights) for bag X vs label Y."""
        Y = Y.float()
        Y_prob, _, A = self.forward(X)
        Y_prob = torch.clamp(Y_prob, min=1e-5, max=1. - 1e-5)
        neg_log_likelihood = -1. * (Y * torch.log(Y_prob) + (1. - Y) * torch.log(1. - Y_prob))  # negative log bernoulli
        return neg_log_likelihood, A
class CancerAttention(nn.Module):
    """Attention-based deep MIL model for 3-channel image patches.

    Same architecture idea as `Attention`, with a conv stack sized for RGB
    inputs and a deeper embedding MLP. forward() takes a bag shaped
    (1, N, 3, H, W) and returns (Y_prob, Y_hat, A).
    """

    def __init__(self):
        super().__init__()
        self.L = 512  # instance-embedding size
        self.D = 128  # attention hidden size
        self.K = 1    # number of attention heads

        self.feature_extractor_part1 = nn.Sequential(
            nn.Conv2d(3, 18, kernel_size=4),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(18, 24, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2)
        )
        # 580608//672 == 864 flattened conv features per instance.
        # NOTE(review): presumably 24 channels x 6 x 6 spatial (i.e. 31x31
        # input patches) -- confirm against the dataloader.
        self.feature_extractor_part2 = nn.Sequential(
            nn.Linear(580608 // 672, self.L),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(self.L, self.L),
            nn.ReLU(),
            nn.Dropout(0.5)
        )
        self.attention = nn.Sequential(
            nn.Linear(self.L, self.D),
            nn.Tanh(),
            nn.Linear(self.D, self.K)
        )
        self.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(self.L * self.K, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        x = x.squeeze(0)  # drop the bag dimension -> (N, 3, H, W)
        H = self.feature_extractor_part1(x)
        H = H.view(-1, 580608 // 672)
        H = self.feature_extractor_part2(H)  # NxL

        A = self.attention(H)  # NxK
        A = torch.transpose(A, 1, 0)  # KxN
        A = F.softmax(A, dim=1)  # softmax over N

        M = torch.mm(A, H)  # KxL attention-weighted bag embedding
        Y_prob = self.classifier(M)
        Y_hat = torch.ge(Y_prob, 0.5).float()
        return Y_prob, Y_hat, A

    # AUXILIARY METHODS
    def calculate_classification_error(self, X, Y):
        """Return (error, Y_hat): 0/1 misclassification error for bag X vs label Y."""
        Y = Y.float()
        _, Y_hat, _ = self.forward(X)
        # Fix: drop the deprecated `.data` accessor (legacy Variable API);
        # calling .item() on the mean directly is the supported form.
        error = 1. - Y_hat.eq(Y).cpu().float().mean().item()
        return error, Y_hat

    def calculate_objective(self, X, Y, z_tilde=None, unsupervised_weight=None, labeled=True):
        """Compute the training loss for one bag.

        Y_prob plays the role of `z` in temporal ensembling; when `z_tilde`
        is given, a consistency term weighted by `unsupervised_weight` is
        added to the supervised loss for labeled bags.
        Returns (loss, A, Y_prob, neg_log_likelihood, temporal_ensembling_loss).
        """
        Y_prob, _, A = self.forward(X)
        Y_prob = torch.clamp(Y_prob, min=1e-5, max=1. - 1e-5)  # avoid log(0)

        loss = 0
        neg_log_likelihood = 0
        temporal_ensembling_loss = 0
        if z_tilde is not None:  # use temporal ensembling for train
            temporal_ensembling_loss = unsupervised_weight * ((Y_prob - z_tilde) ** 2).mean().reshape((1, 1))
        if labeled:
            Y = Y.float()
            neg_log_likelihood = -1. * (Y * torch.log(Y_prob) + (1. - Y) * torch.log(1. - Y_prob))  # negative log bernoulli
            loss = temporal_ensembling_loss + neg_log_likelihood
        # NOTE(review): when labeled=False the returned loss stays 0 even if a
        # temporal-ensembling term was computed; callers appear to read that
        # term from the returned tuple instead -- confirm before changing.
        return loss, A, Y_prob, neg_log_likelihood, temporal_ensembling_loss

    def calculate_neg_log_objective(self, X, Y):
        """Return (negative log-likelihood, attention weights) for bag X vs label Y."""
        Y = Y.float()
        Y_prob, _, A = self.forward(X)
        Y_prob = torch.clamp(Y_prob, min=1e-5, max=1. - 1e-5)
        neg_log_likelihood = -1. * (Y * torch.log(Y_prob) + (1. - Y) * torch.log(1. - Y_prob))  # negative log bernoulli
        return neg_log_likelihood, A
class GatedAttention(nn.Module):
    """Deep MIL model with gated attention pooling (tanh * sigmoid gates).

    forward() takes a bag shaped (1, N, 1, 28, 28) and returns
    (Y_prob, Y_hat, A): bag probability, 0/1 prediction, KxN attention.
    """

    def __init__(self):
        super(GatedAttention, self).__init__()
        self.L = 500  # instance-embedding size
        self.D = 128  # attention hidden size
        self.K = 1    # number of attention heads

        self.feature_extractor_part1 = nn.Sequential(
            nn.Conv2d(1, 20, kernel_size=5),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(20, 50, kernel_size=5),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2)
        )
        self.feature_extractor_part2 = nn.Sequential(
            nn.Linear(50 * 4 * 4, self.L),
            nn.ReLU(),
        )
        # gated attention: tanh branch (V) element-wise gated by sigmoid branch (U)
        self.attention_V = nn.Sequential(
            nn.Linear(self.L, self.D),
            nn.Tanh()
        )
        self.attention_U = nn.Sequential(
            nn.Linear(self.L, self.D),
            nn.Sigmoid()
        )
        self.attention_weights = nn.Linear(self.D, self.K)
        self.classifier = nn.Sequential(
            nn.Linear(self.L * self.K, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        instances = x.squeeze(0)  # (N, 1, 28, 28)
        feats = self.feature_extractor_part1(instances)
        feats = self.feature_extractor_part2(feats.view(-1, 50 * 4 * 4))  # NxL

        tanh_branch = self.attention_V(feats)      # NxD
        sigmoid_gate = self.attention_U(feats)     # NxD
        scores = self.attention_weights(tanh_branch * sigmoid_gate)  # NxK
        scores = F.softmax(scores.t(), dim=1)      # KxN, softmax over instances

        bag_embedding = torch.mm(scores, feats)    # KxL
        Y_prob = self.classifier(bag_embedding)
        Y_hat = (Y_prob >= 0.5).float()
        return Y_prob, Y_hat, scores

    # AUXILIARY METHODS
    def calculate_classification_error(self, X, Y):
        """Return (error, Y_hat): 0/1 misclassification error for bag X vs label Y."""
        _, Y_hat, _ = self.forward(X)
        error = 1. - Y_hat.eq(Y.float()).cpu().float().mean().item()
        return error, Y_hat

    def calculate_objective(self, X, Y):
        """Return (negative log Bernoulli likelihood, attention weights)."""
        Y = Y.float()
        Y_prob, _, A = self.forward(X)
        # clamp away from 0/1 so the logs stay finite
        Y_prob = torch.clamp(Y_prob, min=1e-5, max=1. - 1e-5)
        neg_log_likelihood = -1. * (Y * torch.log(Y_prob) + (1. - Y) * torch.log(1. - Y_prob))
        return neg_log_likelihood, A
| 31.199219
| 124
| 0.546263
| 1,115
| 7,987
| 3.725561
| 0.104933
| 0.050554
| 0.053924
| 0.024073
| 0.922244
| 0.922244
| 0.904911
| 0.904911
| 0.902263
| 0.880838
| 0
| 0.039277
| 0.321022
| 7,987
| 255
| 125
| 31.321569
| 0.72672
| 0.077626
| 0
| 0.807292
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072917
| false
| 0
| 0.015625
| 0
| 0.161458
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0f03c88c09fafd1b27ae64a327d043c65eb119d7
| 2,336
|
py
|
Python
|
ConvertMM_support.py
|
JonRob812/GimmeInches
|
51652d1e49a99455619ad3d82e24a7085cc55dfd
|
[
"Apache-2.0"
] | null | null | null |
ConvertMM_support.py
|
JonRob812/GimmeInches
|
51652d1e49a99455619ad3d82e24a7085cc55dfd
|
[
"Apache-2.0"
] | null | null | null |
ConvertMM_support.py
|
JonRob812/GimmeInches
|
51652d1e49a99455619ad3d82e24a7085cc55dfd
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Support module generated by PAGE version 4.26
# in conjunction with Tcl version 8.6
# Dec 30, 2019 03:31:08 PM PST platform: Windows NT
#
# NOTE(review): this file contained an unresolved git merge conflict
# (<<<<<<< HEAD ... =======, >>>>>>>), which made it a SyntaxError.
# Resolved in favor of the HEAD side, which is a superset of the other
# side: it additionally labels the values via Input_Text_Var
# ("mm = ..." / "in = ...") instead of showing the raw number only.

import sys

try:
    import Tkinter as tk
except ImportError:
    import tkinter as tk

try:
    import ttk
    py3 = False
except ImportError:
    import tkinter.ttk as ttk
    py3 = True


def set_Tk_var():
    """Create the Tk variables the generated GUI binds its widgets to."""
    global inputVAR
    global Input_Text_Var
    inputVAR = tk.StringVar()
    Input_Text_Var = tk.StringVar()


def gimmeInches():
    """Convert the millimetre value in the entry to inches (5 decimals),
    display both values, select the result, and copy it to the clipboard."""
    _in = float(inputVAR.get())
    _out = round(_in / 25.4, 5)  # 25.4 mm per inch
    inputVAR.set("in = " + str(_out))
    Input_Text_Var.set("mm = " + str(_in))
    widget = root.focus_get()
    widget.selection_range(0, 'end')
    root.clipboard_clear()
    root.clipboard_append(_out)
    sys.stdout.flush()


def enter(event):
    """Key-binding callback (e.g. <Return>): run the conversion."""
    gimmeInches()


def init(top, gui, *args, **kwargs):
    """PAGE hook: record the toplevel window and the generated GUI object."""
    global w, top_level, root
    w = gui
    top_level = top
    root = top


def destroy_window():
    # Function which closes the window.
    global top_level
    top_level.destroy()
    top_level = None


if __name__ == '__main__':
    import ConvertMM
    ConvertMM.vp_start_gui()
| 18.688
| 55
| 0.651113
| 322
| 2,336
| 4.534161
| 0.298137
| 0.054795
| 0.041096
| 0.046575
| 0.908219
| 0.908219
| 0.908219
| 0.908219
| 0.908219
| 0.908219
| 0
| 0.042017
| 0.235873
| 2,336
| 124
| 56
| 18.83871
| 0.77591
| 0.184075
| 0
| 0.9
| 1
| 0
| 0.016913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.2
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
0f41de5064da6dfaa18e7a20270fb7a513427168
| 18,970
|
py
|
Python
|
model/mydeeplab.py
|
statsu1990/kaggle_understanding_clouds
|
756c8271855d232167a76bd25f8bb81e7505a422
|
[
"MIT"
] | null | null | null |
model/mydeeplab.py
|
statsu1990/kaggle_understanding_clouds
|
756c8271855d232167a76bd25f8bb81e7505a422
|
[
"MIT"
] | 6
|
2020-01-28T23:08:31.000Z
|
2022-02-10T00:24:01.000Z
|
model/mydeeplab.py
|
statsu1990/kaggle_understanding_clouds
|
756c8271855d232167a76bd25f8bb81e7505a422
|
[
"MIT"
] | null | null | null |
from keras.models import Model
from keras.layers import Input, Conv2D, BatchNormalization, Activation, LeakyReLU, Lambda
from keras.layers import MaxPooling2D, AveragePooling2D, UpSampling2D, Dropout
from keras.layers import Concatenate, Add, Multiply
from keras.regularizers import l2
from keras import backend as K
import tensorflow as tf
import numpy as np
from model import deeplab_v3
from model.cos_similarity import CosSimilarityWithFeatvec, ActivityRegularization_L2NormToConst
def mydeeplab_v1(input_shape, num_class):
    """Deeplabv3+ (MobileNetV2 backbone, pascal_voc weights) run at half
    resolution, with the logits resized back to input_shape and passed
    through a per-class sigmoid. Returns an uncompiled keras Model."""
    DOWNSIZE_RATE = 1/2
    downsize = (int(input_shape[0] * DOWNSIZE_RATE), int(input_shape[1] * DOWNSIZE_RATE))

    # input
    inputs = Input(input_shape)
    x = inputs
    # downsize before the backbone to cut memory/compute
    x = Lambda(lambda _x: tf.image.resize(_x, downsize))(x)  # bilinear (default)
    # Deeplabv3
    dplb_model = deeplab_v3.Deeplabv3(weights='pascal_voc', input_tensor=x, input_shape=K.int_shape(x)[1:], classes=num_class, backbone='mobilenetv2', OS=16, alpha=1., activation=None)
    x = dplb_model.output
    # upsample the logits back to the original resolution
    x = Lambda(lambda _x: tf.image.resize(_x, input_shape[:2]))(x)  # bilinear
    # independent per-class probabilities
    oup = Activation('sigmoid')(x)

    # Fix: the functional Model takes `inputs`/`outputs`; the legacy
    # `input`/`output` keyword names were removed in Keras 2.x.
    model = Model(inputs=inputs, outputs=oup)
    return model
def mydeeplab_v2(input_shape, num_class):
    """Same as mydeeplab_v1 (half-resolution Deeplabv3+, sigmoid output)
    but initialized from the cityscapes weights instead of pascal_voc."""
    DOWNSIZE_RATE = 1/2
    downsize = (int(input_shape[0] * DOWNSIZE_RATE), int(input_shape[1] * DOWNSIZE_RATE))

    # input
    inputs = Input(input_shape)
    x = inputs
    # downsize before the backbone
    x = Lambda(lambda _x: tf.image.resize(_x, downsize))(x)  # bilinear (default)
    # Deeplabv3 (cityscapes)
    dplb_model = deeplab_v3.Deeplabv3(weights='cityscapes', input_tensor=x, input_shape=K.int_shape(x)[1:], classes=num_class, backbone='mobilenetv2', OS=16, alpha=1., activation=None)
    x = dplb_model.output
    # upsample back to the original resolution
    x = Lambda(lambda _x: tf.image.resize(_x, input_shape[:2]))(x)  # bilinear
    oup = Activation('sigmoid')(x)

    # Fix: `inputs`/`outputs` replace the removed legacy `input`/`output` kwargs.
    model = Model(inputs=inputs, outputs=oup)
    return model
def mydeeplab_mask_v1(input_shape, num_class):
    """Half-resolution Deeplabv3+ that predicts num_class class maps plus one
    extra map used as a gate: the output concatenates (class probs * mask)
    with the mask itself."""
    DOWNSIZE_RATE = 1/2
    downsize = (int(input_shape[0] * DOWNSIZE_RATE), int(input_shape[1] * DOWNSIZE_RATE))

    # input
    inputs = Input(input_shape)
    x = inputs
    # downsize before the backbone
    x = Lambda(lambda _x: tf.image.resize(_x, downsize))(x)  # bilinear (default)
    # Deeplabv3 with one extra channel for the mask
    dplb_model = deeplab_v3.Deeplabv3(weights='pascal_voc', input_tensor=x, input_shape=K.int_shape(x)[1:], classes=num_class+1, backbone='mobilenetv2', OS=16, alpha=1., activation=None)
    x = dplb_model.output
    # upsample back to the original resolution
    x = Lambda(lambda _x: tf.image.resize(_x, input_shape[:2]))(x)  # bilinear
    # output
    x = Activation('sigmoid')(x)
    # NOTE(review): the mask is sliced as channel -2 while temp_prob keeps
    # channels :-1 (which still contains channel -2). If the dedicated extra
    # channel was intended, this should likely be _x[..., -1:] -- confirm
    # before changing; kept as-is to preserve behavior.
    mask = Lambda(lambda _x: _x[...,-2:-1])(x)
    temp_prob = Lambda(lambda _x: _x[...,:-1])(x)
    prob = Multiply()([temp_prob, mask])
    oup = Concatenate()([prob, mask])

    # Fix: `inputs`/`outputs` replace the removed legacy `input`/`output` kwargs.
    model = Model(inputs=inputs, outputs=oup)
    return model
def mydeeplab_v3(input_shape, num_class):
    """Same as mydeeplab_v1 but predicting at 2/3 of the input resolution."""
    DOWNSIZE_RATE = 2/3
    downsize = (int(input_shape[0] * DOWNSIZE_RATE), int(input_shape[1] * DOWNSIZE_RATE))

    # input
    inputs = Input(input_shape)
    x = inputs
    # downsize before the backbone
    x = Lambda(lambda _x: tf.image.resize(_x, downsize))(x)  # bilinear (default)
    # Deeplabv3
    dplb_model = deeplab_v3.Deeplabv3(weights='pascal_voc', input_tensor=x, input_shape=K.int_shape(x)[1:], classes=num_class, backbone='mobilenetv2', OS=16, alpha=1., activation=None)
    x = dplb_model.output
    # upsample back to the original resolution
    x = Lambda(lambda _x: tf.image.resize(_x, input_shape[:2]))(x)  # bilinear
    oup = Activation('sigmoid')(x)

    # Fix: `inputs`/`outputs` replace the removed legacy `input`/`output` kwargs.
    model = Model(inputs=inputs, outputs=oup)
    return model
def mydeeplab_v4(input_shape, num_class):
    """Same as mydeeplab_v1 but predicting at 4/5 of the input resolution."""
    DOWNSIZE_RATE = 4/5
    downsize = (int(input_shape[0] * DOWNSIZE_RATE), int(input_shape[1] * DOWNSIZE_RATE))

    # input
    inputs = Input(input_shape)
    x = inputs
    # downsize before the backbone
    x = Lambda(lambda _x: tf.image.resize(_x, downsize))(x)  # bilinear (default)
    # Deeplabv3
    dplb_model = deeplab_v3.Deeplabv3(weights='pascal_voc', input_tensor=x, input_shape=K.int_shape(x)[1:], classes=num_class, backbone='mobilenetv2', OS=16, alpha=1., activation=None)
    x = dplb_model.output
    # upsample back to the original resolution
    x = Lambda(lambda _x: tf.image.resize(_x, input_shape[:2]))(x)  # bilinear
    oup = Activation('sigmoid')(x)

    # Fix: `inputs`/`outputs` replace the removed legacy `input`/`output` kwargs.
    model = Model(inputs=inputs, outputs=oup)
    return model
def mydeeplab_v5(input_shape, num_class):
    """Same as mydeeplab_v1 but predicting at full input resolution
    (DOWNSIZE_RATE = 1; the resize Lambdas are kept for uniformity)."""
    DOWNSIZE_RATE = 1
    downsize = (int(input_shape[0] * DOWNSIZE_RATE), int(input_shape[1] * DOWNSIZE_RATE))

    # input
    inputs = Input(input_shape)
    x = inputs
    # "downsize" (identity at rate 1, kept so all variants share one structure)
    x = Lambda(lambda _x: tf.image.resize(_x, downsize))(x)  # bilinear (default)
    # Deeplabv3
    dplb_model = deeplab_v3.Deeplabv3(weights='pascal_voc', input_tensor=x, input_shape=K.int_shape(x)[1:], classes=num_class, backbone='mobilenetv2', OS=16, alpha=1., activation=None)
    x = dplb_model.output
    # resize back to the input resolution
    x = Lambda(lambda _x: tf.image.resize(_x, input_shape[:2]))(x)  # bilinear
    oup = Activation('sigmoid')(x)

    # Fix: `inputs`/`outputs` replace the removed legacy `input`/`output` kwargs.
    model = Model(inputs=inputs, outputs=oup)
    return model
def mydeeplab_mask_v2(input_shape, num_class):
    """Same gated-mask variant as mydeeplab_mask_v1 but predicting at 2/3 of
    the input resolution."""
    DOWNSIZE_RATE = 2/3
    downsize = (int(input_shape[0] * DOWNSIZE_RATE), int(input_shape[1] * DOWNSIZE_RATE))

    # input
    inputs = Input(input_shape)
    x = inputs
    # downsize before the backbone
    x = Lambda(lambda _x: tf.image.resize(_x, downsize))(x)  # bilinear (default)
    # Deeplabv3 with one extra channel for the mask
    dplb_model = deeplab_v3.Deeplabv3(weights='pascal_voc', input_tensor=x, input_shape=K.int_shape(x)[1:], classes=num_class+1, backbone='mobilenetv2', OS=16, alpha=1., activation=None)
    x = dplb_model.output
    # upsample back to the original resolution
    x = Lambda(lambda _x: tf.image.resize(_x, input_shape[:2]))(x)  # bilinear
    # output
    x = Activation('sigmoid')(x)
    # NOTE(review): same slicing question as mydeeplab_mask_v1 -- the mask is
    # channel -2, not the dedicated last channel; kept as-is to preserve behavior.
    mask = Lambda(lambda _x: _x[...,-2:-1])(x)
    temp_prob = Lambda(lambda _x: _x[...,:-1])(x)
    prob = Multiply()([temp_prob, mask])
    oup = Concatenate()([prob, mask])

    # Fix: `inputs`/`outputs` replace the removed legacy `input`/`output` kwargs.
    model = Model(inputs=inputs, outputs=oup)
    return model
def mydeeplab_l2norm_v1(input_shape, num_class, scale):
    """Deeplabv3+ backbone whose per-pixel feature vectors are L2-normalized
    and rescaled by `scale` before a fresh 1x1 logits conv (cosine-style
    classifier head)."""
    DOWNSIZE_RATE = 2/3
    downsize = (int(input_shape[0] * DOWNSIZE_RATE), int(input_shape[1] * DOWNSIZE_RATE))

    # input
    inputs = Input(input_shape)
    x = inputs
    # downsize before the backbone
    x = Lambda(lambda _x: tf.image.resize(_x, downsize))(x)  # bilinear (default)
    # Deeplabv3
    dplb_model = deeplab_v3.Deeplabv3(weights='pascal_voc', input_tensor=x, input_shape=K.int_shape(x)[1:], classes=num_class, backbone='mobilenetv2', OS=16, alpha=1., activation=None)
    # layer 161: backbone feature map (index is tied to this Deeplabv3 build)
    #x = dplb_model.output
    x = dplb_model.layers[161].output  # feature map
    #x = dplb_model.layers[162].output # dropout feature map
    # unit-normalize each pixel's feature vector, then rescale
    x = Lambda(lambda _x: tf.math.l2_normalize(_x, axis=-1), name='L2_normalization')(x)
    x = Lambda(lambda _x: _x * scale, name='scaling')(x)
    # fresh logits head (trained from scratch)
    x = Conv2D(num_class, (1, 1), padding='same', name='custom_logits_semantic')(x)
    # upsample back to the original resolution
    x = Lambda(lambda _x: tf.image.resize(_x, input_shape[:2]))(x)  # bilinear
    oup = Activation('sigmoid')(x)

    # Fix: `inputs`/`outputs` replace the removed legacy `input`/`output` kwargs.
    model = Model(inputs=inputs, outputs=oup)
    return model
def mydeeplab_featvec_v1(input_shape, num_class, regu_coef=None, act_regu_coef=None, oup_act='relu', downsize_rate=2/3):
    """Deeplabv3+ backbone followed by cosine similarity against one learned
    feature vector per class.

    regu_coef: optional regularization passed to CosSimilarityWithFeatvec.
    act_regu_coef: if set, adds an activity regularizer pinning the feature
        map's L2 norm to sqrt(feature_dim).
    oup_act: output activation name; 'leakyrelu' selects LeakyReLU(0.1).
    """
    DOWNSIZE_RATE = downsize_rate
    downsize = (int(input_shape[0] * DOWNSIZE_RATE), int(input_shape[1] * DOWNSIZE_RATE))

    # input
    inputs = Input(input_shape)
    x = inputs
    # downsize before the backbone
    x = Lambda(lambda _x: tf.image.resize(_x, downsize))(x)  # bilinear (default)
    # Deeplabv3
    dplb_model = deeplab_v3.Deeplabv3(weights='pascal_voc', input_tensor=x, input_shape=K.int_shape(x)[1:], classes=num_class, backbone='mobilenetv2', OS=16, alpha=1., activation=None)
    x = dplb_model.layers[161].output  # feature map (B, H, W, F)
    # cosine similarity (B, H, W, Class)
    if act_regu_coef is not None:
        n_ele = K.int_shape(x)[-1]
        x = ActivityRegularization_L2NormToConst(c=np.sqrt(n_ele), coef=act_regu_coef, axis=-1)(x)
    x = CosSimilarityWithFeatvec(n_vec=num_class, regu_coef=regu_coef, name='cos_similarity_with_featvec')(x)
    # upsample back to the original resolution
    x = Lambda(lambda _x: tf.image.resize(_x, input_shape[:2]))(x)  # bilinear
    # output
    if oup_act == 'leakyrelu':
        oup = LeakyReLU(alpha=0.1)(x)
    else:
        oup = Activation(oup_act)(x)

    # Fix: `inputs`/`outputs` replace the removed legacy `input`/`output` kwargs.
    model = Model(inputs=inputs, outputs=oup)
    return model
def mydeeplab_featvec_v2(input_shape, num_class, regu_coef=None, act_regu_coef=None, oup_act='relu', downsize_rate=2/3):
    """Variant of mydeeplab_featvec_v1 that taps the backbone one layer
    earlier (layer 160: batch normalization before the ReLU) so the cosine
    head sees pre-activation features."""
    DOWNSIZE_RATE = downsize_rate
    downsize = (int(input_shape[0] * DOWNSIZE_RATE), int(input_shape[1] * DOWNSIZE_RATE))

    # input
    inputs = Input(input_shape)
    x = inputs
    # downsize before the backbone
    x = Lambda(lambda _x: tf.image.resize(_x, downsize))(x)  # bilinear (default)
    # Deeplabv3
    dplb_model = deeplab_v3.Deeplabv3(weights='pascal_voc', input_tensor=x, input_shape=K.int_shape(x)[1:], classes=num_class, backbone='mobilenetv2', OS=16, alpha=1., activation=None)
    x = dplb_model.layers[160].output  # feature map (batch normalization before relu) (B, H, W, F)
    #x = dplb_model.layers[161].output # feature map (relu) (B, H, W, F)
    # cosine similarity (B, H, W, Class)
    if act_regu_coef is not None:
        n_ele = K.int_shape(x)[-1]
        x = ActivityRegularization_L2NormToConst(c=np.sqrt(n_ele), coef=act_regu_coef, axis=-1)(x)
    x = CosSimilarityWithFeatvec(n_vec=num_class, regu_coef=regu_coef, name='cos_similarity_with_featvec')(x)
    # upsample back to the original resolution
    x = Lambda(lambda _x: tf.image.resize(_x, input_shape[:2]))(x)  # bilinear
    # output
    if oup_act == 'leakyrelu':
        oup = LeakyReLU(alpha=0.1)(x)
    else:
        oup = Activation(oup_act)(x)

    # Fix: `inputs`/`outputs` replace the removed legacy `input`/`output` kwargs.
    model = Model(inputs=inputs, outputs=oup)
    return model
def mydeeplab_featvec_v2_2(input_shape, num_class, regu_coef=None, act_regu_coef=None, oup_act='relu', scaling=False, downsize_rate=2/3):
    """Deeplabv3(MobileNetV2) + cosine-similarity head, tapping the raw
    convolution feature map (layer 159, before batch norm) with optional
    scale-only normalization.

    Args:
        input_shape: (H, W, C) shape of the model input.
        num_class: number of output classes / learned feature vectors.
        regu_coef: weight-regularization coefficient for the featvec layer.
        act_regu_coef: if not None, regularize the feature map's per-pixel
            L2 norm toward sqrt(n_features).
        oup_act: output activation name, or 'leakyrelu' for LeakyReLU(0.1).
        scaling: if True, apply BatchNormalization(center=False) to the
            feature map (scale without shift).
        downsize_rate: spatial scale factor applied before the backbone.

    Returns:
        A Keras ``Model`` mapping images to (H, W, num_class) maps.
    """
    downsize = (int(input_shape[0] * downsize_rate), int(input_shape[1] * downsize_rate))
    inputs = Input(input_shape)
    # downsize before the backbone (tf.image.resize defaults to bilinear)
    x = Lambda(lambda _x: tf.image.resize(_x, downsize))(inputs)
    # Deeplabv3 backbone pretrained on PASCAL VOC
    dplb_model = deeplab_v3.Deeplabv3(weights='pascal_voc', input_tensor=x,
                                      input_shape=K.int_shape(x)[1:], classes=num_class,
                                      backbone='mobilenetv2', OS=16, alpha=1., activation=None)
    # feature map (conv before batch normalization) (B, H, W, F);
    # 160 = post-BN, 161 = post-ReLU are the variants used by v2 / v1
    x = dplb_model.layers[159].output
    # optional scale-only normalization (no centering/shift)
    if scaling:
        x = BatchNormalization(center=False)(x)
    # optionally pull the per-pixel feature L2 norm toward sqrt(F)
    if act_regu_coef is not None:
        n_ele = K.int_shape(x)[-1]
        x = ActivityRegularization_L2NormToConst(c=np.sqrt(n_ele), coef=act_regu_coef, axis=-1)(x)
    # cosine similarity against learned class vectors -> (B, H, W, Class)
    x = CosSimilarityWithFeatvec(n_vec=num_class, regu_coef=regu_coef, name='cos_similarity_with_featvec')(x)
    # upsample back to the input resolution (bilinear)
    x = Lambda(lambda _x: tf.image.resize(_x, input_shape[:2]))(x)
    # output activation
    if oup_act == 'leakyrelu':
        oup = LeakyReLU(alpha=0.1)(x)
    else:
        oup = Activation(oup_act)(x)
    # Fix: keras.Model takes `inputs`/`outputs`; the legacy `input`/`output`
    # kwargs are rejected by tf.keras (TypeError).
    model = Model(inputs=inputs, outputs=oup)
    return model
def mydeeplab_featvec_v3(input_shape, num_class, n_vec, downsize_rate=2/3):
    """Deeplabv3(MobileNetV2) + cosine-similarity features followed by a
    1x1-conv logits head and sigmoid output.

    Unlike v1/v2, the number of feature vectors ``n_vec`` is decoupled from
    ``num_class``; a 1x1 convolution maps similarities to class logits.

    Args:
        input_shape: (H, W, C) shape of the model input.
        num_class: number of output classes.
        n_vec: number of learned feature vectors in the similarity layer.
        downsize_rate: spatial scale factor applied before the backbone.

    Returns:
        A Keras ``Model`` mapping images to (H, W, num_class) sigmoid maps.
    """
    downsize = (int(input_shape[0] * downsize_rate), int(input_shape[1] * downsize_rate))
    inputs = Input(input_shape)
    # downsize before the backbone (tf.image.resize defaults to bilinear)
    x = Lambda(lambda _x: tf.image.resize(_x, downsize))(inputs)
    # Deeplabv3 backbone pretrained on PASCAL VOC
    dplb_model = deeplab_v3.Deeplabv3(weights='pascal_voc', input_tensor=x,
                                      input_shape=K.int_shape(x)[1:], classes=num_class,
                                      backbone='mobilenetv2', OS=16, alpha=1., activation=None)
    # feature map (batch normalization before relu) (B, H, W, F);
    # layer 161 would be the post-ReLU map
    x = dplb_model.layers[160].output
    # cosine similarity against learned vectors -> (B, H, W, n_vec)
    x = CosSimilarityWithFeatvec(n_vec=n_vec, name='cos_similarity_with_featvec')(x)
    # 1x1 conv from similarities to class logits
    x = Conv2D(num_class, (1, 1), padding='same', name='custom_logits_semantic')(x)
    # upsample back to the input resolution (bilinear)
    x = Lambda(lambda _x: tf.image.resize(_x, input_shape[:2]))(x)
    # per-class sigmoid output
    oup = Activation('sigmoid')(x)
    # Fix: keras.Model takes `inputs`/`outputs`; the legacy `input`/`output`
    # kwargs are rejected by tf.keras (TypeError).
    model = Model(inputs=inputs, outputs=oup)
    return model
def mydeeplab_featvec_v3_1(input_shape, num_class, n_vec, downsize_rate=2/3):
    """Variant of ``mydeeplab_featvec_v3`` that applies a ReLU to the cosine
    similarities before the 1x1-conv logits head.

    Args:
        input_shape: (H, W, C) shape of the model input.
        num_class: number of output classes.
        n_vec: number of learned feature vectors in the similarity layer.
        downsize_rate: spatial scale factor applied before the backbone.

    Returns:
        A Keras ``Model`` mapping images to (H, W, num_class) sigmoid maps.
    """
    downsize = (int(input_shape[0] * downsize_rate), int(input_shape[1] * downsize_rate))
    inputs = Input(input_shape)
    # downsize before the backbone (tf.image.resize defaults to bilinear)
    x = Lambda(lambda _x: tf.image.resize(_x, downsize))(inputs)
    # Deeplabv3 backbone pretrained on PASCAL VOC
    dplb_model = deeplab_v3.Deeplabv3(weights='pascal_voc', input_tensor=x,
                                      input_shape=K.int_shape(x)[1:], classes=num_class,
                                      backbone='mobilenetv2', OS=16, alpha=1., activation=None)
    # feature map (batch normalization before relu) (B, H, W, F)
    x = dplb_model.layers[160].output
    # cosine similarity against learned vectors -> (B, H, W, n_vec),
    # rectified so only positive similarities feed the logits head
    x = CosSimilarityWithFeatvec(n_vec=n_vec, name='cos_similarity_with_featvec')(x)
    x = Activation('relu')(x)
    # 1x1 conv from similarities to class logits
    x = Conv2D(num_class, (1, 1), padding='same', name='custom_logits_semantic')(x)
    # upsample back to the input resolution (bilinear)
    x = Lambda(lambda _x: tf.image.resize(_x, input_shape[:2]))(x)
    # per-class sigmoid output
    oup = Activation('sigmoid')(x)
    # Fix: keras.Model takes `inputs`/`outputs`; the legacy `input`/`output`
    # kwargs are rejected by tf.keras (TypeError).
    model = Model(inputs=inputs, outputs=oup)
    return model
def mydeeplab_featvec_v4(input_shape, num_class, n_vec, n_last_hidden, downsize_rate=2/3):
    """Deeplabv3(MobileNetV2) + cosine-similarity features, a 1x1-conv hidden
    layer of width ``n_last_hidden``, then a 1x1-conv logits head with
    sigmoid output.

    Args:
        input_shape: (H, W, C) shape of the model input.
        num_class: number of output classes.
        n_vec: number of learned feature vectors in the similarity layer.
        n_last_hidden: channel width of the hidden 1x1 convolution.
        downsize_rate: spatial scale factor applied before the backbone.

    Returns:
        A Keras ``Model`` mapping images to (H, W, num_class) sigmoid maps.
    """
    downsize = (int(input_shape[0] * downsize_rate), int(input_shape[1] * downsize_rate))
    inputs = Input(input_shape)
    # downsize before the backbone (tf.image.resize defaults to bilinear)
    x = Lambda(lambda _x: tf.image.resize(_x, downsize))(inputs)
    # Deeplabv3 backbone pretrained on PASCAL VOC
    dplb_model = deeplab_v3.Deeplabv3(weights='pascal_voc', input_tensor=x,
                                      input_shape=K.int_shape(x)[1:], classes=num_class,
                                      backbone='mobilenetv2', OS=16, alpha=1., activation=None)
    # feature map (batch normalization before relu) (B, H, W, F)
    x = dplb_model.layers[160].output
    # cosine similarity against learned vectors -> (B, H, W, n_vec)
    x = CosSimilarityWithFeatvec(n_vec=n_vec, name='cos_similarity_with_featvec')(x)
    # hidden 1x1 conv + ReLU (he_normal suits the ReLU nonlinearity)
    x = Conv2D(n_last_hidden, (1, 1), padding='same', name='last_hidden', kernel_initializer='he_normal')(x)
    x = Activation('relu')(x)
    # 1x1 conv from hidden features to class logits
    x = Conv2D(num_class, (1, 1), padding='same', name='custom_logits_semantic')(x)
    # upsample back to the input resolution (bilinear)
    x = Lambda(lambda _x: tf.image.resize(_x, input_shape[:2]))(x)
    # per-class sigmoid output
    oup = Activation('sigmoid')(x)
    # Fix: keras.Model takes `inputs`/`outputs`; the legacy `input`/`output`
    # kwargs are rejected by tf.keras (TypeError).
    model = Model(inputs=inputs, outputs=oup)
    return model
def mydeeplab_featvec_v4_1(input_shape, num_class, n_vec, n_last_hidden, downsize_rate=2/3):
    """Variant of ``mydeeplab_featvec_v4`` with Dropout(0.1) after the hidden
    1x1-conv layer.

    Args:
        input_shape: (H, W, C) shape of the model input.
        num_class: number of output classes.
        n_vec: number of learned feature vectors in the similarity layer.
        n_last_hidden: channel width of the hidden 1x1 convolution.
        downsize_rate: spatial scale factor applied before the backbone.

    Returns:
        A Keras ``Model`` mapping images to (H, W, num_class) sigmoid maps.
    """
    downsize = (int(input_shape[0] * downsize_rate), int(input_shape[1] * downsize_rate))
    inputs = Input(input_shape)
    # downsize before the backbone (tf.image.resize defaults to bilinear)
    x = Lambda(lambda _x: tf.image.resize(_x, downsize))(inputs)
    # Deeplabv3 backbone pretrained on PASCAL VOC
    dplb_model = deeplab_v3.Deeplabv3(weights='pascal_voc', input_tensor=x,
                                      input_shape=K.int_shape(x)[1:], classes=num_class,
                                      backbone='mobilenetv2', OS=16, alpha=1., activation=None)
    # feature map (batch normalization before relu) (B, H, W, F)
    x = dplb_model.layers[160].output
    # cosine similarity against learned vectors -> (B, H, W, n_vec)
    x = CosSimilarityWithFeatvec(n_vec=n_vec, name='cos_similarity_with_featvec')(x)
    # hidden 1x1 conv + ReLU + light dropout for regularization
    x = Conv2D(n_last_hidden, (1, 1), padding='same', name='last_hidden', kernel_initializer='he_normal')(x)
    x = Activation('relu')(x)
    x = Dropout(0.1)(x)
    # 1x1 conv from hidden features to class logits
    x = Conv2D(num_class, (1, 1), padding='same', name='custom_logits_semantic')(x)
    # upsample back to the input resolution (bilinear)
    x = Lambda(lambda _x: tf.image.resize(_x, input_shape[:2]))(x)
    # per-class sigmoid output
    oup = Activation('sigmoid')(x)
    # Fix: keras.Model takes `inputs`/`outputs`; the legacy `input`/`output`
    # kwargs are rejected by tf.keras (TypeError).
    model = Model(inputs=inputs, outputs=oup)
    return model
def mydeeplab_featvec_v4_2(input_shape, num_class, n_vec, n_last_hidden, downsize_rate=2/3):
    """Variant of ``mydeeplab_featvec_v4`` that applies a ReLU to the cosine
    similarities before the hidden 1x1-conv layer.

    Args:
        input_shape: (H, W, C) shape of the model input.
        num_class: number of output classes.
        n_vec: number of learned feature vectors in the similarity layer.
        n_last_hidden: channel width of the hidden 1x1 convolution.
        downsize_rate: spatial scale factor applied before the backbone.

    Returns:
        A Keras ``Model`` mapping images to (H, W, num_class) sigmoid maps.
    """
    downsize = (int(input_shape[0] * downsize_rate), int(input_shape[1] * downsize_rate))
    inputs = Input(input_shape)
    # downsize before the backbone (tf.image.resize defaults to bilinear)
    x = Lambda(lambda _x: tf.image.resize(_x, downsize))(inputs)
    # Deeplabv3 backbone pretrained on PASCAL VOC
    dplb_model = deeplab_v3.Deeplabv3(weights='pascal_voc', input_tensor=x,
                                      input_shape=K.int_shape(x)[1:], classes=num_class,
                                      backbone='mobilenetv2', OS=16, alpha=1., activation=None)
    # feature map (batch normalization before relu) (B, H, W, F)
    x = dplb_model.layers[160].output
    # cosine similarity against learned vectors -> (B, H, W, n_vec),
    # rectified before the hidden layer
    x = CosSimilarityWithFeatvec(n_vec=n_vec, name='cos_similarity_with_featvec')(x)
    x = Activation('relu')(x)
    # hidden 1x1 conv + ReLU (he_normal suits the ReLU nonlinearity)
    x = Conv2D(n_last_hidden, (1, 1), padding='same', name='last_hidden', kernel_initializer='he_normal')(x)
    x = Activation('relu')(x)
    # 1x1 conv from hidden features to class logits
    x = Conv2D(num_class, (1, 1), padding='same', name='custom_logits_semantic')(x)
    # upsample back to the input resolution (bilinear)
    x = Lambda(lambda _x: tf.image.resize(_x, input_shape[:2]))(x)
    # per-class sigmoid output
    oup = Activation('sigmoid')(x)
    # Fix: keras.Model takes `inputs`/`outputs`; the legacy `input`/`output`
    # kwargs are rejected by tf.keras (TypeError).
    model = Model(inputs=inputs, outputs=oup)
    return model
# wrapper
def mydeeplab_featvec_wrapper_relu_last(deeplab_featvec_model):
    """Wrap an existing featvec model, appending a ReLU to its output.

    Args:
        deeplab_featvec_model: a Keras model whose output the ReLU is
            appended to; its weights/layers are reused, not copied.

    Returns:
        A new Keras ``Model`` sharing the wrapped model's input.
    """
    inp = deeplab_featvec_model.input
    oup = Activation('relu')(deeplab_featvec_model.output)
    # Fix: keras.Model takes `inputs`/`outputs`; the legacy `input`/`output`
    # kwargs are rejected by tf.keras (TypeError).
    model = Model(inputs=inp, outputs=oup)
    return model
| 35
| 187
| 0.655034
| 2,630
| 18,970
| 4.509125
| 0.05019
| 0.080951
| 0.041656
| 0.040138
| 0.92765
| 0.92394
| 0.918543
| 0.91264
| 0.904798
| 0.904798
| 0
| 0.023035
| 0.21049
| 18,970
| 542
| 188
| 35
| 0.768779
| 0.139167
| 0
| 0.846743
| 0
| 0
| 0.061315
| 0.022296
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065134
| false
| 0
| 0.038314
| 0
| 0.168582
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0f5d6fc114b7ed0258df90d332f3419a11890b7d
| 152
|
py
|
Python
|
pydlx/dlx_recursive_solver.py
|
tdons/dlx
|
f8222519a06e635bf6ada5ae95ce7867d26c9546
|
[
"MIT"
] | 1
|
2019-01-25T22:39:43.000Z
|
2019-01-25T22:39:43.000Z
|
pydlx/dlx_recursive_solver.py
|
tdons/dlx
|
f8222519a06e635bf6ada5ae95ce7867d26c9546
|
[
"MIT"
] | null | null | null |
pydlx/dlx_recursive_solver.py
|
tdons/dlx
|
f8222519a06e635bf6ada5ae95ce7867d26c9546
|
[
"MIT"
] | null | null | null |
from ctypes import *
from pydlx import _libdlx
def count_solutions(matrix):
    """Count the exact-cover solutions of *matrix* using the native
    recursive DLX solver from ``_libdlx``."""
    native_handle = matrix._get_matrix()
    return _libdlx.dlx_recursive_solver_count_solutions(native_handle)
| 25.333333
| 74
| 0.842105
| 21
| 152
| 5.666667
| 0.666667
| 0.235294
| 0.336134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092105
| 152
| 6
| 74
| 25.333333
| 0.862319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
0f883926f773a4a3720e8b0ee7404319d713dd32
| 158
|
py
|
Python
|
skeleton/__init__.py
|
therden/skeletal_flask
|
ad8af37376af888c999ab5de5b69ba0039b557c0
|
[
"Unlicense"
] | 3
|
2020-01-02T07:58:52.000Z
|
2020-11-25T20:31:37.000Z
|
skeleton/__init__.py
|
therden/skeletal_flask
|
ad8af37376af888c999ab5de5b69ba0039b557c0
|
[
"Unlicense"
] | null | null | null |
skeleton/__init__.py
|
therden/skeletal_flask
|
ad8af37376af888c999ab5de5b69ba0039b557c0
|
[
"Unlicense"
] | 1
|
2020-06-19T03:00:28.000Z
|
2020-06-19T03:00:28.000Z
|
from skeleton import config_app
from skeleton import config_db
from skeleton import models
from skeleton import views
from skeleton import config_flask_admin
| 26.333333
| 39
| 0.873418
| 24
| 158
| 5.583333
| 0.416667
| 0.447761
| 0.671642
| 0.537313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126582
| 158
| 5
| 40
| 31.6
| 0.971014
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
0f8f52033b010b12749c725e76ebd817f74fcd2c
| 4,583
|
py
|
Python
|
tests/terraform/checks/resource/azure/test_NetworkWatcherFlowLogPeriod.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | 4,013
|
2019-12-09T13:16:54.000Z
|
2022-03-31T14:31:01.000Z
|
tests/terraform/checks/resource/azure/test_NetworkWatcherFlowLogPeriod.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | 1,258
|
2019-12-17T09:55:51.000Z
|
2022-03-31T19:17:17.000Z
|
tests/terraform/checks/resource/azure/test_NetworkWatcherFlowLogPeriod.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | 638
|
2019-12-19T08:57:38.000Z
|
2022-03-30T21:38:37.000Z
|
import unittest
import hcl2
from checkov.terraform.checks.resource.azure.NetworkWatcherFlowLogPeriod import check
from checkov.common.models.enums import CheckResult
class TestNetworkWatcherFlowLogPeriod(unittest.TestCase):
    """Tests for the Azure NetworkWatcherFlowLogPeriod check.

    Every case renders the same flow-log resource and varies only the
    ``retention_policy.days`` value; the duplicated HCL is factored into a
    single template and the parse/scan boilerplate into ``_scan``.
    """

    # HCL template shared by all cases; `{days}` is the only varying field.
    # Doubled braces are literal braces for str.format.
    _RESOURCE_TEMPLATE = """
    resource "azurerm_network_watcher_flow_log" "test" {{
        network_watcher_name = azurerm_network_watcher.test.name
        resource_group_name = azurerm_resource_group.test.name
        network_security_group_id = azurerm_network_security_group.test.id
        storage_account_id = azurerm_storage_account.test.id
        enabled = true

        retention_policy {{
            enabled = true
            days = {days}
        }}
    }}
    """

    def _scan(self, days):
        """Render the resource with the given `days` expression, parse it,
        and return the check's scan result."""
        hcl_res = hcl2.loads(self._RESOURCE_TEMPLATE.format(days=days))
        resource_conf = hcl_res['resource'][0]['azurerm_network_watcher_flow_log']['test']
        return check.scan_resource_conf(conf=resource_conf)

    def test_failure(self):
        # 7 days is below the required retention period
        self.assertEqual(CheckResult.FAILED, self._scan('7'))

    def test_failure_invalid_days_string(self):
        # a variable reference cannot be evaluated -> check fails
        self.assertEqual(CheckResult.FAILED, self._scan('var.watcher_flow_logs.days'))

    def test_success(self):
        self.assertEqual(CheckResult.PASSED, self._scan('90'))

    def test_success_with_0_days(self):
        # 0 means retain indefinitely, which satisfies the check
        self.assertEqual(CheckResult.PASSED, self._scan('0'))

    def test_success_with_valid_day_string(self):
        # numeric value supplied as an HCL string literal
        self.assertEqual(CheckResult.PASSED, self._scan('"100"'))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 40.201754
| 90
| 0.600698
| 463
| 4,583
| 5.514039
| 0.131749
| 0.109675
| 0.123384
| 0.097924
| 0.884841
| 0.882099
| 0.882099
| 0.882099
| 0.882099
| 0.882099
| 0
| 0.006165
| 0.327515
| 4,583
| 113
| 91
| 40.557522
| 0.822193
| 0
| 0
| 0.706522
| 0
| 0
| 0.669867
| 0.251146
| 0
| 0
| 0
| 0
| 0.054348
| 1
| 0.054348
| false
| 0.032609
| 0.043478
| 0
| 0.108696
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7e2a8b7bc60b8485d0f633eb55f0139b15328022
| 15,467
|
py
|
Python
|
openweave/tlv/schema/tests/test_INTEGER.py
|
robszewczyk/openweave-tlv-schema
|
c0acbccce4fcaf213a09261f79d6a141ae94f7e8
|
[
"Apache-2.0"
] | 1
|
2020-05-19T22:52:27.000Z
|
2020-05-19T22:52:27.000Z
|
openweave/tlv/schema/tests/test_INTEGER.py
|
robszewczyk/openweave-tlv-schema
|
c0acbccce4fcaf213a09261f79d6a141ae94f7e8
|
[
"Apache-2.0"
] | null | null | null |
openweave/tlv/schema/tests/test_INTEGER.py
|
robszewczyk/openweave-tlv-schema
|
c0acbccce4fcaf213a09261f79d6a141ae94f7e8
|
[
"Apache-2.0"
] | 1
|
2021-02-15T16:14:17.000Z
|
2021-02-15T16:14:17.000Z
|
#!/usr/bin/env python3
#
# Copyright (c) 2020 Google LLC.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Unit tests for INTEGER types.
#
import unittest
from .testutils import TLVSchemaTestCase
class Test_INTEGER(TLVSchemaTestCase):
    """Unit tests for enumerated-value range validation of INTEGER and
    UNSIGNED INTEGER types.

    Each case loads a schema and checks which enumerated values are
    reported as out of range; the repeated load/validate/assert
    boilerplate is factored into ``_assertSchemaErrors``.
    """

    def _assertSchemaErrors(self, schemaText, outOfRangeValues):
        """Load/validate *schemaText* and assert that exactly
        ``len(outOfRangeValues)`` errors are reported, one
        'out of range' error per listed value (duplicates allowed)."""
        (tlvSchema, errs) = self.loadValidate(schemaText)
        self.assertErrorCount(errs, len(outOfRangeValues))
        for value in outOfRangeValues:
            self.assertError(errs, 'enumerated integer value out of range: %s' % value)

    def test_INTEGER_EnumeratedValues(self):
        # Full signed 64-bit range, decimal and hex forms: all valid.
        schemaText = '''
                     int => INTEGER
                     {
                         v0 = 0,
                         v1 = 1,
                         v2 = 2,
                         v3 = 9223372036854775807,
                         v4 = -9223372036854775808,
                         v5 = 0x7FFFFFFFFFFFFFFF,
                         v6 = -0x7FFFFFFFFFFFFFFF,
                     }
                     '''
        (tlvSchema, errs) = self.loadValidate(schemaText)
        self.assertNoErrors(errs)
        # Full unsigned 64-bit range: all valid.
        schemaText = '''
                     int => UNSIGNED INTEGER
                     {
                         v0 = 0,
                         v1 = 1,
                         v2 = 2,
                         v3 = 18446744073709551615,
                     }
                     '''
        (tlvSchema, errs) = self.loadValidate(schemaText)
        self.assertNoErrors(errs)

    def test_INTEGER_EnumeratedValues_OutOfRange_Default(self):
        self._assertSchemaErrors(
            '''
            int => INTEGER
            {
                too-small = -9223372036854775809,
                too-big = 9223372036854775808,
                just-right-1 = -9223372036854775808,
                just-right-2 = 0,
                just-right-3 = 9223372036854775807,
            }
            ''',
            ['9223372036854775808',
             '-9223372036854775809'])
        self._assertSchemaErrors(
            '''
            int => UNSIGNED INTEGER
            {
                too-small-1 = -1,
                too-small-2 = -9223372036854775809,
                too-big-1 = 18446744073709551616,
                too-big-2 = 184467440737095516160,
                just-right-1 = 0,
                just-right-2 = 9223372036854775808,
                just-right-3 = 18446744073709551615,
            }
            ''',
            ['-1',
             '-9223372036854775809',
             '18446744073709551616',
             '184467440737095516160'])

    def test_INTEGER_EnumeratedValues_OutOfRange_8Bit(self):
        self._assertSchemaErrors(
            '''
            int => INTEGER [ range 8bit ]
            {
                too-small-1 = -9223372036854775809,
                too-small-2 = -65535,
                too-small-3 = -129,
                too-big-1 = 9223372036854775808,
                too-big-2 = 65535,
                too-big-3 = 128,
                just-right-1 = 0,
                just-right-2 = 127,
                just-right-3 = -128,
            }
            ''',
            ['9223372036854775808',
             '-65535',
             '-129',
             '9223372036854775808',
             '65535',
             '128'])
        self._assertSchemaErrors(
            '''
            int => UNSIGNED INTEGER [ range 8bit ]
            {
                too-small-1 = -9223372036854775809,
                too-small-2 = -65535,
                too-small-3 = -129,
                too-big-1 = 9223372036854775808,
                too-big-2 = 65535,
                too-big-3 = 256,
                just-right-1 = 0,
                just-right-2 = 127,
                just-right-3 = 255,
            }
            ''',
            ['9223372036854775808',
             '-65535',
             '-129',
             '9223372036854775808',
             '65535',
             '256'])

    def test_INTEGER_EnumeratedValues_OutOfRange_16Bit(self):
        self._assertSchemaErrors(
            '''
            int => INTEGER [ range 16bit ]
            {
                too-small-1 = -9223372036854775809,
                too-small-2 = -4294967295,
                too-small-3 = -32769,
                too-big-1 = 9223372036854775808,
                too-big-2 = 65535,
                too-big-3 = 32768,
                just-right-1 = 0,
                just-right-2 = 32767,
                just-right-3 = -32768,
            }
            ''',
            ['-9223372036854775809',
             '-4294967295',
             '-32769',
             '9223372036854775808',
             '65535',
             '32768'])
        self._assertSchemaErrors(
            '''
            int => UNSIGNED INTEGER [ range 16bit ]
            {
                too-small-1 = -9223372036854775809,
                too-small-2 = -65535,
                too-small-3 = -1,
                too-big-1 = 9223372036854775808,
                too-big-2 = 4294967295,
                too-big-3 = 65536,
                just-right-1 = 0,
                just-right-2 = 32768,
                just-right-3 = 65535,
            }
            ''',
            ['-9223372036854775809',
             '-65535',
             '-1',
             '9223372036854775808',
             '4294967295',
             '65536'])

    def test_INTEGER_EnumeratedValues_OutOfRange_32Bit(self):
        self._assertSchemaErrors(
            '''
            int => INTEGER [ range 32bit ]
            {
                too-small-1 = -18446744073709551616,
                too-small-2 = -9223372036854775808,
                too-small-3 = -2147483649,
                too-big-1 = 18446744073709551616,
                too-big-2 = 9223372036854775808,
                too-big-3 = 2147483648,
                just-right-1 = 0,
                just-right-2 = 2147483647,
                just-right-3 = -2147483648,
            }
            ''',
            ['-18446744073709551616',
             '-9223372036854775808',
             '-2147483649',
             '18446744073709551616',
             '9223372036854775808',
             '2147483648'])
        self._assertSchemaErrors(
            '''
            int => UNSIGNED INTEGER [ range 32bit ]
            {
                too-small-1 = -18446744073709551616,
                too-small-2 = -9223372036854775808,
                too-small-3 = -1,
                too-big-1 = 184467440737095516160,
                too-big-2 = 18446744073709551616,
                too-big-3 = 4294967296,
                just-right-1 = 0,
                just-right-2 = 2147483648,
                just-right-3 = 4294967295,
            }
            ''',
            ['-18446744073709551616',
             '-9223372036854775808',
             '-1',
             '184467440737095516160',
             '18446744073709551616',
             '4294967296'])

    def test_INTEGER_EnumeratedValues_OutOfRange_64bit(self):
        # An explicit 64-bit range behaves like the default range.
        self._assertSchemaErrors(
            '''
            int => INTEGER [ range 64bit ]
            {
                too-small = -9223372036854775809,
                too-big = 9223372036854775808,
                just-right-1 = -9223372036854775808,
                just-right-2 = 0,
                just-right-3 = 9223372036854775807,
            }
            ''',
            ['9223372036854775808',
             '-9223372036854775809'])
        self._assertSchemaErrors(
            '''
            int => UNSIGNED INTEGER [ range 64bit ]
            {
                too-small-1 = -1,
                too-small-2 = -9223372036854775809,
                too-big-1 = 18446744073709551616,
                too-big-2 = 184467440737095516160,
                just-right-1 = 0,
                just-right-2 = 9223372036854775808,
                just-right-3 = 18446744073709551615,
            }
            ''',
            ['-1',
             '-9223372036854775809',
             '18446744073709551616',
             '184467440737095516160'])

    def test_INTEGER_EnumeratedValues_OutOfRange_UpperLower(self):
        # Explicit lower..upper bounds.
        self._assertSchemaErrors(
            '''
            int => INTEGER [ range -42..87265838912 ]
            {
                too-small-1 = -9223372036854775809,
                too-small-2 = -9223372036854775808,
                too-small-3 = -43,
                too-big-1 = 9223372036854775808,
                too-big-2 = 9223372036854775807,
                too-big-3 = 87265838913,
                just-right-1 = -42,
                just-right-2 = -10,
                just-right-3 = 0,
                just-right-4 = 42,
                just-right-5 = 87265838912,
            }
            ''',
            ['-9223372036854775809',
             '-9223372036854775808',
             '-43',
             '9223372036854775808',
             '9223372036854775807',
             '87265838913'])
        # NOTE(review): the original schema declares `too-small-2` twice
        # (values 0 and 41); preserved as-is since the expected error
        # count (6) depends on it.
        self._assertSchemaErrors(
            '''
            int => UNSIGNED INTEGER [ range 42..87265838912 ]
            {
                too-small-1 = -1,
                too-small-2 = 0,
                too-small-2 = 41,
                too-big-1 = 87265838913,
                too-big-2 = 18446744073709551616,
                too-big-3 = 184467440737095516160,
                just-right-1 = 42,
                just-right-2 = 100,
                just-right-3 = 87265838912,
            }
            ''',
            ['-1',
             '0',
             '41',
             '87265838913',
             '18446744073709551616',
             '184467440737095516160'])
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 48.034161
| 94
| 0.52667
| 1,371
| 15,467
| 5.91612
| 0.112327
| 0.11096
| 0.14055
| 0.214523
| 0.843916
| 0.800025
| 0.800025
| 0.743435
| 0.723955
| 0.717667
| 0
| 0.215162
| 0.388505
| 15,467
| 321
| 95
| 48.183801
| 0.642419
| 0.042865
| 0
| 0.620072
| 0
| 0
| 0.699655
| 0.050125
| 0
| 0
| 0.002435
| 0
| 0.265233
| 1
| 0.02509
| false
| 0
| 0.007168
| 0
| 0.035842
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
0e9d9dfedab32c71009009d345a92476cd8b12f8
| 80,132
|
py
|
Python
|
test/test_variable_info.py
|
mayankjuneja/censusdata
|
e3fdb0ff77a8228b69aebfd17e6019cc54d91915
|
[
"MIT"
] | null | null | null |
test/test_variable_info.py
|
mayankjuneja/censusdata
|
e3fdb0ff77a8228b69aebfd17e6019cc54d91915
|
[
"MIT"
] | null | null | null |
test/test_variable_info.py
|
mayankjuneja/censusdata
|
e3fdb0ff77a8228b69aebfd17e6019cc54d91915
|
[
"MIT"
] | null | null | null |
"""" Test showing information on variables from Census API.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import censusdata
import unittest
from collections import OrderedDict
import io
import textwrap
import re
class TestVariableInfo(unittest.TestCase):
def test_censusvar_acs5(self):
expected = {'B01001_001E': ['B01001. Sex by Age', 'Total:', 'int'],
'B01002_001E': ['B01002. Median Age by Sex', 'Median age --!!Total:', 'int'],
'B19013_001E': ['B19013. Median Household Income',
'Median household income in the past 12 months (in 2009 inflation-adjusted dollars)', 'int']}
self.assertEqual(censusdata.censusvar('acs5', 2009, ['B01001_001E', 'B01002_001E', 'B19013_001E']), expected)
for year in range(2010, 2019+1):
concepts = ['SEX BY AGE', 'MEDIAN AGE BY SEX', 'MEDIAN HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN {0} INFLATION-ADJUSTED DOLLARS)'.format(year)]
types = ['int', 'float', 'int']
if year == 2016: types = ['', 'int', 'int']
if year == 2017: types = ['', 'float', 'int']
inflation = 'inflation'
if year == 2014 or year == 2015: inflation = 'Inflation'
median_age_label = 'Estimate!!Median age!!Total'
if year == 2018: median_age_label = 'Estimate!!Median age --!!Total'
if year == 2019: median_age_label = 'Estimate!!Median age --!!Total:'
total_label = 'Estimate!!Total'
if year == 2019: total_label = 'Estimate!!Total:'
expected = {'B01001_001E': [concepts[0], total_label, types[0]],
'B01002_001E': [concepts[1], median_age_label, types[1]],
'B19013_001E': [concepts[2],
'Estimate!!Median household income in the past 12 months (in {0} {1}-adjusted dollars)'.format(year, inflation), types[2]]}
self.assertEqual(censusdata.censusvar('acs5', year, ['B01001_001E', 'B01002_001E', 'B19013_001E']), expected)
expected = {'C24010_001E': ['SEX BY OCCUPATION FOR THE CIVILIAN EMPLOYED POPULATION 16 YEARS AND OVER',
'Estimate!!Total', 'int']}
self.assertEqual(censusdata.censusvar('acs5', 2018, ['C24010_001E']), expected)
expected = {'C24010_001E': ['SEX BY OCCUPATION FOR THE CIVILIAN EMPLOYED POPULATION 16 YEARS AND OVER',
'Estimate!!Total:', 'int']}
self.assertEqual(censusdata.censusvar('acs5', 2019, ['C24010_001E']), expected)
def test_censusvar_acs1(self):
expected = {'S0101_C02_001E': ['AGE AND SEX', 'Male!!Estimate!!Total population', 'int'],
'DP03_0021PE': ['SELECTED ECONOMIC CHARACTERISTICS', 'Percent!!COMMUTING TO WORK!!Workers 16 years and over!!Public transportation (excluding taxicab)', 'float'],
'CP02_2012_030E': ['COMPARATIVE SOCIAL CHARACTERISTICS IN THE UNITED STATES', '2012 Estimate!!MARITAL STATUS!!Females 15 years and over', 'int']}
self.assertEqual(censusdata.censusvar('acs1', 2015, ['S0101_C02_001E', 'DP03_0021PE', 'CP02_2012_030E']), expected)
expected = {'S0101_C02_001E': ['', 'Male!!Estimate!!Total population', 'int'],
'DP03_0021PE': ['', 'Percent!!COMMUTING TO WORK!!Workers 16 years and over!!Public transportation (excluding taxicab)', 'int'],
'CP02_2012_030E': ['', '2012 Estimate!!MARITAL STATUS!!Females 15 years and over', 'int']}
self.assertEqual(censusdata.censusvar('acs1', 2016, ['S0101_C02_001E', 'DP03_0021PE', 'CP02_2012_030E']), expected)
expected = {'S0101_C02_001E': ['AGE AND SEX', 'Percent!!Estimate!!Total population', 'int'],
'DP03_0021PE': ['SELECTED ECONOMIC CHARACTERISTICS', 'Percent!!COMMUTING TO WORK!!Workers 16 years and over!!Public transportation (excluding taxicab)', 'float'],
'CP02_2013_030E': ['COMPARATIVE SOCIAL CHARACTERISTICS IN THE UNITED STATES', '2013 Estimate!!MARITAL STATUS!!Females 15 years and over', 'int']}
self.assertEqual(censusdata.censusvar('acs1', 2017, ['S0101_C02_001E', 'DP03_0021PE', 'CP02_2013_030E']), expected)
expected = {'S0101_C02_001E': ['AGE AND SEX', 'Estimate!!Percent!!Total population', 'int'],
'DP03_0021PE': ['SELECTED ECONOMIC CHARACTERISTICS', 'Percent Estimate!!COMMUTING TO WORK!!Workers 16 years and over!!Public transportation (excluding taxicab)', 'float'],
'CP02_2014_030E': ['COMPARATIVE SOCIAL CHARACTERISTICS IN THE UNITED STATES', '2014 Estimate!!MARITAL STATUS!!Females 15 years and over', 'int']}
self.assertEqual(censusdata.censusvar('acs1', 2018, ['S0101_C02_001E', 'DP03_0021PE', 'CP02_2014_030E']), expected)
expected = {'S0101_C02_001E': ['AGE AND SEX', 'Estimate!!Percent!!Total population', 'int'],
'DP03_0021PE': ['SELECTED ECONOMIC CHARACTERISTICS', 'Percent!!COMMUTING TO WORK!!Workers 16 years and over!!Public transportation (excluding taxicab)', 'float'],
'CP02_2015_030E': ['COMPARATIVE SOCIAL CHARACTERISTICS IN THE UNITED STATES', '2015 Estimate!!MARITAL STATUS!!Males 15 years and over!!Divorced', 'float']}
self.assertEqual(censusdata.censusvar('acs1', 2019, ['S0101_C02_001E', 'DP03_0021PE', 'CP02_2015_030E']), expected)
def test_censusvar_acsse(self):
    """Check variable metadata lookups for the ACS Supplemental Estimates, 2014-2019."""
    for yr in range(2014, 2020):
        # The 2019 release added a trailing ':' after 'Total' in the label hierarchy.
        no_computer = 'Estimate!!Total:!!No computer' if yr == 2019 else 'Estimate!!Total!!No computer'
        self.assertEqual(
            censusdata.censusvar('acsse', yr, ['K202801_006E']),
            {'K202801_006E': ['PRESENCE OF A COMPUTER AND TYPE OF INTERNET SUBSCRIPTION IN HOUSEHOLD', no_computer, 'int']})
def test_censusvar_acs3(self):
    """Check variable metadata lookup for the ACS 3-year estimates.

    Only 2013 is exercised here (the loop is a one-year range kept for
    symmetry with the other per-year tests in this file).
    """
    for year in range(2013, 2013+1):
        # NOTE: the concept string previously had a no-op '.format(year)'
        # call on it; the string contains no placeholders, so the call was
        # removed. Only the label is genuinely year-dependent.
        expected = {'B19013_001E': ['B19013. Median Household Income',
            'Median household income in the past 12 months (in {0} inflation-adjusted dollars)'.format(year), 'int']}
        self.assertEqual(censusdata.censusvar('acs3', year, ['B19013_001E']), expected)
def test_censusvar_sf1(self):
    """Check variable metadata lookup for the 2010 decennial SF1 summary file."""
    expected = {'P001001': ['TOTAL POPULATION', 'Total', '']}
    self.assertEqual(censusdata.censusvar('sf1', 2010, ['P001001']), expected)
def test_unknownvar(self):
    """Looking up a variable that does not exist should raise ValueError."""
    with self.assertRaises(ValueError):
        censusdata.censusvar('acs5', 2015, ['B19013_010E'])
def test_censustable_acs1_201219_detail(self):
    """Verify censustable() output for ACS 1-year detail table B23025, 2012-2019.

    Label and concept formats changed over the years: 2012-2015 expose
    margin-of-error companion variables, 2016-2018 switched to an
    'Estimate!!Total' hierarchy, and 2019 appended ':' to non-leaf levels.
    """
    for year in range(2012, 2019 + 1):
        # 2012 is the only year whose variables carry no predicate type.
        predicate_type = '' if year == 2012 else 'int'
        if year >= 2016:
            concept = 'EMPLOYMENT STATUS FOR THE POPULATION 16 YEARS AND OVER'
        else:
            concept = 'B23025. Employment Status for the Population 16 Years and Over'
        if year == 2019:
            variable_labels = [
                ('B23025_001E', 'Estimate!!Total:'),
                ('B23025_002E', 'Estimate!!Total:!!In labor force:'),
                ('B23025_003E', 'Estimate!!Total:!!In labor force:!!Civilian labor force:'),
                ('B23025_004E', 'Estimate!!Total:!!In labor force:!!Civilian labor force:!!Employed'),
                ('B23025_005E', 'Estimate!!Total:!!In labor force:!!Civilian labor force:!!Unemployed'),
                ('B23025_006E', 'Estimate!!Total:!!In labor force:!!Armed Forces'),
                ('B23025_007E', 'Estimate!!Total:!!Not in labor force'),
            ]
        elif year >= 2016:
            variable_labels = [
                ('B23025_001E', 'Estimate!!Total'),
                ('B23025_002E', 'Estimate!!Total!!In labor force'),
                ('B23025_003E', 'Estimate!!Total!!In labor force!!Civilian labor force'),
                ('B23025_004E', 'Estimate!!Total!!In labor force!!Civilian labor force!!Employed'),
                ('B23025_005E', 'Estimate!!Total!!In labor force!!Civilian labor force!!Unemployed'),
                ('B23025_006E', 'Estimate!!Total!!In labor force!!Armed Forces'),
                ('B23025_007E', 'Estimate!!Total!!Not in labor force'),
            ]
        else:
            # 2012-2015: each estimate has a margin-of-error twin whose
            # label simply prefixes the estimate label.
            estimates = [
                ('B23025_001', 'Total:'),
                ('B23025_002', 'In labor force:'),
                ('B23025_003', 'In labor force:!!Civilian labor force:'),
                ('B23025_004', 'In labor force:!!Civilian labor force:!!Employed'),
                ('B23025_005', 'In labor force:!!Civilian labor force:!!Unemployed'),
                ('B23025_006', 'In labor force:!!Armed Forces'),
                ('B23025_007', 'Not in labor force'),
            ]
            variable_labels = []
            for stem, label in estimates:
                variable_labels.append((stem + 'E', label))
                variable_labels.append((stem + 'M', 'Margin of Error for!!' + label))
        expected = OrderedDict(
            (variable, {'label': label, 'concept': concept, 'predicateType': predicate_type})
            for variable, label in variable_labels)
        self.assertEqual(censusdata.censustable('acs1', year, 'B23025'), expected)
def test_censustable_acs5_2015_detail(self):
    """Verify censustable() output for ACS 5-year 2015 detail table B23025.

    Entries interleave each estimate (E) with its margin-of-error (M)
    companion, whose label is the estimate label with a fixed prefix.
    """
    concept = 'B23025. Employment Status for the Population 16 Years and Over'
    estimates = [
        ('B23025_001', 'Total:'),
        ('B23025_002', 'In labor force:'),
        ('B23025_003', 'In labor force:!!Civilian labor force:'),
        ('B23025_004', 'In labor force:!!Civilian labor force:!!Employed'),
        ('B23025_005', 'In labor force:!!Civilian labor force:!!Unemployed'),
        ('B23025_006', 'In labor force:!!Armed Forces'),
        ('B23025_007', 'Not in labor force'),
    ]
    expected = OrderedDict()
    for stem, label in estimates:
        expected[stem + 'E'] = {'label': label, 'concept': concept, 'predicateType': 'int'}
        expected[stem + 'M'] = {'label': 'Margin Of Error For!!' + label, 'concept': concept, 'predicateType': 'int'}
    self.assertEqual(censusdata.censustable('acs5', 2015, 'B23025'), expected)
def test_censustable_acs5_2016_detail(self):
    """Verify censustable() output for ACS 5-year 2016 detail table B23025."""
    concept = 'EMPLOYMENT STATUS FOR THE POPULATION 16 YEARS AND OVER'
    labels = [
        'Estimate!!Total',
        'Estimate!!Total!!In labor force',
        'Estimate!!Total!!In labor force!!Civilian labor force',
        'Estimate!!Total!!In labor force!!Civilian labor force!!Employed',
        'Estimate!!Total!!In labor force!!Civilian labor force!!Unemployed',
        'Estimate!!Total!!In labor force!!Armed Forces',
        'Estimate!!Total!!Not in labor force',
    ]
    expected = OrderedDict()
    for num, label in enumerate(labels, start=1):
        expected['B23025_{0:03d}E'.format(num)] = {'label': label, 'concept': concept, 'predicateType': 'int'}
    self.assertEqual(censusdata.censustable('acs5', 2016, 'B23025'), expected)
def test_censustable_acs5_2017_detail(self):
    """Verify censustable() output for ACS 5-year 2017 detail table B23025."""
    concept = 'EMPLOYMENT STATUS FOR THE POPULATION 16 YEARS AND OVER'
    labels = [
        'Estimate!!Total',
        'Estimate!!Total!!In labor force',
        'Estimate!!Total!!In labor force!!Civilian labor force',
        'Estimate!!Total!!In labor force!!Civilian labor force!!Employed',
        'Estimate!!Total!!In labor force!!Civilian labor force!!Unemployed',
        'Estimate!!Total!!In labor force!!Armed Forces',
        'Estimate!!Total!!Not in labor force',
    ]
    expected = OrderedDict()
    for num, label in enumerate(labels, start=1):
        expected['B23025_{0:03d}E'.format(num)] = {'label': label, 'concept': concept, 'predicateType': 'int'}
    self.assertEqual(censusdata.censustable('acs5', 2017, 'B23025'), expected)
def test_censustable_acs5_2018_detail(self):
    """Verify censustable() output for ACS 5-year 2018 detail tables B23025 and C15010."""
    employment_concept = 'EMPLOYMENT STATUS FOR THE POPULATION 16 YEARS AND OVER'
    employment_labels = [
        'Estimate!!Total',
        'Estimate!!Total!!In labor force',
        'Estimate!!Total!!In labor force!!Civilian labor force',
        'Estimate!!Total!!In labor force!!Civilian labor force!!Employed',
        'Estimate!!Total!!In labor force!!Civilian labor force!!Unemployed',
        'Estimate!!Total!!In labor force!!Armed Forces',
        'Estimate!!Total!!Not in labor force',
    ]
    expected = OrderedDict()
    for num, label in enumerate(employment_labels, start=1):
        expected['B23025_{0:03d}E'.format(num)] = {'label': label, 'concept': employment_concept, 'predicateType': 'int'}
    self.assertEqual(censusdata.censustable('acs5', 2018, 'B23025'), expected)
    degree_concept = "FIELD OF BACHELOR'S DEGREE FOR FIRST MAJOR FOR THE POPULATION 25 YEARS AND OVER"
    degree_fields = [
        'Science and Engineering',
        'Science and Engineering Related Fields',
        'Business',
        'Education',
        'Arts, Humanities and Other',
    ]
    expected = OrderedDict()
    expected['C15010_001E'] = {'label': 'Estimate!!Total', 'concept': degree_concept, 'predicateType': 'int'}
    for num, field in enumerate(degree_fields, start=2):
        expected['C15010_{0:03d}E'.format(num)] = {'label': 'Estimate!!Total!!' + field, 'concept': degree_concept, 'predicateType': 'int'}
    self.assertEqual(censusdata.censustable('acs5', 2018, 'C15010'), expected)
def test_censustable_acs5_2019_detail(self):
    """Verify censustable() output for ACS 5-year 2019 detail tables B23025 and C15010.

    2019 labels append ':' to every non-leaf level of the hierarchy.
    """
    employment_concept = 'EMPLOYMENT STATUS FOR THE POPULATION 16 YEARS AND OVER'
    employment_labels = [
        'Estimate!!Total:',
        'Estimate!!Total:!!In labor force:',
        'Estimate!!Total:!!In labor force:!!Civilian labor force:',
        'Estimate!!Total:!!In labor force:!!Civilian labor force:!!Employed',
        'Estimate!!Total:!!In labor force:!!Civilian labor force:!!Unemployed',
        'Estimate!!Total:!!In labor force:!!Armed Forces',
        'Estimate!!Total:!!Not in labor force',
    ]
    expected = OrderedDict()
    for num, label in enumerate(employment_labels, start=1):
        expected['B23025_{0:03d}E'.format(num)] = {'label': label, 'concept': employment_concept, 'predicateType': 'int'}
    self.assertEqual(censusdata.censustable('acs5', 2019, 'B23025'), expected)
    degree_concept = "FIELD OF BACHELOR'S DEGREE FOR FIRST MAJOR FOR THE POPULATION 25 YEARS AND OVER"
    degree_fields = [
        'Science and Engineering',
        'Science and Engineering Related Fields',
        'Business',
        'Education',
        'Arts, Humanities and Other',
    ]
    expected = OrderedDict()
    expected['C15010_001E'] = {'label': 'Estimate!!Total:', 'concept': degree_concept, 'predicateType': 'int'}
    for num, field in enumerate(degree_fields, start=2):
        expected['C15010_{0:03d}E'.format(num)] = {'label': 'Estimate!!Total:!!' + field, 'concept': degree_concept, 'predicateType': 'int'}
    self.assertEqual(censusdata.censustable('acs5', 2019, 'C15010'), expected)
def test_censustable_acs5_2015_subject(self):
    """Verify censustable() output for ACS 5-year 2015 subject table S0101, column C02 (Male).

    Every row number appears four times, in order: estimate (E), estimate
    annotation (EA), margin of error (M), and margin-of-error annotation
    (MA). E/EA share a 'Male' label prefix, M/MA a 'Male MOE' prefix; all
    entries share the same concept and predicate type.
    """
    age_groups = [
        'Under 5 years', '5 to 9 years', '10 to 14 years', '15 to 19 years',
        '20 to 24 years', '25 to 29 years', '30 to 34 years', '35 to 39 years',
        '40 to 44 years', '45 to 49 years', '50 to 54 years', '55 to 59 years',
        '60 to 64 years', '65 to 69 years', '70 to 74 years', '75 to 79 years',
        '80 to 84 years', '85 years and over',
    ]
    selected_categories = [
        '5 to 14 years', '15 to 17 years', '18 to 24 years', '15 to 44 years',
        '16 years and over', '18 years and over', '60 years and over',
        '62 years and over', '65 years and over', '75 years and over',
    ]
    summary_indicators = [
        'Median age (years)', 'Sex ratio (males per 100 females)',
        'Age dependency ratio', 'Old-age dependency ratio',
        'Child dependency ratio',
    ]
    # Label tails in row order: rows 001-034 sit under 'Total population';
    # rows 035-036 are the PERCENT IMPUTED entries.
    tails = ['Total population']
    tails += ['Total population!!AGE!!' + group for group in age_groups]
    tails += ['Total population!!SELECTED AGE CATEGORIES!!' + category for category in selected_categories]
    tails += ['Total population!!SUMMARY INDICATORS!!' + indicator for indicator in summary_indicators]
    tails += ['PERCENT IMPUTED!!Sex', 'PERCENT IMPUTED!!Age']
    expected = OrderedDict()
    for num, tail in enumerate(tails, start=1):
        for suffix, prefix in (('E', 'Male'), ('EA', 'Male'), ('M', 'Male MOE'), ('MA', 'Male MOE')):
            expected['S0101_C02_{0:03d}{1}'.format(num, suffix)] = {
                'label': prefix + '!!' + tail, 'concept': 'Age and Sex', 'predicateType': 'string'}
    self.assertEqual(censusdata.censustable('acs5', 2015, 'S0101_C02'), expected)
def test_censustable_acs5_2016_subject(self):
    """Check ACS 5-year 2016 metadata for subject table S0101_C02 (Age and Sex, male column)."""
    # Label suffixes in table order; variable keys run S0101_C02_001E .. S0101_C02_036E.
    suffixes = ['Total population']
    suffixes += ['AGE!!' + age for age in (
        'Under 5 years', '5 to 9 years', '10 to 14 years', '15 to 19 years',
        '20 to 24 years', '25 to 29 years', '30 to 34 years', '35 to 39 years',
        '40 to 44 years', '45 to 49 years', '50 to 54 years', '55 to 59 years',
        '60 to 64 years', '65 to 69 years', '70 to 74 years', '75 to 79 years',
        '80 to 84 years', '85 years and over')]
    suffixes += ['SELECTED AGE CATEGORIES!!' + cat for cat in (
        '5 to 14 years', '15 to 17 years', '18 to 24 years', '15 to 44 years',
        '16 years and over', '18 years and over', '60 years and over',
        '62 years and over', '65 years and over', '75 years and over')]
    suffixes += ['SUMMARY INDICATORS!!' + ind for ind in (
        'Median age (years)', 'Sex ratio (males per 100 females)',
        'Age dependency ratio', 'Old-age dependency ratio',
        'Child dependency ratio')]
    suffixes += ['PERCENT ALLOCATED!!Sex', 'PERCENT ALLOCATED!!Age']
    # Every 2016 variable in this column shares concept and predicateType.
    expected = OrderedDict(
        ('S0101_C02_{0:03d}E'.format(num),
         {'label': 'Male!!Estimate!!' + suffix, 'concept': 'AGE AND SEX',
          'predicateType': 'int'})
        for num, suffix in enumerate(suffixes, start=1))
    self.assertEqual(censusdata.censustable('acs5', 2016, 'S0101_C02'), expected)
def test_censustable_acs5_2017_subject(self):
    """Check ACS 5-year 2017 metadata for subject table S0101_C02 (AGE AND SEX, percent column)."""
    # (label suffix, predicateType) pairs in table order; keys run 001E .. 038E.
    entries = [('Total population', 'int')]
    entries += [('Total population!!AGE!!' + age, 'float') for age in (
        'Under 5 years', '5 to 9 years', '10 to 14 years', '15 to 19 years',
        '20 to 24 years', '25 to 29 years', '30 to 34 years', '35 to 39 years',
        '40 to 44 years', '45 to 49 years', '50 to 54 years', '55 to 59 years',
        '60 to 64 years', '65 to 69 years', '70 to 74 years', '75 to 79 years',
        '80 to 84 years', '85 years and over')]
    entries += [('Total population!!SELECTED AGE CATEGORIES!!' + cat, 'float') for cat in (
        '5 to 14 years', '15 to 17 years', 'Under 18 years', '18 to 24 years',
        '15 to 44 years', '16 years and over', '18 years and over',
        '21 years and over', '60 years and over', '62 years and over',
        '65 years and over', '75 years and over')]
    entries += [('Total population!!SUMMARY INDICATORS!!' + ind, 'int') for ind in (
        'Median age (years)', 'Sex ratio (males per 100 females)',
        'Age dependency ratio', 'Old-age dependency ratio',
        'Child dependency ratio')]
    # In 2017 the allocation variables drop the 'Total population' level.
    entries += [('PERCENT ALLOCATED!!Sex', 'float'), ('PERCENT ALLOCATED!!Age', 'float')]
    expected = OrderedDict(
        ('S0101_C02_{0:03d}E'.format(num),
         {'label': 'Estimate!!Percent!!' + suffix, 'concept': 'AGE AND SEX',
          'predicateType': ptype})
        for num, (suffix, ptype) in enumerate(entries, start=1))
    self.assertEqual(censusdata.censustable('acs5', 2017, 'S0101_C02'), expected)
def test_censustable_acs5_2018_subject(self):
    """Check ACS 5-year 2018 metadata for subject table S0101_C02 (AGE AND SEX, percent column)."""
    # (label suffix, predicateType) pairs in table order; keys run 001E .. 038E.
    entries = [('Total population', 'int')]
    entries += [('Total population!!AGE!!' + age, 'float') for age in (
        'Under 5 years', '5 to 9 years', '10 to 14 years', '15 to 19 years',
        '20 to 24 years', '25 to 29 years', '30 to 34 years', '35 to 39 years',
        '40 to 44 years', '45 to 49 years', '50 to 54 years', '55 to 59 years',
        '60 to 64 years', '65 to 69 years', '70 to 74 years', '75 to 79 years',
        '80 to 84 years', '85 years and over')]
    entries += [('Total population!!SELECTED AGE CATEGORIES!!' + cat, 'float') for cat in (
        '5 to 14 years', '15 to 17 years', 'Under 18 years', '18 to 24 years',
        '15 to 44 years', '16 years and over', '18 years and over',
        '21 years and over', '60 years and over', '62 years and over',
        '65 years and over', '75 years and over')]
    entries += [('Total population!!SUMMARY INDICATORS!!' + ind, 'int') for ind in (
        'Median age (years)', 'Sex ratio (males per 100 females)',
        'Age dependency ratio', 'Old-age dependency ratio',
        'Child dependency ratio')]
    # In 2018 the allocation variables drop the 'Total population' level.
    entries += [('PERCENT ALLOCATED!!Sex', 'float'), ('PERCENT ALLOCATED!!Age', 'float')]
    expected = OrderedDict(
        ('S0101_C02_{0:03d}E'.format(num),
         {'label': 'Estimate!!Percent!!' + suffix, 'concept': 'AGE AND SEX',
          'predicateType': ptype})
        for num, (suffix, ptype) in enumerate(entries, start=1))
    self.assertEqual(censusdata.censustable('acs5', 2018, 'S0101_C02'), expected)
def test_censustable_acs5_2019_subject(self):
    """Check ACS 5-year 2019 metadata for subject table S0101_C02 (AGE AND SEX, percent column)."""
    # (label suffix, predicateType) pairs in table order; keys run 001E .. 038E.
    entries = [('Total population', 'int')]
    entries += [('Total population!!AGE!!' + age, 'float') for age in (
        'Under 5 years', '5 to 9 years', '10 to 14 years', '15 to 19 years',
        '20 to 24 years', '25 to 29 years', '30 to 34 years', '35 to 39 years',
        '40 to 44 years', '45 to 49 years', '50 to 54 years', '55 to 59 years',
        '60 to 64 years', '65 to 69 years', '70 to 74 years', '75 to 79 years',
        '80 to 84 years', '85 years and over')]
    entries += [('Total population!!SELECTED AGE CATEGORIES!!' + cat, 'float') for cat in (
        '5 to 14 years', '15 to 17 years', 'Under 18 years', '18 to 24 years',
        '15 to 44 years', '16 years and over', '18 years and over',
        '21 years and over', '60 years and over', '62 years and over',
        '65 years and over', '75 years and over')]
    entries += [('Total population!!SUMMARY INDICATORS!!' + ind, 'int') for ind in (
        'Median age (years)', 'Sex ratio (males per 100 females)',
        'Age dependency ratio', 'Old-age dependency ratio',
        'Child dependency ratio')]
    # Unlike 2017/2018, the 2019 allocation variables keep the 'Total population' level.
    entries += [('Total population!!PERCENT ALLOCATED!!Sex', 'float'),
                ('Total population!!PERCENT ALLOCATED!!Age', 'float')]
    expected = OrderedDict(
        ('S0101_C02_{0:03d}E'.format(num),
         {'label': 'Estimate!!Percent!!' + suffix, 'concept': 'AGE AND SEX',
          'predicateType': ptype})
        for num, (suffix, ptype) in enumerate(entries, start=1))
    self.assertEqual(censusdata.censustable('acs5', 2019, 'S0101_C02'), expected)
def test_censustable_acsse(self):
    """Check ACS supplemental-estimates metadata for table K201601 (Household Language), 2014-2019."""
    # 2014-2015: each base label has estimate (E), annotation (EA), margin of
    # error (M), and MOE annotation (MA) variables, in that key order.
    base_labels = [
        'Total:',
        'English only',
        'Spanish:',
        'Spanish:!!Limited English speaking household',
        'Spanish:!!Not a limited English speaking household',
        'Other languages:',
        'Other languages:!!Limited English speaking household',
        'Other languages:!!Not a limited English speaking household',
    ]
    concept = 'K201601. Household Language'
    expected = OrderedDict()
    for num, label in enumerate(base_labels, start=1):
        moe_label = 'Margin of Error for!!' + label
        key_stem = 'K201601_{0:03d}'.format(num)
        expected[key_stem + 'E'] = {'label': label, 'concept': concept, 'predicateType': 'int'}
        expected[key_stem + 'EA'] = {'label': label, 'concept': concept, 'predicateType': 'string'}
        expected[key_stem + 'M'] = {'label': moe_label, 'concept': concept, 'predicateType': 'int'}
        expected[key_stem + 'MA'] = {'label': moe_label, 'concept': concept, 'predicateType': 'string'}
    for year in range(2014, 2015 + 1):
        self.assertEqual(censusdata.censustable('acsse', year, 'K201601'), expected)
    # 2016-2018: estimate variables only, 'Estimate!!Total' hierarchy without
    # the trailing colons on parent categories.
    tails = ['', '!!English only', '!!Spanish',
             '!!Spanish!!Limited English speaking household',
             '!!Spanish!!Not a limited English speaking household',
             '!!Other languages',
             '!!Other languages!!Limited English speaking household',
             '!!Other languages!!Not a limited English speaking household']
    expected = OrderedDict(
        ('K201601_{0:03d}E'.format(num),
         {'label': 'Estimate!!Total' + tail, 'concept': 'HOUSEHOLD LANGUAGE',
          'predicateType': 'int'})
        for num, tail in enumerate(tails, start=1))
    for year in range(2016, 2018 + 1):
        self.assertEqual(censusdata.censustable('acsse', year, 'K201601'), expected)
    # 2019: same shape as 2016-2018 but parent categories regain trailing colons.
    tails = ['', '!!English only', '!!Spanish:',
             '!!Spanish:!!Limited English speaking household',
             '!!Spanish:!!Not a limited English speaking household',
             '!!Other languages:',
             '!!Other languages:!!Limited English speaking household',
             '!!Other languages:!!Not a limited English speaking household']
    expected = OrderedDict(
        ('K201601_{0:03d}E'.format(num),
         {'label': 'Estimate!!Total:' + tail, 'concept': 'HOUSEHOLD LANGUAGE',
          'predicateType': 'int'})
        for num, tail in enumerate(tails, start=1))
    self.assertEqual(censusdata.censustable('acsse', 2019, 'K201601'), expected)
def test_censustable_acs3(self):
    """Check the ACS 3-year B23025 table for 2012-2013.

    2012 reports an empty predicateType; 2013 reports 'int'. Every margin
    column's label is the estimate label prefixed with 'Margin of Error for!!'.
    """
    concept = 'B23025. Employment Status for the Population 16 Years and Over'
    estimate_labels = [
        ('001', 'Total:'),
        ('002', 'In labor force:'),
        ('003', 'In labor force:!!Civilian labor force:'),
        ('004', 'In labor force:!!Civilian labor force:!!Employed'),
        ('005', 'In labor force:!!Civilian labor force:!!Unemployed'),
        ('006', 'In labor force:!!Armed Forces'),
        ('007', 'Not in labor force'),
    ]
    for year in (2012, 2013):
        ptype = '' if year == 2012 else 'int'
        expected = OrderedDict()
        for suffix, label in estimate_labels:
            expected['B23025_%sE' % suffix] = {
                'label': label, 'concept': concept, 'predicateType': ptype}
            expected['B23025_%sM' % suffix] = {
                'label': 'Margin of Error for!!' + label,
                'concept': concept, 'predicateType': ptype}
        self.assertEqual(censusdata.censustable('acs3', year, 'B23025'), expected)
def test_censustable_sf1(self):
    """Check the 2010 SF1 P002 (Urban And Rural) table metadata."""
    concept = 'P2. Urban And Rural [6]'
    rows = [
        ('P0020001', 'Total Population'),
        ('P0020002', 'Urban:'),
        ('P0020003', 'Urban: !! Inside urbanized areas'),
        ('P0020004', 'Urban: !! Inside urban clusters'),
        ('P0020005', 'Rural !! Inside urban clusters'),
        ('P0020006', 'Not defined for this file !! Inside urban clusters'),
    ]
    expected = OrderedDict(
        (variable, {'label': label, 'concept': concept, 'predicateType': ''})
        for variable, label in rows)
    self.assertEqual(censusdata.censustable('sf1', 2010, 'P002'), expected)
def test_unknowntable(self):
    """A table id that does not exist should raise ValueError."""
    with self.assertRaises(ValueError):
        censusdata.censustable('acs5', 2015, 'B24444')
def test_search(self):
    """Exercise search() with a plain string and with a callable predicate."""
    pop_concept = 'B00001. Unweighted Sample Count of the Population'
    housing_concept = 'B00002. Unweighted Sample Housing Units'
    self.assertEqual(
        censusdata.search('acs5', 2015, 'concept', 'unweighted sample'),
        [
            ('B00001_001E', pop_concept, 'Total'),
            ('B00001_001M', pop_concept, 'Margin Of Error For!!Total'),
            ('B00002_001E', housing_concept, 'Total'),
            ('B00002_001M', housing_concept, 'Margin Of Error For!!Total'),
        ])
    results = censusdata.search('acs5', 2018, 'concept', 'SEX BY AGE')
    self.assertEqual(len(results), 2969)
    self.assertEqual(
        results[0],
        ('B01001A_001E', 'SEX BY AGE (WHITE ALONE)', 'Estimate!!Total'))
    # Callable predicate: both phrases must match, case-insensitively.
    predicate = lambda value: (
        re.search('unweighted sample', value, re.IGNORECASE)
        and re.search('housing', value, re.IGNORECASE))
    self.assertEqual(
        censusdata.search('acs5', 2015, 'concept', predicate),
        [
            ('B00002_001E', housing_concept, 'Total'),
            ('B00002_001M', housing_concept, 'Margin Of Error For!!Total'),
        ])
def test_printtable(self):
    """Capture stdout from printtable() and compare against the exact
    formatted table, with and without margin-of-error rows.
    """
    testtable = censusdata.censustable('acs5', 2015, 'B19013')
    # Redirect stdout into a buffer so the printed table can be asserted on.
    printedtable = io.StringIO()
    sys.stdout = printedtable
    censusdata.printtable(testtable)
    sys.stdout = sys.__stdout__
    self.assertEqual(printedtable.getvalue(), textwrap.dedent(
    '''\
    Variable     | Table                          | Label                                                    | Type
    -------------------------------------------------------------------------------------------------------------------
    B19013_001E  | B19013. Median Household Incom | Median household income in the past 12 months (in 2015 I | int
    -------------------------------------------------------------------------------------------------------------------
    '''))
    printedtable.close()
    # Second pass: moe=True must additionally print the _001M margin row.
    printedtable = io.StringIO()
    sys.stdout = printedtable
    censusdata.printtable(testtable, moe=True)
    sys.stdout = sys.__stdout__
    self.assertEqual(printedtable.getvalue(), textwrap.dedent(
    '''\
    Variable     | Table                          | Label                                                    | Type
    -------------------------------------------------------------------------------------------------------------------
    B19013_001E  | B19013. Median Household Incom | Median household income in the past 12 months (in 2015 I | int
    B19013_001M  | B19013. Median Household Incom | !! Margin of Error for Median household income in the pa | int
    -------------------------------------------------------------------------------------------------------------------
    '''))
    printedtable.close()
def test_unknown_tabletype(self):
    """Invalid table prefixes and an unknown tabletype should raise ValueError."""
    with self.assertRaises(ValueError):
        censusdata.censusvar('acs5', 2015, ['B19013_001E', 'D19013_002E'])
    with self.assertRaises(ValueError):
        censusdata.censustable('acs5', 2015, 'C19013')
    with self.assertRaises(ValueError):
        censusdata.search('acs5', 2015, 'concept', 'unweighted sample',
                          tabletype='cdetail')
# Run the full test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 117.668135
| 223
| 0.695615
| 9,960
| 80,132
| 5.503313
| 0.043072
| 0.045099
| 0.087279
| 0.085819
| 0.943043
| 0.933063
| 0.90661
| 0.889971
| 0.877912
| 0.86173
| 0
| 0.088767
| 0.116133
| 80,132
| 680
| 224
| 117.841176
| 0.685144
| 0.000699
| 0
| 0.301887
| 0
| 0.001572
| 0.680985
| 0.057539
| 0
| 0
| 0
| 0
| 0.064465
| 1
| 0.037736
| false
| 0
| 0.012579
| 0
| 0.051887
| 0.018868
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0eaf16b3df8e4140308856573d4d82ac467779b7
| 149
|
py
|
Python
|
core/world/__init__.py
|
ChrisLR/BasicDungeonRL
|
b293d40bd9a0d3b7aec41b5e1d58441165997ff1
|
[
"MIT"
] | 3
|
2017-10-28T11:28:38.000Z
|
2018-09-12T09:47:00.000Z
|
core/world/__init__.py
|
ChrisLR/BasicDungeonRL
|
b293d40bd9a0d3b7aec41b5e1d58441165997ff1
|
[
"MIT"
] | null | null | null |
core/world/__init__.py
|
ChrisLR/BasicDungeonRL
|
b293d40bd9a0d3b7aec41b5e1d58441165997ff1
|
[
"MIT"
] | null | null | null |
from core.world.area import Area
from core.world.level import Level, LevelStub
from core.world.region import Region
from core.world.room import Room
| 29.8
| 45
| 0.825503
| 25
| 149
| 4.92
| 0.36
| 0.260163
| 0.422764
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114094
| 149
| 4
| 46
| 37.25
| 0.931818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
adee351153acc41bfe9edb69a5bdbb2ab4e4d608
| 25,389
|
py
|
Python
|
myfunctions.py
|
irenekarijadi/RF-LSTM-CEEMDAN
|
3e9e510056985563520061c292a79bd4a7141a0b
|
[
"MIT"
] | 4
|
2022-03-16T20:23:04.000Z
|
2022-03-28T03:30:45.000Z
|
myfunctions.py
|
irenekarijadi/RF-LSTM-CEEMDAN
|
3e9e510056985563520061c292a79bd4a7141a0b
|
[
"MIT"
] | null | null | null |
myfunctions.py
|
irenekarijadi/RF-LSTM-CEEMDAN
|
3e9e510056985563520061c292a79bd4a7141a0b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[24]:
import numpy
import pandas as pd
import tensorflow as tf
from PyEMD import CEEMDAN
import warnings
warnings.filterwarnings("ignore")
### import the libraries
from tensorflow import keras
from tensorflow.keras import layers
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from math import sqrt
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
    """Build supervised-learning pairs from a 2-D series array.

    Args:
        dataset: 2-D array whose first column holds the series values.
        look_back: number of consecutive observations per feature window.

    Returns:
        (X, y) numpy arrays: X[i] is dataset[i:i+look_back, 0] and
        y[i] is the observation immediately after that window.
    """
    windows, targets = [], []
    last_start = len(dataset) - look_back - 1
    for start in range(last_start):
        windows.append(dataset[start:start + look_back, 0])
        targets.append(dataset[start + look_back, 0])
    return numpy.array(windows), numpy.array(targets)
def percentage_error(actual, predicted):
    """Return element-wise relative error (actual - predicted) / actual.

    Where actual[j] == 0, falls back to predicted[j] divided by the mean of
    `actual` to avoid division by zero.

    Args:
        actual: 1-D numpy array of true values.
        predicted: 1-D numpy array of predicted values, same length.

    Returns:
        1-D numpy array of signed relative errors.
    """
    res = numpy.empty(actual.shape)
    # BUGFIX: the original used np.mean, but only `numpy` is imported at
    # module level, so any zero in `actual` raised NameError. Hoisted out
    # of the loop since it is constant across iterations.
    mean_actual = numpy.mean(actual)
    for j in range(actual.shape[0]):
        if actual[j] != 0:
            res[j] = (actual[j] - predicted[j]) / actual[j]
        else:
            res[j] = predicted[j] / mean_actual
    return res
def mean_absolute_percentage_error(y_true, y_pred):
    """Return the mean absolute percentage error (MAPE), in percent."""
    errors = percentage_error(numpy.asarray(y_true), numpy.asarray(y_pred))
    return numpy.mean(numpy.abs(errors)) * 100
# In[25]:
def lr_model(datass,look_back,data_partition):
    """Train and score a linear-regression forecaster on a lagged series.

    Args:
        datass: series-like with a ``.values`` attribute (presumably a
            single-column pandas Series/DataFrame -- TODO confirm).
        look_back: number of lagged observations used as features.
        data_partition: fraction of the series used for training.

    Returns:
        (mape, rmse, mae) computed on the test split, in original units.
    """
    datasets=datass.values
    # Chronological train/test split (no shuffling).
    train_size = int(len(datasets) * data_partition)
    test_size = len(datasets) - train_size  # NOTE(review): unused
    train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
    trainX, trainY = create_dataset(train, look_back)
    testX, testY = create_dataset(test, look_back)
    X_train=pd.DataFrame(trainX)
    Y_train=pd.DataFrame(trainY)
    X_test=pd.DataFrame(testX)
    Y_test=pd.DataFrame(testY)
    sc_X = StandardScaler()
    sc_y = StandardScaler()
    X= sc_X.fit_transform(X_train)
    y= sc_y.fit_transform(Y_train)
    # NOTE(review): the scalers are re-fit on the test split (fit_transform,
    # not transform), so train and test use different standardization stats.
    X1= sc_X.fit_transform(X_test)
    y1= sc_y.fit_transform(Y_test)
    y=y.ravel()
    y1=y1.ravel()
    import tensorflow as tf
    # Fixed seeds for reproducibility (TF is seeded but unused by this model).
    numpy.random.seed(1234)
    tf.random.set_seed(1234)
    from sklearn.linear_model import LinearRegression
    grid = LinearRegression()
    grid.fit(X,y)
    y_pred_train_lr= grid.predict(X)
    y_pred_test_lr= grid.predict(X1)
    y_pred_train_lr=pd.DataFrame(y_pred_train_lr)
    y_pred_test_lr=pd.DataFrame(y_pred_test_lr)
    y1=pd.DataFrame(y1)
    y=pd.DataFrame(y)
    # Undo the target scaling so errors are reported in original units.
    y_pred_test1_lr= sc_y.inverse_transform (y_pred_test_lr)
    y_pred_train1_lr=sc_y.inverse_transform (y_pred_train_lr)
    y_test= sc_y.inverse_transform (y1)
    y_train= sc_y.inverse_transform (y)
    # NOTE(review): '_rf' names below look copy-pasted from rf_model; unused.
    y_pred_test1_rf=pd.DataFrame(y_pred_test1_lr)
    y_pred_train1_rf=pd.DataFrame(y_pred_train1_lr)
    y_test= pd.DataFrame(y_test)
    #summarize the fit of the model
    mape=mean_absolute_percentage_error(y_test,y_pred_test1_lr)
    rmse= sqrt(mean_squared_error(y_test,y_pred_test1_lr))
    mae=metrics.mean_absolute_error(y_test,y_pred_test1_lr)
    return mape,rmse,mae
# In[26]:
def svr_model(datass,look_back,data_partition):
    """Train and score a support-vector regressor on a lagged series.

    Same pipeline as lr_model but with sklearn's default-parameter SVR.
    Returns (mape, rmse, mae) on the test split, in original units.
    """
    datasets=datass.values
    # Chronological train/test split.
    train_size = int(len(datasets) * data_partition)
    test_size = len(datasets) - train_size  # NOTE(review): unused
    train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
    trainX, trainY = create_dataset(train, look_back)
    testX, testY = create_dataset(test, look_back)
    X_train=pd.DataFrame(trainX)
    Y_train=pd.DataFrame(trainY)
    X_test=pd.DataFrame(testX)
    Y_test=pd.DataFrame(testY)
    sc_X = StandardScaler()
    sc_y = StandardScaler()
    X= sc_X.fit_transform(X_train)
    y= sc_y.fit_transform(Y_train)
    # NOTE(review): test split re-fits the scalers (fit_transform, not transform).
    X1= sc_X.fit_transform(X_test)
    y1= sc_y.fit_transform(Y_test)
    y=y.ravel()
    y1=y1.ravel()
    numpy.random.seed(1234)
    import tensorflow as tf
    tf.random.set_seed(1234)
    from sklearn.svm import SVR
    grid = SVR()
    grid.fit(X,y)
    y_pred_train_svr= grid.predict(X)
    y_pred_test_svr= grid.predict(X1)
    y_pred_train_svr=pd.DataFrame(y_pred_train_svr)
    y_pred_test_svr=pd.DataFrame(y_pred_test_svr)
    y1=pd.DataFrame(y1)
    y=pd.DataFrame(y)
    # Back to original units before scoring.
    y_pred_test1_svr= sc_y.inverse_transform (y_pred_test_svr)
    y_pred_train1_svr=sc_y.inverse_transform (y_pred_train_svr)
    y_test= sc_y.inverse_transform (y1)
    y_train= sc_y.inverse_transform (y)
    y_pred_test1_svr=pd.DataFrame(y_pred_test1_svr)
    y_pred_train1_svr=pd.DataFrame(y_pred_train1_svr)
    y_test= pd.DataFrame(y_test)
    #summarize the fit of the model
    mape=mean_absolute_percentage_error(y_test,y_pred_test1_svr)
    rmse= sqrt(mean_squared_error(y_test,y_pred_test1_svr))
    mae=metrics.mean_absolute_error(y_test,y_pred_test1_svr)
    return mape,rmse,mae
# In[27]:
def ann_model(datass,look_back,data_partition):
    """Train and score an MLP regressor (sklearn) on a lagged series.

    Returns (mape, rmse, mae) on the test split, in original units.
    """
    datasets=datass.values
    # Chronological train/test split.
    train_size = int(len(datasets) * data_partition)
    test_size = len(datasets) - train_size  # NOTE(review): unused
    train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
    trainX, trainY = create_dataset(train, look_back)
    testX, testY = create_dataset(test, look_back)
    X_train=pd.DataFrame(trainX)
    Y_train=pd.DataFrame(trainY)
    X_test=pd.DataFrame(testX)
    Y_test=pd.DataFrame(testY)
    sc_X = StandardScaler()
    sc_y = StandardScaler()
    X= sc_X.fit_transform(X_train)
    y= sc_y.fit_transform(Y_train)
    # NOTE(review): test split re-fits the scalers (fit_transform, not transform).
    X1= sc_X.fit_transform(X_test)
    y1= sc_y.fit_transform(Y_test)
    y=y.ravel()
    y1=y1.ravel()
    import numpy
    # NOTE(review): these 3-D reshapes are never used -- the MLP below is
    # fit and evaluated on the 2-D X/X1 arrays.
    trainX = numpy.reshape(X, (X.shape[0], 1, X.shape[1]))
    testX = numpy.reshape(X1, (X1.shape[0], 1, X1.shape[1]))
    numpy.random.seed(1234)
    import tensorflow as tf
    tf.random.set_seed(1234)
    from sklearn.neural_network import MLPRegressor
    model= MLPRegressor(random_state=1,activation='tanh').fit(X,y)
    numpy.random.seed(1234)
    # make predictions
    y_pred_train = model.predict(X)
    y_pred_test = model.predict(X1)
    y_pred_test= numpy.array(y_pred_test).ravel()
    y_pred_test=pd.DataFrame(y_pred_test)
    y1=pd.DataFrame(y1)
    # Back to original units before scoring.
    y_pred_test1= sc_y.inverse_transform (y_pred_test)
    y_test= sc_y.inverse_transform (y1)
    #summarize the fit of the model
    mape=mean_absolute_percentage_error(y_test,y_pred_test1)
    rmse= sqrt(mean_squared_error(y_test,y_pred_test1))
    mae=metrics.mean_absolute_error(y_test,y_pred_test1)
    return mape,rmse,mae
# In[28]:
def rf_model(datass,look_back,data_partition,max_features):
    """Train and score a random-forest regressor on a lagged series.

    Args:
        datass: series-like with a ``.values`` attribute.
        look_back: number of lagged observations used as features.
        data_partition: fraction of the series used for training.
        max_features: forwarded to RandomForestRegressor(max_features=...).

    Returns:
        (mape, rmse, mae) computed on the test split, in original units.
    """
    datasets=datass.values
    # Chronological train/test split.
    train_size = int(len(datasets) * data_partition)
    test_size = len(datasets) - train_size  # NOTE(review): unused
    train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
    trainX, trainY = create_dataset(train, look_back)
    testX, testY = create_dataset(test, look_back)
    X_train=pd.DataFrame(trainX)
    Y_train=pd.DataFrame(trainY)
    X_test=pd.DataFrame(testX)
    Y_test=pd.DataFrame(testY)
    sc_X = StandardScaler()
    sc_y = StandardScaler()
    X= sc_X.fit_transform(X_train)
    y= sc_y.fit_transform(Y_train)
    # NOTE(review): test split re-fits the scalers (fit_transform, not transform).
    X1= sc_X.fit_transform(X_test)
    y1= sc_y.fit_transform(Y_test)
    y=y.ravel()
    y1=y1.ravel()
    # Seed NumPy before fitting so the forest's randomness is reproducible.
    numpy.random.seed(1234)
    import tensorflow as tf
    tf.random.set_seed(1234)
    from sklearn.ensemble import RandomForestRegressor
    grid = RandomForestRegressor(max_features=max_features)
    grid.fit(X,y)
    y_pred_train_rf= grid.predict(X)
    y_pred_test_rf= grid.predict(X1)
    y_pred_train_rf=pd.DataFrame(y_pred_train_rf)
    y_pred_test_rf=pd.DataFrame(y_pred_test_rf)
    y1=pd.DataFrame(y1)
    y=pd.DataFrame(y)
    # Back to original units before scoring.
    y_pred_test1_rf= sc_y.inverse_transform (y_pred_test_rf)
    y_pred_train1_rf=sc_y.inverse_transform (y_pred_train_rf)
    y_test= sc_y.inverse_transform (y1)
    y_train= sc_y.inverse_transform (y)
    y_pred_test1_rf=pd.DataFrame(y_pred_test1_rf)
    y_pred_train1_rf=pd.DataFrame(y_pred_train1_rf)
    y_test= pd.DataFrame(y_test)
    #summarize the fit of the model
    mape=mean_absolute_percentage_error(y_test,y_pred_test1_rf)
    rmse= sqrt(mean_squared_error(y_test,y_pred_test1_rf))
    mae=metrics.mean_absolute_error(y_test,y_pred_test1_rf)
    return mape,rmse,mae
# In[29]:
def lstm_model(datass,look_back,data_partition,max_features,epoch,batch_size,neuron,lr,optimizer):
    """Train and score a single-layer LSTM forecaster on a lagged series.

    Args:
        datass: series-like with a ``.values`` attribute.
        look_back: number of lagged observations per input window.
        data_partition: fraction of the series used for training.
        max_features: NOTE(review): unused in this function.
        epoch, batch_size: Keras fit() parameters.
        neuron: LSTM hidden units.
        lr: Adam learning rate.
        optimizer: NOTE(review): shadowed below by a fresh Adam instance;
            the passed-in value is never used.

    Returns:
        (mape, rmse, mae) computed on the test split, in original units.
    """
    datasets=datass.values
    # Chronological train/test split.
    train_size = int(len(datasets) * data_partition)
    test_size = len(datasets) - train_size  # NOTE(review): unused
    train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
    trainX, trainY = create_dataset(train, look_back)
    testX, testY = create_dataset(test, look_back)
    X_train=pd.DataFrame(trainX)
    Y_train=pd.DataFrame(trainY)
    X_test=pd.DataFrame(testX)
    Y_test=pd.DataFrame(testY)
    sc_X = StandardScaler()
    sc_y = StandardScaler()
    X= sc_X.fit_transform(X_train)
    y= sc_y.fit_transform(Y_train)
    # NOTE(review): test split re-fits the scalers (fit_transform, not transform).
    X1= sc_X.fit_transform(X_test)
    y1= sc_y.fit_transform(Y_test)
    y=y.ravel()
    y1=y1.ravel()
    # Keras LSTM expects 3-D input: (samples, timesteps, features).
    trainX1 = numpy.reshape(X, (X.shape[0],1,X.shape[1]))
    testX1 = numpy.reshape(X1, (X1.shape[0],1,X1.shape[1]))
    numpy.random.seed(1234)
    import tensorflow as tf
    tf.random.set_seed(1234)
    import os
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TF info/warning logs
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.layers.recurrent import LSTM
    neuron=neuron
    model = Sequential()
    model.add(LSTM(units = neuron,input_shape=(trainX1.shape[1], trainX1.shape[2])))
    model.add(Dense(1))
    optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
    model.compile(loss='mse',optimizer=optimizer)
    # model.summary()
    # Fitting the RNN to the Training s
    model.fit(trainX1, y, epochs = epoch, batch_size = batch_size,verbose=0)
    # make predictions
    y_pred_train = model.predict(trainX1)
    y_pred_test = model.predict(testX1)
    y_pred_test= numpy.array(y_pred_test).ravel()
    y_pred_test=pd.DataFrame(y_pred_test)
    # Back to original units before scoring.
    y_pred_test1= sc_y.inverse_transform (y_pred_test)
    y1=pd.DataFrame(y1)
    y_test= sc_y.inverse_transform (y1)
    from sklearn.metrics import mean_squared_error
    from sklearn.metrics import mean_absolute_error
    from sklearn import metrics
    mape=mean_absolute_percentage_error(y_test,y_pred_test1)
    rmse= sqrt(mean_squared_error(y_test,y_pred_test1))
    mae=metrics.mean_absolute_error(y_test,y_pred_test1)
    return mape,rmse,mae
# In[30]:
###################################################hybrid based ceemdan####################################################
def hybrid_ceemdan_rf(datass,look_back,data_partition,max_features):
    """CEEMDAN + random-forest hybrid forecaster.

    Decomposes the series into IMFs with CEEMDAN, fits one random forest
    per IMF, sums the per-IMF test predictions, and scores the sum against
    the original (re-split, re-scaled) series.

    Returns:
        (mape, rmse, mae) computed on the test split, in original units.
    """
    import numpy as np
    import pandas as pd
    dfs=datass
    s = dfs.values
    emd = CEEMDAN(epsilon=0.05)
    emd.noise_seed(12345)  # fixed seed so the decomposition is reproducible
    IMFs = emd(s)
    full_imf=pd.DataFrame(IMFs)
    data_imf=full_imf.T  # one column per IMF component
    import pandas as pd
    pred_test=[]
    test_ori=[]
    pred_train=[]
    train_ori=[]
    # Fit and predict one random forest per IMF component.
    for col in data_imf:
        datasetss2=pd.DataFrame(data_imf[col])
        datasets=datasetss2.values
        train_size = int(len(datasets) * data_partition)
        test_size = len(datasets) - train_size  # NOTE(review): unused
        train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
        trainX, trainY = create_dataset(train, look_back)
        testX, testY = create_dataset(test, look_back)
        X_train=pd.DataFrame(trainX)
        Y_train=pd.DataFrame(trainY)
        X_test=pd.DataFrame(testX)
        Y_test=pd.DataFrame(testY)
        sc_X = StandardScaler()
        sc_y = StandardScaler()
        X= sc_X.fit_transform(X_train)
        y= sc_y.fit_transform(Y_train)
        # NOTE(review): scalers re-fit on the test split of each component.
        X1= sc_X.fit_transform(X_test)
        y1= sc_y.fit_transform(Y_test)
        y=y.ravel()
        y1=y1.ravel()
        import numpy
        numpy.random.seed(1234)
        import tensorflow as tf
        tf.random.set_seed(1234)
        from sklearn.ensemble import RandomForestRegressor
        grid = RandomForestRegressor(max_features=max_features)
        grid.fit(X,y)
        y_pred_train= grid.predict(X)
        y_pred_test= grid.predict(X1)
        y_pred_test=pd.DataFrame(y_pred_test)
        y_pred_train=pd.DataFrame(y_pred_train)
        y1=pd.DataFrame(y1)
        y=pd.DataFrame(y)
        y_test= sc_y.inverse_transform (y1)
        y_train= sc_y.inverse_transform (y)
        y_pred_test1= sc_y.inverse_transform (y_pred_test)
        y_pred_train1= sc_y.inverse_transform (y_pred_train)
        pred_test.append(y_pred_test1)
        test_ori.append(y_test)
        pred_train.append(y_pred_train1)
        train_ori.append(y_train)
    # Summing per-IMF predictions reconstructs the full-series forecast.
    result_pred_test= pd.DataFrame.from_records(pred_test)
    result_pred_train= pd.DataFrame.from_records(pred_train)
    a=result_pred_test.sum(axis = 0, skipna = True)
    b=result_pred_train.sum(axis = 0, skipna = True)  # NOTE(review): unused
    # Re-split and re-scale the ORIGINAL series to recover the reference y_test.
    dataframe=pd.DataFrame(dfs)
    dataset=dataframe.values
    train_size = int(len(dataset) * data_partition)
    test_size = len(dataset) - train_size
    train, test = dataset[0:train_size], dataset[train_size:len(dataset)]
    trainX, trainY = create_dataset(train, look_back)
    testX, testY = create_dataset(test, look_back)
    X_train=pd.DataFrame(trainX)
    Y_train=pd.DataFrame(trainY)
    X_test=pd.DataFrame(testX)
    Y_test=pd.DataFrame(testY)
    sc_X = StandardScaler()
    sc_y = StandardScaler()
    X= sc_X.fit_transform(X_train)
    y= sc_y.fit_transform(Y_train)
    X1= sc_X.fit_transform(X_test)
    y1= sc_y.fit_transform(Y_test)
    y=y.ravel()
    y1=y1.ravel()
    trainX = numpy.reshape(X, (X.shape[0], 1, X.shape[1]))
    testX = numpy.reshape(X1, (X1.shape[0], 1, X1.shape[1]))
    numpy.random.seed(1234)
    import tensorflow as tf
    tf.random.set_seed(1234)
    y1=pd.DataFrame(y1)
    y=pd.DataFrame(y)
    y_test= sc_y.inverse_transform (y1)
    y_train= sc_y.inverse_transform (y)
    a= pd.DataFrame(a)
    y_test= pd.DataFrame(y_test)
    #summarize the fit of the model
    mape=mean_absolute_percentage_error(y_test,a)
    rmse= sqrt(mean_squared_error(y_test,a))
    mae=metrics.mean_absolute_error(y_test,a)
    return mape,rmse,mae
# In[31]:
def hybrid_ceemdan_lstm(datass,look_back,data_partition,max_features,epoch,batch_size,neuron,lr,optimizer):
    """CEEMDAN + LSTM hybrid forecaster.

    Decomposes the series into IMFs with CEEMDAN, fits one LSTM per IMF,
    sums the per-IMF test predictions, and scores the sum against the
    original series. `max_features` and the passed-in `optimizer` are
    unused (NOTE(review): optimizer is shadowed by a fresh Adam below).

    Returns:
        (mape, rmse, mae) computed on the test split, in original units.
    """
    from PyEMD import CEEMDAN
    dfs=datass
    s = dfs.values
    emd = CEEMDAN(epsilon=0.05)
    emd.noise_seed(12345)  # fixed seed so the decomposition is reproducible
    IMFs = emd(s)
    full_imf=pd.DataFrame(IMFs)
    data_imf=full_imf.T  # one column per IMF component
    pred_test=[]
    test_ori=[]
    pred_train=[]
    train_ori=[]
    # Fit and predict one LSTM per IMF component.
    for col in data_imf:
        datasetss2=pd.DataFrame(data_imf[col])
        datasets=datasetss2.values
        train_size = int(len(datasets) * data_partition)
        test_size = len(datasets) - train_size  # NOTE(review): unused
        train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
        trainX, trainY = create_dataset(train, look_back)
        testX, testY = create_dataset(test, look_back)
        X_train=pd.DataFrame(trainX)
        Y_train=pd.DataFrame(trainY)
        X_test=pd.DataFrame(testX)
        Y_test=pd.DataFrame(testY)
        sc_X = StandardScaler()
        sc_y = StandardScaler()
        X= sc_X.fit_transform(X_train)
        y= sc_y.fit_transform(Y_train)
        # NOTE(review): scalers re-fit on the test split of each component.
        X1= sc_X.fit_transform(X_test)
        y1= sc_y.fit_transform(Y_test)
        y=y.ravel()
        y1=y1.ravel()
        import numpy
        # Keras LSTM expects 3-D input: (samples, timesteps, features).
        trainX = numpy.reshape(X, (X.shape[0], 1, X.shape[1]))
        testX = numpy.reshape(X1, (X1.shape[0], 1, X1.shape[1]))
        numpy.random.seed(1234)
        import tensorflow as tf
        tf.random.set_seed(1234)
        from keras.models import Sequential
        from keras.layers.core import Dense, Dropout, Activation
        from keras.layers.recurrent import LSTM
        neuron=neuron
        model = Sequential()
        model.add(LSTM(units = neuron,input_shape=(trainX.shape[1], trainX.shape[2])))
        model.add(Dense(1))
        optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
        model.compile(loss='mse',optimizer=optimizer)
        numpy.random.seed(1234)
        # Fitting the RNN to the Training set
        model.fit(trainX, y, epochs = epoch, batch_size = batch_size,verbose=0)
        # make predictions
        y_pred_train = model.predict(trainX)
        y_pred_test = model.predict(testX)
        # make predictions
        y_pred_test= numpy.array(y_pred_test).ravel()
        y_pred_test=pd.DataFrame(y_pred_test)
        y1=pd.DataFrame(y1)
        y=pd.DataFrame(y)
        y_pred_train= numpy.array(y_pred_train).ravel()
        y_pred_train=pd.DataFrame(y_pred_train)
        y_test= sc_y.inverse_transform (y1)
        y_train= sc_y.inverse_transform (y)
        y_pred_test1= sc_y.inverse_transform (y_pred_test)
        y_pred_train1= sc_y.inverse_transform (y_pred_train)
        pred_test.append(y_pred_test1)
        test_ori.append(y_test)
        pred_train.append(y_pred_train1)
        train_ori.append(y_train)
    # Summing per-IMF predictions reconstructs the full-series forecast.
    result_pred_test= pd.DataFrame.from_records(pred_test)
    result_pred_train= pd.DataFrame.from_records(pred_train)
    a=result_pred_test.sum(axis = 0, skipna = True)
    b=result_pred_train.sum(axis = 0, skipna = True)  # NOTE(review): unused
    # Re-split and re-scale the ORIGINAL series to recover the reference y_test.
    dataframe=pd.DataFrame(dfs)
    dataset=dataframe.values
    train_size = int(len(dataset) * data_partition)
    test_size = len(dataset) - train_size
    train, test = dataset[0:train_size], dataset[train_size:len(dataset)]
    trainX, trainY = create_dataset(train, look_back)
    testX, testY = create_dataset(test, look_back)
    X_train=pd.DataFrame(trainX)
    Y_train=pd.DataFrame(trainY)
    X_test=pd.DataFrame(testX)
    Y_test=pd.DataFrame(testY)
    sc_X = StandardScaler()
    sc_y = StandardScaler()
    X= sc_X.fit_transform(X_train)
    y= sc_y.fit_transform(Y_train)
    X1= sc_X.fit_transform(X_test)
    y1= sc_y.fit_transform(Y_test)
    y=y.ravel()
    y1=y1.ravel()
    trainX = numpy.reshape(X, (X.shape[0], 1, X.shape[1]))
    testX = numpy.reshape(X1, (X1.shape[0], 1, X1.shape[1]))
    numpy.random.seed(1234)
    import tensorflow as tf
    tf.random.set_seed(1234)
    y1=pd.DataFrame(y1)
    y=pd.DataFrame(y)
    y_test= sc_y.inverse_transform (y1)
    y_train= sc_y.inverse_transform (y)
    a= pd.DataFrame(a)
    y_test= pd.DataFrame(y_test)
    #summarize the fit of the model
    mape=mean_absolute_percentage_error(y_test,a)
    rmse= sqrt(mean_squared_error(y_test,a))
    mae=metrics.mean_absolute_error(y_test,a)
    return mape,rmse,mae
# In[32]:
def proposed_method(datass,look_back,data_partition,max_features,epoch,batch_size,neuron,lr,optimizer):
    """Proposed RF-LSTM-CEEMDAN hybrid forecaster.

    Decomposes the series with CEEMDAN, models the FIRST IMF (highest
    frequency) with a random forest and every remaining IMF with an LSTM,
    sums the per-IMF test predictions, and scores the sum against the
    original series. NOTE(review): the passed-in `optimizer` is shadowed
    by a fresh Adam instance in the LSTM branch.

    Returns:
        (mape, rmse, mae, a, y_test) where `a` is the summed test-split
        forecast and `y_test` the reference values, both in original units.
    """
    from PyEMD import CEEMDAN
    dfs=datass
    s = dfs.values
    emd = CEEMDAN(epsilon=0.05)
    emd.noise_seed(12345)  # fixed seed so the decomposition is reproducible
    IMFs = emd(s)
    full_imf=pd.DataFrame(IMFs)
    data_imf=full_imf.T  # one column per IMF component
    pred_test=[]
    test_ori=[]
    pred_train=[]
    train_ori=[]
    n_imf=len(data_imf.columns)
    k=list(range(1,n_imf))  # IMFs handled by the LSTM branch
    m=[0]                   # first IMF handled by the random-forest branch
    # --- Random-forest branch: first IMF only ---
    for i in m:
        datasetss2=pd.DataFrame(data_imf[i])
        datasets=datasetss2.values
        train_size = int(len(datasets) * data_partition)
        test_size = len(datasets) - train_size  # NOTE(review): unused
        train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
        trainX, trainY = create_dataset(train, look_back)
        testX, testY = create_dataset(test, look_back)
        X_train=pd.DataFrame(trainX)
        Y_train=pd.DataFrame(trainY)
        X_test=pd.DataFrame(testX)
        Y_test=pd.DataFrame(testY)
        sc_X = StandardScaler()
        sc_y = StandardScaler()
        X= sc_X.fit_transform(X_train)
        y= sc_y.fit_transform(Y_train)
        # NOTE(review): scalers re-fit on the test split of each component.
        X1= sc_X.fit_transform(X_test)
        y1= sc_y.fit_transform(Y_test)
        y=y.ravel()
        y1=y1.ravel()
        import numpy
        # NOTE(review): 3-D reshapes unused in this branch (RF is fit on X).
        trainX = numpy.reshape(X, (X.shape[0], 1, X.shape[1]))
        testX = numpy.reshape(X1, (X1.shape[0], 1, X1.shape[1]))
        numpy.random.seed(1234)
        import tensorflow as tf
        tf.random.set_seed(1234)
        from sklearn.ensemble import RandomForestRegressor
        grid = RandomForestRegressor(max_features=max_features)
        grid.fit(X,y)
        y_pred_train= grid.predict(X)
        y_pred_test= grid.predict(X1)
        y_pred_test=pd.DataFrame(y_pred_test)
        y_pred_train=pd.DataFrame(y_pred_train)
        y1=pd.DataFrame(y1)
        y=pd.DataFrame(y)
        y_test= sc_y.inverse_transform (y1)
        y_train= sc_y.inverse_transform (y)
        y_pred_test1= sc_y.inverse_transform (y_pred_test)
        y_pred_train1= sc_y.inverse_transform (y_pred_train)
        pred_test.append(y_pred_test1)
        test_ori.append(y_test)
        pred_train.append(y_pred_train1)
        train_ori.append(y_train)
    # --- LSTM branch: all remaining IMFs ---
    for i in k:
        datasetss2=pd.DataFrame(data_imf[i])
        datasets=datasetss2.values
        train_size = int(len(datasets) * data_partition)
        test_size = len(datasets) - train_size
        train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
        trainX, trainY = create_dataset(train, look_back)
        testX, testY = create_dataset(test, look_back)
        X_train=pd.DataFrame(trainX)
        Y_train=pd.DataFrame(trainY)
        X_test=pd.DataFrame(testX)
        Y_test=pd.DataFrame(testY)
        sc_X = StandardScaler()
        sc_y = StandardScaler()
        X= sc_X.fit_transform(X_train)
        y= sc_y.fit_transform(Y_train)
        X1= sc_X.fit_transform(X_test)
        y1= sc_y.fit_transform(Y_test)
        y=y.ravel()
        y1=y1.ravel()
        import numpy
        # Keras LSTM expects 3-D input: (samples, timesteps, features).
        trainX = numpy.reshape(X, (X.shape[0], 1, X.shape[1]))
        testX = numpy.reshape(X1, (X1.shape[0], 1, X1.shape[1]))
        numpy.random.seed(1234)
        import tensorflow as tf
        tf.random.set_seed(1234)
        from keras.models import Sequential
        from keras.layers.core import Dense, Dropout, Activation
        from keras.layers.recurrent import LSTM
        neuron=neuron
        model = Sequential()
        model.add(LSTM(units = neuron,input_shape=(trainX.shape[1], trainX.shape[2])))
        model.add(Dense(1))
        optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
        model.compile(loss='mse',optimizer=optimizer)
        numpy.random.seed(1234)
        # Fitting the RNN to the Training set
        model.fit(trainX, y, epochs = epoch, batch_size = batch_size,verbose=0)
        # make predictions
        y_pred_train = model.predict(trainX)
        y_pred_test = model.predict(testX)
        # make predictions
        y_pred_test= numpy.array(y_pred_test).ravel()
        y_pred_test=pd.DataFrame(y_pred_test)
        y1=pd.DataFrame(y1)
        y=pd.DataFrame(y)
        y_pred_train= numpy.array(y_pred_train).ravel()
        y_pred_train=pd.DataFrame(y_pred_train)
        y_test= sc_y.inverse_transform (y1)
        y_train= sc_y.inverse_transform (y)
        y_pred_test1= sc_y.inverse_transform (y_pred_test)
        y_pred_train1= sc_y.inverse_transform (y_pred_train)
        pred_test.append(y_pred_test1)
        test_ori.append(y_test)
        pred_train.append(y_pred_train1)
        train_ori.append(y_train)
    # Summing per-IMF predictions reconstructs the full-series forecast.
    result_pred_test= pd.DataFrame.from_records(pred_test)
    result_pred_train= pd.DataFrame.from_records(pred_train)
    a=result_pred_test.sum(axis = 0, skipna = True)
    b=result_pred_train.sum(axis = 0, skipna = True)  # NOTE(review): unused
    # Re-split and re-scale the ORIGINAL series to recover the reference y_test.
    dataframe=pd.DataFrame(dfs)
    dataset=dataframe.values
    train_size = int(len(dataset) * data_partition)
    test_size = len(dataset) - train_size
    train, test = dataset[0:train_size], dataset[train_size:len(dataset)]
    trainX, trainY = create_dataset(train, look_back)
    testX, testY = create_dataset(test, look_back)
    X_train=pd.DataFrame(trainX)
    Y_train=pd.DataFrame(trainY)
    X_test=pd.DataFrame(testX)
    Y_test=pd.DataFrame(testY)
    sc_X = StandardScaler()
    sc_y = StandardScaler()
    X= sc_X.fit_transform(X_train)
    y= sc_y.fit_transform(Y_train)
    X1= sc_X.fit_transform(X_test)
    y1= sc_y.fit_transform(Y_test)
    y=y.ravel()
    y1=y1.ravel()
    trainX = numpy.reshape(X, (X.shape[0], 1, X.shape[1]))
    testX = numpy.reshape(X1, (X1.shape[0], 1, X1.shape[1]))
    numpy.random.seed(1234)
    import tensorflow as tf
    tf.random.set_seed(1234)
    y1=pd.DataFrame(y1)
    y=pd.DataFrame(y)
    y_test= sc_y.inverse_transform (y1)
    y_train= sc_y.inverse_transform (y)
    a= pd.DataFrame(a)
    y_test= pd.DataFrame(y_test)
    import numpy as np
    #summarize the fit of the model
    mape=mean_absolute_percentage_error(y_test,a)
    rmse= sqrt(mean_squared_error(y_test,a))
    mae=metrics.mean_absolute_error(y_test,a)
    return mape,rmse,mae,a,y_test
# In[ ]:
# In[ ]:
# In[ ]:
| 26.446875
| 123
| 0.668597
| 3,760
| 25,389
| 4.242819
| 0.054255
| 0.041058
| 0.024823
| 0.045258
| 0.910424
| 0.886855
| 0.876951
| 0.85194
| 0.845045
| 0.840657
| 0
| 0.021357
| 0.22175
| 25,389
| 959
| 124
| 26.474453
| 0.786022
| 0.025759
| 0
| 0.851171
| 0
| 0
| 0.001626
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018395
| false
| 0
| 0.09699
| 0.001672
| 0.133779
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
adf31e43e431eaa27ed2701582ebadf14e8447c7
| 19,210
|
py
|
Python
|
smm_wrapper/views.py
|
gesiscss/smm_wrapper
|
e075d75ccd6a6c944cfcdd236e6527157e31279a
|
[
"MIT"
] | null | null | null |
smm_wrapper/views.py
|
gesiscss/smm_wrapper
|
e075d75ccd6a6c944cfcdd236e6527157e31279a
|
[
"MIT"
] | null | null | null |
smm_wrapper/views.py
|
gesiscss/smm_wrapper
|
e075d75ccd6a6c944cfcdd236e6527157e31279a
|
[
"MIT"
] | null | null | null |
"""Summary
"""
import pandas as pd
import itertools
from typing import Union
from .api import SMMAPI
from . import __version__
class DataView:
"""Qurey methods for correspondence of the SMMAPI methods
Attributes:
api (TYPE): Description
"""
def __init__(self, api, id_column):
"""Constructor of the DataView
Args:
api (TYPE): the SMMAPI
"""
self.api = api
self.id_column = id_column
def get_politicians(self) -> pd.DataFrame:
    """Fetch every politician entity with its social-media identifiers.

    Returns:
        dataframe indexed by ``politician_id``, as documented in Entity list in
        http://mediamonitoring.gesis.org/politicians/api/politicians/
        name:str, name of a politician
        firstname:str, firstname of a politician
        affiliation:str, party to which a politician is affiliated
        fb_ids:list(int), ids of all facebook accounts for a politician
        tw_ids:list(int), ids of all twitter accounts for a politician
        wp_ids:list(int), ids of all wikipedia pages for a politician
        wp_titles:list(string), wikipedia titles associated to the politician
    """
    columns = [
        'politician_id', 'name', 'firstname', 'affiliation',
        'fb_ids', 'tw_ids', 'wp_ids', 'wp_titles', 'tw_sns', 'wp_sns',
    ]
    frame = pd.DataFrame(self.api.get_all(), columns=columns)
    return frame.set_index('politician_id')
def get_politician(self, _id) -> pd.DataFrame:
"""Get entities of a politicians and their respective facebook, twitter and wikipedia ids.
Input parameters:
_id (str): A unique value identifying this politician or an organization.
Returns:
dataframe: result of the api query as documented in Entity list in
http://mediamonitoring.gesis.org/politicians/api/politicians/
politician_id:int, unique identifier for a politician
name:str, name ofs a politician
firstname:str, firstname of a politician
affiliation:str, party to which a politician is affiliated
fb_ids:list(int), ids of all facebook accounts for a politician
tw_ids:list(int), ids of all twitter accounts for a politician
wp_ids:list(int), ids of all wikipedia pages for a politician
wp_titles:list(string), wikipedia titles associated to the politician
"""
response = self.api.get(_id)
return pd.Series(response)
def get_organizations(self) -> pd.DataFrame:
"""Get entities of all organizations and their respective facebook, twitter and wikipedia ids.
Returns:
dataframe: result of the api query as documented in Entity list in
http://mediamonitoring.gesis.org/api/organizations/all
organization_id:int, unique identifier for an organization
name:str, name of an organization
category:str, type of an organization (media or a party)
subcategory:str, subcategory of an organization (name of a party or type of media)
fb_ids:list(int), ids of all facebook accounts for an organization
tw_ids:list(int), ids of all twitter accounts for an organization
wp_ids:list(int), ids of all wikipedia pages for an organization
wp_titles:list(string), wikipedia titles associated to the organization
"""
response = self.api.get_all()
return pd.DataFrame(response, columns=[
'organization_id', 'name', 'category', 'subcategory', 'fb_ids', 'tw_ids',
'wp_ids', 'wp_titles', 'tw_sns', 'wp_sns'
]).set_index('organization_id')
def get_organization(self, _id) -> pd.DataFrame:
"""Get entities of an organization and their respective facebook, twitter and wikipedia ids.
Input parameters:
_id (str): A unique value identifying this politician or an organization.
Returns:
dataframe: result of the api query as documented in Entity list in
http://mediamonitoring.gesis.org/politicians/api/politicians/
politician_id:int, unique identifier for a politician
name:str, name ofs a politician
firstname:str, firstname of a politician
affiliation:str, party to which a politician is affiliated
fb_ids:list(int), ids of all facebook accounts for a politician
tw_ids:list(int), ids of all twitter accounts for a politician
wp_ids:list(int), ids of all wikipedia pages for a politician
wp_titles:list(string), wikipedia titles associated to the politician
"""
response = self.api.get(_id)
return pd.Series(response)
def get_all(self) -> pd.DataFrame:
"""Get all entities and their respective facebook, twitter and wikipedia ids.
Returns:
dataframe: result of the api query as documented in Entity list in
http://mediamonitoring.gesis.org/api/organizations/all
organization_id:int, unique identifier for an organization
name:str, name of an organization
category:str, type of an organization (media or a party)
subcategory:str, subcategory of an organization (name of a party or type of media)
fb_ids:list(int), ids of all facebook accounts for an organization
tw_ids:list(int), ids of all twitter accounts for an organization
wp_ids:list(int), ids of all wikipedia pages for an organization
wp_titles:list(string), wikipedia titles associated to the organization
"""
response = self.api.get_all()
return pd.DataFrame(response).set_index(self.id_column)
def get_one(self, _id) -> pd.DataFrame:
"""Get an entity and their respective facebook, twitter and wikipedia ids.
Input parameters:
_id (str): A unique value identifying this politician or an organization.
Returns:
dataframe: result of the api query as documented in Entity list in
http://mediamonitoring.gesis.org/politicians/api/politicians/
politician_id:int, unique identifier for a politician
name:str, name ofs a politician
firstname:str, firstname of a politician
affiliation:str, party to which a politician is affiliated
fb_ids:list(int), ids of all facebook accounts for a politician
tw_ids:list(int), ids of all twitter accounts for a politician
wp_ids:list(int), ids of all wikipedia pages for a politician
wp_titles:list(string), wikipedia titles associated to the politician
"""
response = self.api.get_one(_id)
return pd.Series(response)
def tweets_by(self, twitter_user_id=None, _id=None, text_contains=None, from_date=None, to_date=None, aggregate_by='month'):
"""Returns query tweets made by politicians, or by a politician using twitter id or using politician id
Input parameters:
twitter_user_id (str): twitter user id
OR
_id (str): A unique value identifying this politician or an organization.
optional:
text_contains (str): filter tweets by the content of the message
from_date (string($date)): filter by tweets posted after this date (format: YYYY-MM-DD)
to_date (string($date)): filter by tweets posted before this date (format: YYYY-MM-DD)
aggregate_by (str): criteria that will be used to aggregate (month by default)
Returns:
DataFrame, result of the api query as documented in twitter tweets_by/reply_to content in http://mediamonitoring.gesis.org/api/politicians/swagger/
"""
response = self.api.tweets_by(
twitter_user_id, _id, text_contains, from_date, to_date, aggregate_by)
if twitter_user_id is not None:
response['twitter_user_id'] = twitter_user_id
if _id is not None:
response['_id'] = _id
if text_contains is not None:
response['text_contains'] = text_contains
if from_date is not None:
response['from_date'] = from_date
if to_date is not None:
response['to_date'] = to_date
response.pop('response_type')
response.pop('aggregated_by')
response['date'] = response.pop('labels')
response['tweets'] = response.pop('values')
df = pd.DataFrame(response)
df['date'] = pd.to_datetime(df['date'])
return df
def replies_to(self, twitter_user_id=None, _id=None, text_contains=None, from_date=None, to_date=None, aggregate_by='month'):
"""Returns query twitter replies made by politicians, or by a politician using twitter id or using politician id
Input parameters:
twitter_user_id (str): twitter user id
OR
_id (str): A unique value identifying this politician or an organization.
optional:
text_contains (str): filter tweets by the content of the message
from_date (string($date)): filter by tweets posted after this date (format: YYYY-MM-DD)
to_date (string($date)): filter by tweets posted before this date (format: YYYY-MM-DD)
aggregate_by (str): criteria that will be used to aggregate (month by default)
Returns:
DataFrame, result of the api query as documented in twitter tweets_by/reply_to content in http://mediamonitoring.gesis.org/api/politicians/swagger/
"""
response = self.api.replies_to(
twitter_user_id, _id, text_contains, from_date, to_date, aggregate_by)
if twitter_user_id is not None:
response['twitter_user_id'] = twitter_user_id
if _id is not None:
response['_id'] = _id
if text_contains is not None:
response['text_contains'] = text_contains
if from_date is not None:
response['from_date'] = from_date
if to_date is not None:
response['to_date'] = to_date
response.pop('response_type')
response.pop('aggregated_by')
response['date'] = response.pop('labels')
response['replies'] = response.pop('values')
df = pd.DataFrame(response)
df['date'] = pd.to_datetime(df['date'])
return df
def posts_by(self, facebook_user_id=None, _id=None, text_contains=None, from_date=None, to_date=None, aggregate_by='month'):
"""Returns query facebook posts made by politicians, or by a politician using facebook id or using politician id
Input parameters:
facebook_user_id (str): facebook user id
OR
_id (str): A unique value identifying this politician or an organization.
optional:
text_contains (str): filter facebook posts by the content of the message
from_date (string($date)): filter by facebook posts posted after this date (format: YYYY-MM-DD)
to_date (string($date)): filter by facebook posts posted before this date (format: YYYY-MM-DD)
aggregate_by (str): criteria that will be used to aggregate (month by default)
Returns:
DataFrame, result of the api query as documented in facebook posts_by content in http://mediamonitoring.gesis.org/api/politicians/swagger/
"""
response = self.api.posts_by(
facebook_user_id, _id, text_contains, from_date, to_date, aggregate_by)
if facebook_user_id is not None:
response['facebook_user_id'] = facebook_user_id
if _id is not None:
response['_id'] = _id
if text_contains is not None:
response['text_contains'] = text_contains
if from_date is not None:
response['from_date'] = from_date
if to_date is not None:
response['to_date'] = to_date
response.pop('response_type')
response.pop('aggregated_by')
response['date'] = response.pop('labels')
response['posts'] = response.pop('values')
df = pd.DataFrame(response)
df['date'] = pd.to_datetime(df['date'])
return df
def comments_by(self, facebook_user_id=None, _id=None, text_contains=None, from_date=None, to_date=None, aggregate_by='month'):
"""Returns query facebook comments made by politicians, or by a politician using facebook id or using politician id
Input parameters:
facebook_user_id (str): facebook user id
OR
_id (str): A unique value identifying this politician or an organization.
optional:
text_contains (str): filter facebook comments by the content of the message
from_date (string($date)): filter by facebook comments posted after this date (format: YYYY-MM-DD)
to_date (string($date)): filter by facebook comments posted before this date (format: YYYY-MM-DD)
aggregate_by (str): criteria that will be used to aggregate (month by default)
Returns:
DataFrame, result of the api query as documented in facebook comments_by content in http://mediamonitoring.gesis.org/api/politicians/swagger/
"""
response = self.api.comments_by(
facebook_user_id, _id, text_contains, from_date, to_date, aggregate_by)
if facebook_user_id is not None:
response['facebook_user_id'] = facebook_user_id
if _id is not None:
response['_id'] = _id
if text_contains is not None:
response['text_contains'] = text_contains
if from_date is not None:
response['from_date'] = from_date
if to_date is not None:
response['to_date'] = to_date
response.pop('response_type')
response.pop('aggregated_by')
response['date'] = response.pop('labels')
response['comments'] = response.pop('values')
df = pd.DataFrame(response)
df['date'] = pd.to_datetime(df['date'])
return df
def wikipedia(self, wikipedia_page_id=None, _id=None, text_contains=None, from_date=None, to_date=None, aggregate_by='month'):
"""Returns query wikipedia change objects (chobs) made by politicians, or by a politician using wikipedia id or using politician id
Input parameters:
wikipedia_page_id (str): wikipedia page id
OR
_id (str): A unique value identifying this politician or an organization.
optional:
text_contains (str): filter chobs by the content of the message
from_date (string($date)): filter by chobs posted after this date (format: YYYY-MM-DD)
to_date (string($date)): filter by chobs posted before this date (format: YYYY-MM-DD)
aggregate_by (str): criteria that will be used to aggregate (month by default)
Returns:
DataFrame, result of the api query as documented in wikipedia content in http://mediamonitoring.gesis.org/api/politicians/swagger/
"""
response = self.api.wikipedia(
wikipedia_page_id, _id, text_contains, from_date, to_date, aggregate_by)
if wikipedia_page_id is not None:
response['wikipedia_page_id'] = wikipedia_page_id
if _id is not None:
response['_id'] = _id
if text_contains is not None:
response['text_contains'] = text_contains
if from_date is not None:
response['from_date'] = from_date
if to_date is not None:
response['to_date'] = to_date
if aggregate_by is None:
response.pop('response_type')
df = pd.DataFrame(response)
df = df['chobs'].apply(pd.Series)[['right_token','left_token', 'ins_tokens', 'del_tokens',
'right_token_str', 'left_token_str', 'ins_tokens_str', 'del_tokens_str']]
#df = df[['right_token','left_token']]
else:
response.pop('response_type')
response.pop('aggregated_by')
response['date'] = response.pop('labels')
response['chobs'] = response.pop('values')
df = pd.DataFrame(response)
df['date'] = pd.to_datetime(df['date'])
return df
def general_tweets(self, twitter_user_id=None, text_contains=None, from_date=None, to_date=None, aggregate_by='month'):
"""Returns query tweets made by the general population. This tweets were collected separatedly using keywords.
Input parameters:
optional:
twitter_user_id (str): twitter user id
text_contains (str): filter chobs by the content of the message
from_date (string($date)): filter by chobs after this date (format: YYYY-MM-DD)
to_date (string($date)): filter by chobs before this date (format: YYYY-MM-DD)
aggregate_by (str): criteria that will be used to aggregate (month by default)
Returns:
dataframe, result of the api query as documented in twitter (general public) content in http://mediamonitoring.gesis.org/api/politicians/swagger/
"""
response = self.api.general_tweets(
twitter_user_id, text_contains, from_date, to_date, aggregate_by)
if twitter_user_id is not None:
response['twitter_user_id'] = twitter_user_id
if text_contains is not None:
response['text_contains'] = text_contains
if from_date is not None:
response['from_date'] = from_date
if to_date is not None:
response['to_date'] = to_date
response.pop('response_type')
response.pop('aggregated_by')
response['date'] = response.pop('labels')
response['tweets'] = response.pop('values')
df = pd.DataFrame(response)
df['date'] = pd.to_datetime(df['date'])
return df
| 47.083333
| 160
| 0.609526
| 2,370
| 19,210
| 4.789451
| 0.065823
| 0.038058
| 0.022994
| 0.043432
| 0.898599
| 0.890847
| 0.888468
| 0.871994
| 0.866443
| 0.864505
| 0
| 0
| 0.312598
| 19,210
| 407
| 161
| 47.199017
| 0.859599
| 0.521187
| 0
| 0.713376
| 0
| 0
| 0.126604
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082803
| false
| 0
| 0.031847
| 0
| 0.197452
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bc15b04291691f0ed8c2a3dcf740be889133c311
| 8,759
|
py
|
Python
|
tests/data/diff1.py
|
lengau/sshedit
|
f6a7d34643ee2bb2e6894b314a73f2c57e015186
|
[
"BSD-2-Clause"
] | 5
|
2015-05-27T02:02:38.000Z
|
2020-11-17T04:54:47.000Z
|
tests/data/diff1.py
|
lengau/sshed
|
f6a7d34643ee2bb2e6894b314a73f2c57e015186
|
[
"BSD-2-Clause"
] | 24
|
2015-05-21T23:12:01.000Z
|
2015-11-12T18:35:46.000Z
|
tests/data/diff1.py
|
lengau/sshed
|
f6a7d34643ee2bb2e6894b314a73f2c57e015186
|
[
"BSD-2-Clause"
] | null | null | null |
# pylint: disable=missing-docstring, line-too-long
# Test fixture data for diff handling.
# NOTE(review): FINAL appears to be the post-patch ("+++ tmp2") contents of
# the unified diff below — confirm against the test that consumes these.
FINAL = b"""\
First line
Line two.
Line 3
Four?
Five!
7
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec a diam lectus. Sed sit amet ipsum mauris. Maecenas congue ligula ac quam viverra nec consectetur ante hendrerit. Donec et mollis dolor. Praesent et diam eget libero egestas mattis sit amet vitae augue. Nam tincidunt congue enim, ut porta lorem lacinia consectetur. Donec ut libero sed arcu vehicula ultricies a non tortor. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean ut gravida lorem. Ut turpis felis, pulvinar a semper sed, adipiscing id dolor. Pellentesque auctor nisi id magna consequat sagittis. Curabitur dapibus enim sit amet elit pharetra tincidunt feugiat nisl imperdiet. Ut convallis libero in urna ultrices accumsan. Donec sed odio eros. Donec viverra mi quis quam pulvinar at malesuada arcu rhoncus. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. In rutrum accumsan ultricies. Mauris vitae nisi at sem facilisis semper ac in est.
Sed et erat faucibus nunc euismod ultricies ut id justo. Nullam cursus suscipit nisi, et ultrices justo sodales nec. Fusce venenatis facilisis lectus ac semper. Aliquam at massa ipsum. Quisque bibendum purus convallis nulla ultrices ultricies. Nullam aliquam, mi eu aliquam tincidunt, purus velit laoreet tortor, viverra pretium nisi quam vitae mi. Fusce vel volutpat elit. Nam sagittis nisi dui.
Suspendisse lectus leo, consectetur in tempor sit amet, placerat quis neque. Etiam luctus porttitor lorem, sed suscipit est rutrum non. Curabitur lobortis nisl a enim congue semper. Aenean commodo ultrices imperdiet. Vestibulum ut justo vel sapien venenatis tincidunt. Phasellus eget dolor sit amet ipsum dapibus condimentum vitae quis lectus. Aliquam ut massa in turpis dapibus convallis. Ut augue nunc, sodales ut euismod non, adipiscing vitae orci. Mauris ut placerat justo. Mauris in ultricies enim. Quisque nec est eleifend nulla ultrices egestas quis ut quam. Donec sollicitudin lectus a mauris pulvinar id aliquam urna cursus. Cras quis ligula sem, vel elementum mi. Phasellus non ullamcorper urna.
Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. In euismod ultrices facilisis. Vestibulum porta sapien adipiscing augue congue id pretium lectus molestie. Proin quis dictum nisl. Morbi id quam sapien, sed vestibulum sem. Duis elementum rutrum mauris sed convallis. Proin vestibulum magna mi. Aenean tristique hendrerit magna, ac facilisis nulla hendrerit ut. Sed non tortor sodales quam auctor elementum. Donec hendrerit nunc eget elit pharetra pulvinar. Suspendisse id tempus tortor. Aenean luctus, elit commodo laoreet commodo, justo nisi consequat massa, sed vulputate quam urna quis eros. Donec vel.
"""
# A unified diff stored as raw bytes, one list element per diff line:
# two file-header lines, one "@@" hunk header, then context (' '),
# removal ('-') and addition ('+') lines.
DIFF = [
    b'--- tmp 2015-05-29 16:46:52.722077075 -0400\n',
    b'+++ tmp2 2015-06-05 20:28:39.700598812 -0400\n',
    b'@@ -1,21 +1,17 @@\n',
    b' First line\n',
    b'-Second.\n',
    b'+Line two.\n',
    b' Line 3\n',
    b' Four?\n',
    b' Five!\n',
    b'-6\n',
    b' 7\n',
    b' \n',
    b'-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec a diam lectus. Sed sit amet ipsum mauris. Maecenas congue ligula ac quam viverra nec consectetur ante hendrerit. Donec et mollis dolor. Praesent et diam eget libero egestas mattis sit amet vitae augue. Nam tincidunt congue enim, ut porta lorem lacinia consectetur. Donec ut libero sed arcu vehicula ultricies a non tortor. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean ut gravida lorem. Ut turpis felis, pulvinar a semper sed, adipiscig id dolor. Pellentesque auctor nisi id magna consequat sagittis. Curabitur dapibus enim sit amet elit pharetra tincidunt feugiat nisl imperdiet. Ut convallis libero in urna ultrices accumsan. Donec sed odio eros. Donec viverra mi quis quam pulvinar at malesuada arcu rhoncus. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. In rutrum accumsan ultricies. Mauris vitae nisi at sem facilisis semper ac in est.\n',
    b'+Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec a diam lectus. Sed sit amet ipsum mauris. Maecenas congue ligula ac quam viverra nec consectetur ante hendrerit. Donec et mollis dolor. Praesent et diam eget libero egestas mattis sit amet vitae augue. Nam tincidunt congue enim, ut porta lorem lacinia consectetur. Donec ut libero sed arcu vehicula ultricies a non tortor. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean ut gravida lorem. Ut turpis felis, pulvinar a semper sed, adipiscing id dolor. Pellentesque auctor nisi id magna consequat sagittis. Curabitur dapibus enim sit amet elit pharetra tincidunt feugiat nisl imperdiet. Ut convallis libero in urna ultrices accumsan. Donec sed odio eros. Donec viverra mi quis quam pulvinar at malesuada arcu rhoncus. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. In rutrum accumsan ultricies. Mauris vitae nisi at sem facilisis semper ac in est.\n',
    b' \n',
    b' \n',
    b'-Vivamus fermentum semper porta. Nunc diam velit, adipiscing ut tristique vitae, sagittis vel odio. Maecenas convallis ullamcorper ultricies. Curabitur ornare, ligula semper consectetur sagittis, nisi diam iaculis velit, id fringilla sem nunc vel mi. Nam dictum, odio nec pretium volutpat, arcu ante placerat erat, non tristique elit urna et turpis. Quisque mi metus, ornare sit amet fermentum et, tincidunt et orci. Fusce eget orci a orci congue vestibulum. Ut dolor diam, elementum et vestibulum eu, porttitor vel elit. Curabitur venenatis pulvinar tellus gravida ornare. Sed et erat faucibus nunc euismod ultricies ut id justo. Nullam cursus suscipit nisi, et ultrices justo sodales nec. Fusce venenatis facilisis lectus ac semper. Aliquam at massa ipsum. Quisque bibendum purus convallis nulla ultrices ultricies. Nullam aliquam, mi eu aliquam tincidunt, purus velit laoreet tortor, viverra pretium nisi quam vitae mi. Fusce vel volutpat elit. Nam sagittis nisi dui.\n',
    b'+Sed et erat faucibus nunc euismod ultricies ut id justo. Nullam cursus suscipit nisi, et ultrices justo sodales nec. Fusce venenatis facilisis lectus ac semper. Aliquam at massa ipsum. Quisque bibendum purus convallis nulla ultrices ultricies. Nullam aliquam, mi eu aliquam tincidunt, purus velit laoreet tortor, viverra pretium nisi quam vitae mi. Fusce vel volutpat elit. Nam sagittis nisi dui.\n',
    b' \n',
    b' \n',
    b'-Suspendisse lectus leo, consectetur in tempor sit amet, placerat quis neque. Etiam luctus porttitor lorem, sed suscipit est rutrum non. Curabitur lobortis nisl a enim congue semper. Aenean commodo ultrices imperdiet. Vestibulum ut justo vel sapien venenatis tincidunt. Phasellus eget dolor sit amet ipsum dapibus condimentum vitae quis lectus. Aliquam ut massa in turpis dapibus convallis. Praesent elit lacus, vestibulum at malesuada et, ornare et est. Ut augue nunc, sodales ut euismod non, adipiscing vitae orci. Mauris ut placerat justo. Mauris in ultricies enim. Quisque nec est eleifend nulla ultrices egestas quis ut quam. Donec sollicitudin lectus a mauris pulvinar id aliquam urna cursus. Cras quis ligula sem, vel elementum mi. Phasellus non ullamcorper urna.\n',
    b'+Suspendisse lectus leo, consectetur in tempor sit amet, placerat quis neque. Etiam luctus porttitor lorem, sed suscipit est rutrum non. Curabitur lobortis nisl a enim congue semper. Aenean commodo ultrices imperdiet. Vestibulum ut justo vel sapien venenatis tincidunt. Phasellus eget dolor sit amet ipsum dapibus condimentum vitae quis lectus. Aliquam ut massa in turpis dapibus convallis. Ut augue nunc, sodales ut euismod non, adipiscing vitae orci. Mauris ut placerat justo. Mauris in ultricies enim. Quisque nec est eleifend nulla ultrices egestas quis ut quam. Donec sollicitudin lectus a mauris pulvinar id aliquam urna cursus. Cras quis ligula sem, vel elementum mi. Phasellus non ullamcorper urna.\n',
    b' \n',
    b' \n',
    b' Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. In euismod ultrices facilisis. Vestibulum porta sapien adipiscing augue congue id pretium lectus molestie. Proin quis dictum nisl. Morbi id quam sapien, sed vestibulum sem. Duis elementum rutrum mauris sed convallis. Proin vestibulum magna mi. Aenean tristique hendrerit magna, ac facilisis nulla hendrerit ut. Sed non tortor sodales quam auctor elementum. Donec hendrerit nunc eget elit pharetra pulvinar. Suspendisse id tempus tortor. Aenean luctus, elit commodo laoreet commodo, justo nisi consequat massa, sed vulputate quam urna quis eros. Donec vel.\n',
    b'-onec vel.\n',
    b'-\n',
    b'-onec vel.\n',
]
| 168.442308
| 980
| 0.791415
| 1,331
| 8,759
| 5.208114
| 0.150263
| 0.00779
| 0.01558
| 0.004616
| 0.893826
| 0.890941
| 0.889498
| 0.889498
| 0.889498
| 0.889498
| 0
| 0.008989
| 0.161776
| 8,759
| 51
| 981
| 171.745098
| 0.935168
| 0.00548
| 0
| 0.214286
| 0
| 0.261905
| 0.967735
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
70c1c8ebd06eb81e57c08f7ab1fe11b746816076
| 414
|
py
|
Python
|
ci/ci/__init__.py
|
krcurtis/hail
|
ca297546eee33e237634d5f69e06f8fe14fdd1eb
|
[
"MIT"
] | 1
|
2020-03-25T01:53:50.000Z
|
2020-03-25T01:53:50.000Z
|
ci/ci/__init__.py
|
krcurtis/hail
|
ca297546eee33e237634d5f69e06f8fe14fdd1eb
|
[
"MIT"
] | 1
|
2022-03-28T19:35:55.000Z
|
2022-03-28T19:35:55.000Z
|
ci/ci/__init__.py
|
krcurtis/hail
|
ca297546eee33e237634d5f69e06f8fe14fdd1eb
|
[
"MIT"
] | null | null | null |
# Parenthesized import instead of a fragile backslash continuation (PEP 8).
from . import (
    batch_helper,
    build_state,
    ci,
    ci_logging,
    constants,
    environment,
    git_state,
    github,
    google_storage,
    http_helper,
    pr,
    prs,
    sentinel,
    shell_helper,
)

# Explicit public API of the ci package: its submodules, re-exported.
__all__ = [
    'batch_helper',
    'build_state',
    'ci',
    'ci_logging',
    'constants',
    'environment',
    'git_state',
    'github',
    'google_storage',
    'http_helper',
    'pr',
    'prs',
    'sentinel',
    'shell_helper',
]
| 20.7
| 117
| 0.620773
| 45
| 414
| 5.311111
| 0.444444
| 0.09205
| 0.133891
| 0.175732
| 0.945607
| 0.945607
| 0.945607
| 0.945607
| 0.945607
| 0.945607
| 0
| 0
| 0.2343
| 414
| 19
| 118
| 21.789474
| 0.753943
| 0
| 0
| 0
| 0
| 0
| 0.289855
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
70c3dc1e889f4eaaf0cb3047dcd9619407b097b0
| 94
|
py
|
Python
|
notecron/center/pages/main/__init__.py
|
notechats/notejob
|
bf12c80a08761b97cb9405afa0706ccbb413eee0
|
[
"MulanPSL-1.0"
] | null | null | null |
notecron/center/pages/main/__init__.py
|
notechats/notejob
|
bf12c80a08761b97cb9405afa0706ccbb413eee0
|
[
"MulanPSL-1.0"
] | null | null | null |
notecron/center/pages/main/__init__.py
|
notechats/notejob
|
bf12c80a08761b97cb9405afa0706ccbb413eee0
|
[
"MulanPSL-1.0"
] | null | null | null |
from . import errors, views
from .core import blue_print
from .core import blue_print as main
| 23.5
| 36
| 0.797872
| 16
| 94
| 4.5625
| 0.5625
| 0.219178
| 0.383562
| 0.493151
| 0.630137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159574
| 94
| 3
| 37
| 31.333333
| 0.924051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 9
|
70ce7d7a7afcb785257d7b2fdc1fba1bfac84cd8
| 5,625
|
py
|
Python
|
Sax/Final_code_test/dtw_visualization.py
|
rakesh-lagare/Thesis_Work
|
733285eae31a3fd8b613ec30d9e2ab9befd57614
|
[
"Apache-2.0"
] | 2
|
2018-08-30T18:29:53.000Z
|
2019-02-21T15:07:15.000Z
|
Sax/Final_code_test/dtw_visualization.py
|
rakesh-lagare/Thesis_Work
|
733285eae31a3fd8b613ec30d9e2ab9befd57614
|
[
"Apache-2.0"
] | null | null | null |
Sax/Final_code_test/dtw_visualization.py
|
rakesh-lagare/Thesis_Work
|
733285eae31a3fd8b613ec30d9e2ab9befd57614
|
[
"Apache-2.0"
] | null | null | null |
import matplotlib.pyplot as plt
import os
def dtw_visualization1(dtw_df, seg_df):
    """Plot every unique sub-section referenced by the DTW match table.

    Args:
        dtw_df: DataFrame of DTW matches with 'index1', 'index2' and 'key'
            columns.
        seg_df: DataFrame of segments with 'indices' and 'sub_section'
            columns.

    Saves the combined figure to ./Output/without_param/<key>.png and shows
    it.  Does nothing when ``dtw_df`` is empty.
    """
    if len(dtw_df) == 0:
        return
    # Unique segment indices appearing on either side of a match
    # (order unspecified, as with the original set()-based dedup).
    unique_list = list(set(dtw_df['index1'].tolist() + dtw_df['index2'].tolist()))
    lent = len(unique_list)
    key = dtw_df.iloc[0]['key']
    # Grid layout: 4 columns when there are many plots, else a fixed 5x2
    # grid — same choice as the original duplicated branches.
    rows, cols = (int(lent / 4) + 1, 4) if lent > 4 else (5, 2)
    fig = plt.figure(figsize=(3 * 3, 4 * 3))
    for i, seg_index in enumerate(unique_list):
        seg_row = seg_df.loc[seg_df['indices'] == seg_index]
        fig.add_subplot(rows, cols, i + 1)
        plt.plot(seg_row.iloc[0]['sub_section'])
    plt.savefig('./Output/without_param/' + key + '.png')
    plt.show()
def dtw_visualization2(dtw_df):
    """Plot the first match's sub-section and save it under its key."""
    first_match = dtw_df.iloc[0]
    key = first_match['key']
    plt.plot(first_match['sub_section1'])
    plt.savefig('./Output/without_param/' + key + '.png')
    plt.show()
def dtw_visualization3(dtw_df, skip_offset, ts):
    """Plot the full series and shade every matched window in red."""
    matched = set(dtw_df['index1'].tolist() + dtw_df['index2'].tolist())
    plt.figure(figsize=(16, 10), dpi=60)
    plt.plot(ts)
    for start_idx in matched:
        # Each match covers skip_offset samples starting at its index.
        plt.axvspan(start_idx, start_idx + skip_offset, color='red', alpha=0.4)
    plt.show()
def dtw_visualization_scale(key, idx, seg_df):
    """Plot each unique sub-section in ``idx`` on one figure.

    Args:
        key (str): name used for the output file.
        idx: iterable of segment indices (duplicates are dropped).
        seg_df: DataFrame of segments with 'indices' and 'sub_section'
            columns.

    Saves the figure to ./Output/with_param/<key>.png and shows it.
    """
    unique_list = list(set(idx))
    lent = len(unique_list)
    # Same grid choice as the original's duplicated branches:
    # 4 columns for many plots, otherwise a fixed 5x2 grid.
    rows, cols = (int(lent / 4) + 1, 4) if lent > 4 else (5, 2)
    fig = plt.figure(figsize=(3 * 3, 4 * 3))
    for i, seg_index in enumerate(unique_list):
        seg_row = seg_df.loc[seg_df['indices'] == seg_index]
        fig.add_subplot(rows, cols, i + 1)
        plt.plot(seg_row.iloc[0]['sub_section'])
    plt.savefig('./Output/with_param/' + key + '.png')
    plt.show()
def dtw_visualization_scale2(dtw_df):
    """Plot the first match's sub-section and save it under its key."""
    first_match = dtw_df.iloc[0]
    key = first_match['key']
    plt.plot(first_match['sub_section1'])
    plt.savefig('./Output/with_param/' + key + '.png')
    plt.show()
def dtw_visualization_DTW(idx, seg_df):
    """Plot each sub-section listed in ``idx``, saving one image per step.

    Args:
        idx: sequence of segment indices (used in order, no dedup).
        seg_df: DataFrame of segments with 'indices' and 'sub_section'
            columns.

    NOTE(review): unlike the other helpers, savefig/show run inside the
    loop, producing ./Output/DTW/<i>.png per sub-section — preserved from
    the original; confirm this per-iteration saving is intended.
    """
    lent = len(idx)
    # 4 columns for many plots, otherwise a fixed 5x2 grid.
    rows, cols = (int(lent / 4) + 1, 4) if lent > 4 else (5, 2)
    fig = plt.figure(figsize=(3 * 3, 4 * 3))
    for i, seg_index in enumerate(idx):
        seg_row = seg_df.loc[seg_df['indices'] == seg_index]
        fig.add_subplot(rows, cols, i + 1)
        plt.plot(seg_row.iloc[0]['sub_section'])
        plt.savefig('./Output/DTW/' + str(i) + '.png')
        plt.show()
def prep_dtw_vis(key, idx, seg_df):
    """Plot each unique sub-section in ``idx`` on one figure, named by key.

    Args:
        key: name for the output file; coerced to str.
        idx: iterable of segment indices (duplicates are dropped).
        seg_df: DataFrame of segments with 'indices' and 'sub_section'
            columns.

    Saves the figure to ./Output/DTW/<key>.png and shows it.
    """
    key = str(key)
    unique_list = list(set(idx))
    lent = len(unique_list)
    # 4 columns for many plots, otherwise a fixed 5x2 grid.
    rows, cols = (int(lent / 4) + 1, 4) if lent > 4 else (5, 2)
    fig = plt.figure(figsize=(3 * 3, 4 * 3))
    for i, seg_index in enumerate(unique_list):
        seg_row = seg_df.loc[seg_df['indices'] == seg_index]
        fig.add_subplot(rows, cols, i + 1)
        plt.plot(seg_row.iloc[0]['sub_section'])
    plt.savefig('./Output/DTW/' + key + '.png')
    plt.show()
| 28.553299
| 70
| 0.4784
| 719
| 5,625
| 3.570236
| 0.109875
| 0.148033
| 0.077912
| 0.13245
| 0.861706
| 0.851578
| 0.851578
| 0.851578
| 0.851578
| 0.845345
| 0
| 0.037821
| 0.370133
| 5,625
| 197
| 71
| 28.553299
| 0.686706
| 0.091911
| 0
| 0.831858
| 0
| 0
| 0.07022
| 0.009048
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061947
| false
| 0
| 0.017699
| 0
| 0.079646
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
70fc18a58d2bc2de7d9f6a6d1846a96edda0daae
| 93,122
|
py
|
Python
|
pyroot/_pyroot_old.py
|
SimpleArt/pyroot
|
1f1ac6a644999e86e4c3c83a5107cf2d34069c64
|
[
"MIT"
] | null | null | null |
pyroot/_pyroot_old.py
|
SimpleArt/pyroot
|
1f1ac6a644999e86e4c3c83a5107cf2d34069c64
|
[
"MIT"
] | null | null | null |
pyroot/_pyroot_old.py
|
SimpleArt/pyroot
|
1f1ac6a644999e86e4c3c83a5107cf2d34069c64
|
[
"MIT"
] | null | null | null |
import math
import sys
from enum import Enum
from functools import partial
from math import exp, inf, isinf, isnan, log, nan, sqrt
from typing import Any, Callable, Iterator, Literal, Optional, SupportsFloat, Union, Tuple as tuple, overload
# Public API: only the two root-finding entry points are exported.
__all__ = ["root_in", "root_iter"]
# Floating-point limits used for tolerance defaults and overflow guards.
FLOAT_EPSILON: float = 2 * sys.float_info.epsilon
FLOAT_MAX: float = sys.float_info.max
FLOAT_MIN: float = sys.float_info.min
# Smallest positive subnormal float (2**-1074).
FLOAT_SMALL_EPSILON: float = 2.0 ** -1074
# Method name groups: bracketing-only methods vs. ODE-based methods that need `fprime`.
DerivativeFreeMethod = Literal["bisect", "chandrupatla", "secant"]
ODEMethod = Literal["heun-ode", "midpoint-ode", "newt-ode", "rk45-ode"]
Method = Union[DerivativeFreeMethod, ODEMethod, Literal["newt-safe", "non-simple"]]
# Runtime tuples mirroring the Literal aliases above, used for membership checks
# in type_check. (`tuple` here is typing.Tuple, aliased in the imports.)
derivative_free_methods: tuple[DerivativeFreeMethod, ...] = ("bisect", "chandrupatla", "secant")  # type: ignore
ode_methods: tuple[ODEMethod, ...] = ("heun-ode", "midpoint-ode", "newt-ode", "rk45-ode")  # type: ignore
methods: tuple[Method, ...] = ("bisect", "chandrupatla", "heun-ode", "midpoint-ode", "newt-ode", "newt-safe", "non-simple", "rk45-ode", "secant")  # type: ignore
def float_mean(x1: float, x2: float) -> float:
    """
    Special mean of x1 and x2 for floats.

    Used for bisecting over floats faster than the arithmetic mean
    by converging first to the correct exponent before converging
    to the correct mantissa.
    """
    # s is +/-1 when x1 and x2 share a sign, otherwise 0 or +/-0.5;
    # multiplying by s folds both points onto the positive axis.
    s = 0.5 * (sign(x1) + sign(x2))
    x1 *= s
    x2 *= s
    if abs(s) != 1:
        # Signs differ (or an endpoint is 0): bisect through 0.
        return 0.0
    elif x1 / 8 < x2 < x1 * 8:
        # Same order of magnitude: plain arithmetic mean (overflow-safe form).
        return s * (x1 + 0.5 * (x2 - x1))
    elif x1 <= 1 <= x2 or x1 >= 1 >= x2:
        # 1 lies between the magnitudes: jump straight to the unit boundary.
        return s
    elif sqrt(sqrt(x1)) < x2 < x1 * x1 * x1 * x1:
        # Moderately separated magnitudes: geometric mean.
        return s * sqrt(x1) * sqrt(x2)
    elif x1 < 1:
        # Far-separated sub-unit magnitudes: geometric mean in log space.
        return s * exp(-sqrt(log(x1) * log(x2)))
    else:
        # Far-separated super-unit magnitudes: geometric mean in log space.
        return s * exp(sqrt(log(x1) * log(x2)))
def is_between(lo: float, x: float, hi: float) -> bool:
    """Return True when `x` lies strictly inside the open interval spanned by `lo` and `hi`.

    The endpoints may be given in either order; equal endpoints yield False.
    """
    if lo < hi:
        return lo < x < hi
    return hi < x < lo
def mean(x1: float, x2: float) -> float:
    """Returns the arithmetic mean of x1 and x2 without overflowing."""
    if sign(x1) == sign(x2):
        # Same sign: x1 + x2 could overflow, so step halfway from x1 instead.
        return x1 + 0.5 * (x2 - x1)
    # Opposite signs (or zero): the plain average cannot overflow.
    return 0.5 * (x1 + x2)
def _power_estimate_error(x1: float, x2: float, x3: float, y1: float, y2: float, y3: float, power: float) -> float:
    """Estimates the error of the power for the given 3 points.

    Returns the deviation of the middle point from the straight line through
    the outer points after the signed-power transform; 0 means the transformed
    points are collinear.
    """
    # Normalize all y values by one endpoint (chosen by sign of the power and
    # relative magnitudes) so signed_pow stays in a numerically safe range.
    if (power < 0) ^ (abs(y1) < abs(y3)):
        y1 /= y3
        y2 /= y3
        y3 /= y3
    else:
        y3 /= y1
        y2 /= y1
        y1 /= y1
    y1 = signed_pow(y1, power)
    y2 = signed_pow(y2, power)
    y3 = signed_pow(y3, power)
    # Interpolate the line from whichever outer point is nearer to x2.
    if abs(x1 - x2) < abs(x2 - x3):
        return (y3 - y1) * (x2 - x1) / (x3 - x1) + y1 - y2
    else:
        return (y1 - y3) * (x2 - x3) / (x1 - x3) + y3 - y2
def power_estimate(x1: float, x2: float, x3: float, y1: float, y2: float, y3: float, power: float) -> float:
    """Estimates the power where `(x, signed_pow(y, power))` forms a straight line.

    Starting from `power` as the initial guess, performs a few damped secant
    steps on `_power_estimate_error` and returns the refined estimate.
    """
    # Flip the sign of the power if it disagrees with the monotonicity of the ys.
    if is_between(y1, y2, y3) ^ (power > 0):
        power *= -1
    # Cannot refine against infinite values; keep the (possibly flipped) guess.
    if isinf(y1) or isinf(y2) or isinf(y3):
        return power
    p1 = power
    p2 = power * 1.1 + sign(power)
    yp1 = _power_estimate_error(x1, x2, x3, y1, y2, y3, p1)
    yp2 = _power_estimate_error(x1, x2, x3, y1, y2, y3, p2)
    # Two damped secant steps (factors 0.25 and 0.5) before a full step.
    p2 = power + 0.25 * secant(0.0, p2 - p1, yp1, yp2)
    yp2 = _power_estimate_error(x1, x2, x3, y1, y2, y3, p2)
    p1 = power + 0.5 * secant(0.0, p2 - p1, yp1, yp2)
    yp1 = _power_estimate_error(x1, x2, x3, y1, y2, y3, p1)
    p2 = secant(p1, p2, yp1, yp2)
    # Prefer the most refined non-NaN estimate, falling back to the input power.
    return p2 if not isnan(p2) else p1 if not isnan(p1) else power
def secant(x1: float, x2: float, y1: float, y2: float, power: Optional[float] = None) -> float:
    """Helper function to handle edge cases during secant interpolation e.g. overflow.

    Interpolates the x-intercept of the line through (x1, y1) and (x2, y2).
    When `power` is given, the y values are first transformed with signed_pow
    (mirrored for negative powers). Falls back to the overflow-safe mean when
    both y values are infinite or equal.
    """
    if isinf(y1) and isinf(y2) or y1 == y2:
        # Degenerate line: no usable slope, so return the midpoint instead.
        return x1 + 0.5 * (x2 - x1) if sign(x1) == sign(x2) else 0.5 * (x1 + x2)
    # Scale by the larger |y| so the division below cannot overflow.
    if abs(y1) < abs(y2):
        y1 /= y2
        y2 = 1.0
    else:
        y2 /= y1
        y1 = 1.0
    if power is None:
        pass
    elif power > 0:
        y1 = signed_pow(y1, power)
        y2 = signed_pow(y2, power)
    else:
        # Negative power: apply the positive power and swap, mirroring the line.
        y1 = signed_pow(y1, -power)
        y2 = signed_pow(y2, -power)
        y1, y2 = y2, y1
    if sign(y1) != sign(y2):
        # Bracketing signs: standard secant formula.
        return (y1 * x2 - y2 * x1) / (y1 - y2)
    elif abs(y1) < abs(y2):
        # Same sign: extrapolate away from the smaller |y| endpoint.
        return x1 - (x1 - x2) / (1 - y2 / y1)
    else:
        return x2 - (x2 - x1) / (1 - y1 / y2)
def sign(x: float) -> Literal[-1, 0, 1]:
    """Returns the sign of a real number: -1, 0, or 1."""
    if x > 0:
        return 1
    if x < 0:
        return -1
    # Zero and NaN both fall through to 0.
    return 0
def signed_pow(x: float, power: float) -> float:
    """Returns sign(x) * pow(abs(x), power).

    Overflow saturates to +/- infinity instead of raising.

    NOTE(review): math.pow(0.0, negative) raises ValueError, which is not
    caught here — presumably callers never pass x == 0 with power < 0; confirm.
    """
    try:
        return sign(x) * math.pow(abs(x), power)
    except OverflowError:
        # Magnitude too large for a float: saturate with the correct sign.
        return sign(x) * inf
def type_check(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    fprime: Optional[Union[Callable[[float], float], Callable[[float, float], float]]],
    power: float,
    x: Optional[float],
    y1: Optional[float],
    y2: Optional[float],
    abs_err: float,
    rel_err: float,
    abs_tol: Optional[float],
    rel_tol: Optional[float],
    method: Method,
) -> Optional[Union[TypeError, ValueError]]:
    """Performs input checks and potentially returns an error.

    Validates every argument a root-finder accepts and returns (does not
    raise) the first applicable TypeError/ValueError, or None when all
    inputs are valid. Returning the error lets the caller attach its own
    traceback context before raising.
    """
    if not callable(f):
        return TypeError(f"expected a function for f, got {f!r}")
    elif not isinstance(x1, SupportsFloat):
        return TypeError(f"could not interpret x1 as a real number, got {x1!r}")
    elif isnan(float(x1)):
        # Plain string literal (was an f-string with no placeholders).
        return ValueError("x1 is not a number")
    elif not isinstance(x2, SupportsFloat):
        return TypeError(f"could not interpret x2 as a real number, got {x2!r}")
    elif isnan(float(x2)):
        return ValueError("x2 is not a number")
    elif float(x1) == float(x2):
        # A bracketing method needs two distinct endpoints.
        return ValueError("x1 == x2")
    elif method not in methods:
        return TypeError(f"invalid method given, got {method!r}")
    elif fprime is not None and method not in (*ode_methods, "newt-safe"):
        # Only derivative-based methods accept fprime.
        return TypeError(f"got unexpected argument 'fprime' for method {method!r}")
    elif fprime is not None and method in (*ode_methods, "newt-safe") and not callable(fprime):
        return TypeError(f"expected a function for fprime, got {fprime!r}")
    elif not isinstance(power, SupportsFloat):
        return TypeError(f"could not interpret power as a real number, got {power!r}")
    elif float(power) == 0:
        # Plain string literal (was an f-string with no placeholders).
        return ValueError("power must be non-zero")
    elif x is not None and not isinstance(x, SupportsFloat):
        return TypeError(f"x was given but could not be interpreted as a float, got {x!r}")
    elif x is not None and isnan(float(x)):
        return ValueError("x is not a number")
    elif y1 is not None and not isinstance(y1, SupportsFloat):
        return TypeError(f"y1 was given but could not be interpreted as a float, got {y1!r}")
    elif y1 is not None and isnan(float(y1)):
        return ValueError("y1 is not a number")
    elif y2 is not None and not isinstance(y2, SupportsFloat):
        return TypeError(f"y2 was given but could not be interpreted as a float, got {y2!r}")
    elif y2 is not None and isnan(float(y2)):
        return ValueError("y2 is not a number")
    elif not isinstance(abs_err, SupportsFloat):
        return TypeError(f"abs_err could not be interpreted as a float, got {abs_err!r}")
    elif isnan(float(abs_err)):
        return ValueError("abs_err is not a number")
    elif not isinstance(rel_err, SupportsFloat):
        return TypeError(f"rel_err could not be interpreted as a float, got {rel_err!r}")
    elif isnan(float(rel_err)):
        return ValueError("rel_err is not a number")
    elif abs_tol is not None and not isinstance(abs_tol, SupportsFloat):
        return TypeError(f"abs_tol was given but could not be interpreted as a float, got {abs_tol!r}")
    elif abs_tol is not None and isnan(float(abs_tol)):
        return ValueError("abs_tol is not a number")
    elif rel_tol is not None and not isinstance(rel_tol, SupportsFloat):
        return TypeError(f"rel_tol was given but could not be interpreted as a float, got {rel_tol!r}")
    elif rel_tol is not None and isnan(float(rel_tol)):
        return ValueError("rel_tol is not a number")
    else:
        return None
def bisect_in(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    x: Optional[float],
    y1: float,
    y2: float,
    abs_err: float,
    rel_err: float,
    abs_tol: float,
    rel_tol: float,
) -> float:
    """
    The bisection method ensures a very robust worst-case scenario.
    Does not converge fast for the best-case scenario. Other methods
    are recommended over the bisection method. For non-simple roots,
    use the 'non-simple' method instead.

    Args:
        f: The function whose root is bracketed by (x1, x2).
        x1, x2: Bracket endpoints with opposite-signed y1 = f(x1), y2 = f(x2).
        x: Optional initial estimate inside the bracket.
        abs_err, rel_err: Termination tolerances on the bracket width.
        abs_tol, rel_tol: Perturbation tolerances applied to each probe point.

    Returns:
        A secant-refined estimate of the root.

    Order of Convergence:
        1:
            Linear convergence.
    See also:
        'non-simple':
            Fast convergence when f(x) ~ C * (x - root) ^ power.
    """
    # NOTE(review): unlike bisect_iter, these loops never tighten
    # abs_tol/rel_tol down to abs_err/rel_err near convergence — confirm
    # whether this asymmetry between the two variants is intentional.
    # Use an initial estimate.
    if x is not None and abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        # Nudge the probe toward the bracket interior before evaluating.
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        # Keep opposite signs at the endpoints: shift x1<-x2 when y matches y1.
        if sign(y) == sign(y1):
            x1 = x2
            y1 = y2
        x2 = x
        y2 = y
    # Initial convergence to {-1, 0, 1}.
    x = (sign(x1) + sign(x2)) / 2
    while (
        is_between(x1, x, x2)
        and not is_between(x1 / 8, x2, x1 * 8)
        and not (sign(x1) == sign(x2) and is_between(sqrt(abs(x1)), abs(x2), x1 * x1))
        and abs(x1 - x2) > abs_err + rel_err * abs(x2)
        and y2 != 0
    ):
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        if sign(y) == sign(y1):
            x1 = x2
            y1 = y2
        x2 = x
        y2 = y
        x = (sign(x1) + sign(x2)) / 2
    # Log-mean convergence.
    x_sign = sign(x1)
    x_abs = 1 if abs(x1) >= 1 else -1
    while (
        not is_between(x1 / 8, x2, x1 * 8)
        and not (sign(x1) == sign(x2) and is_between(sqrt(abs(x1)), abs(x2), x1 * x1))
        and abs(x1 - x2) > abs_err + rel_err * abs(x2)
        and y2 != 0
    ):
        # Bisect the exponents (geometric mean in log space).
        x = x_sign * exp(x_abs * sqrt(abs(log(abs(x1)))) * sqrt(abs(log(abs(x2)))))
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        if sign(y) == sign(y1):
            x1 = x2
            y1 = y2
        x2 = x
        y2 = y
    # Geometric-mean convergence.
    while (
        not is_between(x1 / 8, x2, x1 * 8)
        and abs(x1 - x2) > abs_err + rel_err * abs(x2)
        and y2 != 0
    ):
        x = x_sign * sqrt(abs(x1)) * sqrt(abs(x2))
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        if sign(y) == sign(y1):
            x1 = x2
            y1 = y2
        x2 = x
        y2 = y
    # Arithmetic-mean convergence.
    while abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        x = x1 + 0.5 * (x2 - x1)
        y = f(x)
        if sign(y) == sign(y1):
            x1 = x2
            y1 = y2
        x2 = x
        y2 = y
    # Final secant interpolation for extra precision.
    return secant(x1, x2, y1, y2)
def bisect_iter(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    x: Optional[float],
    y1: float,
    y2: float,
    abs_err: float,
    rel_err: float,
    abs_tol: float,
    rel_tol: float,
) -> Iterator[float]:
    """
    The bisection method ensures a very robust worst-case scenario.
    Does not converge fast for the best-case scenario. Other methods
    are recommended over the bisection method. For non-simple roots,
    use the 'non-simple' method instead.

    Generator variant of bisect_in: yields the bracket endpoints, then
    every probe point, and finally the secant-refined root estimate.

    Order of Convergence:
        1:
            Linear convergence.
    See also:
        'non-simple':
            Fast convergence when f(x) ~ C * (x - root) ^ power.
    """
    yield x1
    yield x2
    # Use an initial estimate.
    if x is not None and abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        # Nudge the probe toward the bracket interior before evaluating.
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        yield x
        # Keep opposite signs at the endpoints: shift x1<-x2 when y matches y1.
        if sign(y) == sign(y1):
            x1 = x2
            y1 = y2
        x2 = x
        y2 = y
    # Initial convergence to {-1, 0, 1}.
    x = (sign(x1) + sign(x2)) / 2
    while (
        is_between(x1, x, x2)
        and not is_between(x1 / 8, x2, x1 * 8)
        and not (sign(x1) == sign(x2) and is_between(sqrt(abs(x1)), abs(x2), x1 * x1))
        and abs(x1 - x2) > abs_err + rel_err * abs(x2)
        and y2 != 0
    ):
        # Tighten the probe perturbation once the bracket is nearly converged.
        if abs(x1 - x2) < 16 * (abs_tol + rel_tol * abs(x)):
            abs_tol = abs_err
            rel_tol = rel_err
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        yield x
        if sign(y) == sign(y1):
            x1 = x2
            y1 = y2
        x2 = x
        y2 = y
        x = (sign(x1) + sign(x2)) / 2
    # Log-mean convergence.
    x_sign = sign(x1)
    x_abs = 1 if abs(x1) >= 1 else -1
    while (
        not is_between(x1 / 8, x2, x1 * 8)
        and not (sign(x1) == sign(x2) and is_between(sqrt(abs(x1)), abs(x2), x1 * x1))
        and abs(x1 - x2) > abs_err + rel_err * abs(x2)
        and y2 != 0
    ):
        if abs(x1 - x2) < 16 * (abs_tol + rel_tol * abs(x)):
            abs_tol = abs_err
            rel_tol = rel_err
        # Bisect the exponents (geometric mean in log space).
        x = x_sign * exp(x_abs * sqrt(abs(log(abs(x1)))) * sqrt(abs(log(abs(x2)))))
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        yield x
        if sign(y) == sign(y1):
            x1 = x2
            y1 = y2
        x2 = x
        y2 = y
    # Geometric-mean convergence.
    while (
        not is_between(x1 / 8, x2, x1 * 8)
        and abs(x1 - x2) > abs_err + rel_err * abs(x2)
        and y2 != 0
    ):
        if abs(x1 - x2) < 16 * (abs_tol + rel_tol * abs(x)):
            abs_tol = abs_err
            rel_tol = rel_err
        x = x_sign * sqrt(abs(x1)) * sqrt(abs(x2))
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        yield x
        if sign(y) == sign(y1):
            x1 = x2
            y1 = y2
        x2 = x
        y2 = y
    # Arithmetic-mean convergence.
    while abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        x = x1 + 0.5 * (x2 - x1)
        y = f(x)
        yield x
        if sign(y) == sign(y1):
            x1 = x2
            y1 = y2
        x2 = x
        y2 = y
    # Final secant interpolation for extra precision.
    yield secant(x1, x2, y1, y2)
def chandrupatla_in(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    x: Optional[float],
    y1: float,
    y2: float,
    abs_err: float,
    rel_err: float,
    abs_tol: float,
    rel_tol: float,
) -> float:
    """
    Chandrupatla's method is a robust 3-point method using inverse
    quadratic interpolation, similar to Brent's method except for
    the fact that it uses an intelligent linearity check to determine
    if bisection should be used instead.

    Unlike Chandrupatla's original implementation, this one extends
    pyroot's secant implementation with more robust checks to ensure
    convergence by falling back to the secant method applicable. An
    advanced correction term is also used to obtain very high-order
    convergence for simple roots.

    Order of Convergence:
        1.820 or 1.839:
            Depending on f'(root), f''(root), and f'''(root), the
            order of convergence may be 1.820 or 1.839.
            The exact values are the roots of:
                1.820: x^9 - 7x^6 + 6x^3 - 1
                1.839: x^3 - x^2 - x - 1
    See also:
        'secant':
            A robust algorithm which ensures fast and tight bracketing
            for simple roots.
    """
    # Generate initial point using the Bisection method.
    if x is None:
        x = mean(x1, x2)
    # x3/x4/x5: history of previous x2 values, used by the correction term.
    x5 = x4 = x3 = x2
    # Track how many times convergence fails in a row before using the Illinois method.
    bisect_fails = 0
    # Track if the Illinois method was used to use the Illinois-Bisection method next.
    used_illinois = False
    while abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        # Tighten the probe perturbation once the bracket is nearly converged.
        if abs(x1 - x2) < 16 * (abs_tol + rel_tol * abs(x)):
            abs_tol = abs_err
            rel_tol = rel_err
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        # Swap points to ensure x replaces x2.
        if sign(y) == sign(y1):
            x1, x2, y1, y2 = x2, x1, y2, y1
        # Track how many times convergence fails in a row before using the Illinois method.
        if abs(x - x1) > 0.75 * abs(x1 - x2):
            bisect_fails += 1
        else:
            bisect_fails = 0
        # Normalized position of x within the bracket, measured from the
        # nearer endpoint for numerical stability.
        if abs(x - x1) < abs(x - x2):
            x_ratio = (x - x1) / (x2 - x1)
            y_ratio = (y - y1) / (y2 - y1)
        else:
            x_ratio = (x - x2) / (x1 - x2)
            y_ratio = (y - y2) / (y1 - y2)
        # Use the bisection method if not linear-ish.
        if not x_ratio > (x_ratio * x_ratio + y_ratio * y_ratio - 1) / 2 < y_ratio:
            if abs(x - x2) < abs(x - x3):
                x5 = x4
                x4 = x3
                x3 = x2
            x2 = x
            y2 = y
            x = float_mean(x1, x2)
        # Follow-up the Illinois method with the Illinois-Bisection method.
        elif used_illinois:
            if abs(x - x2) < abs(x - x3):
                x5 = x4
                x4 = x3
                x3 = x2
            x2 = x
            y2 = y
            x = secant(x1, x2, y1, y2)
            # Bias the secant estimate toward the bracket midpoint.
            dx = mean(x1, x2) - x
            x += dx * (dx / (x1 - x2)) ** 2
            used_illinois = False
        # Force the Illinois method if convergence fails at least thrice in a row.
        elif bisect_fails >= 3:
            if abs(x - x2) < abs(x - x3):
                x5 = x4
                x4 = x3
                x3 = x2
            x2 = x
            y2 = y
            # Illinois trick: halve the stale endpoint's y to unstick the secant.
            y1 /= 2
            x = secant(x1, x2, y1, y2)
            used_illinois = True
        # Use IQI if the points are highly linear.
        elif y_ratio * y_ratio < x_ratio < 1 - (1 - y_ratio) ** 2:
            a = y / (y1 - y)
            b = y2 / (y1 - y2)
            c = y / (y2 - y)
            d = y1 / (y2 - y1)
            if abs(x - x2) < abs(x - x3):
                x5 = x4
                x4 = x3
                x3 = x2
            # Inverse quadratic interpolation step.
            x, x2 = x + a * b * (x1 - x) + c * d * (x2 - x), x
            y2 = y
            # Perform adjustment if convergence fails at least twice in a row.
            if bisect_fails >= 2:
                # High-order convergence for simple roots: ~ 1.82.
                x += (x - x2) * abs((x - x1) * ((x - x3) / (x - x5)))
                # Back-up Illinois method.
                x4 = secant(x1, x2, y1 / 2, y2)
                # Fall-back to the Illinois method if not properly converging.
                if not is_between(x4, x, x2):
                    y1 /= 2
                    x = x4
                    used_illinois = True
        # Use the secant method with x and x2.
        elif abs(x2 - x) < 1.25 * abs(x1 - x) and abs(y) < abs(y2) < 1.25 * abs(y1) and is_between(mean(x1, x), secant(x, x2, y, y2), x):
            if abs(x - x2) < abs(x - x3):
                x5 = x4
                x4 = x3
                x3 = x2
            x2, x = x, secant(x, x2, y, y2)
            y2 = y
        # Use the secant method with x and x1.
        else:
            if abs(x - x2) < abs(x - x3):
                x5 = x4
                x4 = x3
                x3 = x2
            x2 = x
            y2 = y
            x = secant(x1, x2, y1, y2)
    # Use the secant method on the final iteration for high precision.
    return secant(x1, x2, y1, y2)
def chandrupatla_iter(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    x: Optional[float],
    y1: float,
    y2: float,
    abs_err: float,
    rel_err: float,
    abs_tol: float,
    rel_tol: float,
) -> Iterator[float]:
    """
    Chandrupatla's method is a robust 3-point method using inverse
    quadratic interpolation, similar to Brent's method except for
    the fact that it uses an intelligent linearity check to determine
    if bisection should be used instead.

    Unlike Chandrupatla's original implementation, this one extends
    pyroot's secant implementation with more robust checks to ensure
    convergence by falling back to the secant method applicable. An
    advanced correction term is also used to obtain very high-order
    convergence for simple roots.

    Generator variant of chandrupatla_in: yields the bracket endpoints,
    then every probe point, and finally the secant-refined root estimate.

    Order of Convergence:
        1.820 or 1.839:
            Depending on f'(root), f''(root), and f'''(root), the
            order of convergence may be 1.820 or 1.839.
            The exact values are the roots of:
                1.820: x^9 - 7x^6 + 6x^3 - 1
                1.839: x^3 - x^2 - x - 1
    See also:
        'secant':
            A robust algorithm which ensures fast and tight bracketing
            for simple roots.
    """
    yield x1
    yield x2
    # Generate initial point using the Bisection method.
    if x is None:
        x = mean(x1, x2)
    # x3/x4/x5: history of previous x2 values, used by the correction term.
    x5 = x4 = x3 = x2
    # Track how many times convergence fails in a row before using the Illinois method.
    bisect_fails = 0
    # Track if the Illinois method was used to use the Illinois-Bisection method next.
    used_illinois = False
    while abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        # Tighten the probe perturbation once the bracket is nearly converged.
        if abs(x1 - x2) < 16 * (abs_tol + rel_tol * abs(x)):
            abs_tol = abs_err
            rel_tol = rel_err
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        yield x
        # Swap points to ensure x replaces x2.
        if sign(y) == sign(y1):
            x1, x2, y1, y2 = x2, x1, y2, y1
        # Track how many times convergence fails in a row before using the Illinois method.
        if abs(x - x1) > 0.75 * abs(x1 - x2):
            bisect_fails += 1
        else:
            bisect_fails = 0
        # Normalized position of x within the bracket, measured from the
        # nearer endpoint for numerical stability.
        if abs(x - x1) < abs(x - x2):
            x_ratio = (x - x1) / (x2 - x1)
            y_ratio = (y - y1) / (y2 - y1)
        else:
            x_ratio = (x - x2) / (x1 - x2)
            y_ratio = (y - y2) / (y1 - y2)
        # Use the bisection method if not linear-ish.
        if not x_ratio > (x_ratio * x_ratio + y_ratio * y_ratio - 1) / 2 < y_ratio:
            if abs(x - x2) < abs(x - x3):
                x5 = x4
                x4 = x3
                x3 = x2
            x2 = x
            y2 = y
            x = float_mean(x1, x2)
        # Follow-up the Illinois method with the Illinois-Bisection method.
        elif used_illinois:
            if abs(x - x2) < abs(x - x3):
                x5 = x4
                x4 = x3
                x3 = x2
            x2 = x
            y2 = y
            x = secant(x1, x2, y1, y2)
            # Bias the secant estimate toward the bracket midpoint.
            dx = mean(x1, x2) - x
            x += dx * (dx / (x1 - x2)) ** 2
            used_illinois = False
        # Force the Illinois method if convergence fails at least thrice in a row.
        elif bisect_fails >= 3:
            if abs(x - x2) < abs(x - x3):
                x5 = x4
                x4 = x3
                x3 = x2
            x2 = x
            y2 = y
            # Illinois trick: halve the stale endpoint's y to unstick the secant.
            y1 /= 2
            x = secant(x1, x2, y1, y2)
            used_illinois = True
        # Use IQI if the points are highly linear.
        elif y_ratio * y_ratio < x_ratio < 1 - (1 - y_ratio) ** 2:
            a = y / (y1 - y)
            b = y2 / (y1 - y2)
            c = y / (y2 - y)
            d = y1 / (y2 - y1)
            if abs(x - x2) < abs(x - x3):
                x5 = x4
                x4 = x3
                x3 = x2
            # Inverse quadratic interpolation step.
            x, x2 = x + a * b * (x1 - x) + c * d * (x2 - x), x
            y2 = y
            # Perform adjustment if convergence fails at least twice in a row.
            if bisect_fails >= 2:
                # High-order convergence for simple roots: ~ 1.82.
                x += (x - x2) * abs((x - x1) * ((x - x3) / (x - x5)))
                # Back-up Illinois method.
                x4 = secant(x1, x2, y1 / 2, y2)
                # Fall-back to the Illinois method if not properly converging.
                if not is_between(x4, x, x2):
                    y1 /= 2
                    x = x4
                    used_illinois = True
        # Use the secant method with x and x2.
        elif abs(x2 - x) < 1.25 * abs(x1 - x) and abs(y) < abs(y2) < 1.25 * abs(y1) and is_between(mean(x1, x), secant(x, x2, y, y2), x):
            if abs(x - x2) < abs(x - x3):
                x5 = x4
                x4 = x3
                x3 = x2
            x2, x = x, secant(x, x2, y, y2)
            y2 = y
        # Use the secant method with x and x1.
        else:
            if abs(x - x2) < abs(x - x3):
                x5 = x4
                x4 = x3
                x3 = x2
            x2 = x
            y2 = y
            x = secant(x1, x2, y1, y2)
    # Use the secant method on the final iteration for high precision.
    yield secant(x1, x2, y1, y2)
def heun_ode_in(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    fprime: Callable[[float, float], float],
    x: Optional[float],
    y1: float,
    y2: float,
    abs_err: float,
    rel_err: float,
    abs_tol: float,
    rel_tol: float,
) -> float:
    """
    Heun's ODE method is the ODE equivalent of the trapezoidal method
    for numerical integration.

    Heun's ODE method uses 2 separate `fprime` evaluations per
    iteration to produce a more accurate estimate of the root compared
    to the 'newt-ode' method. The additional `fprime` evaluation also
    makes Heun's ODE method more robust than the 'newt-ode' method,
    giving it more accurate estimates during initial estimates. Similar
    to the 'newt-safe' method, robust measures are taken to ensure
    tight brackets and worst-case convergence.

    Order of Convergence:
        3:
            Cubic convergence, similar to Halley's method.
    See also:
        'midpoint-ode':
            Uses fewer `fprime` calls per iteration at the cost of a
            reduced order of convergence. Recommended if `fprime` calls
            are relatively expensive compared to `f` calls.
        'newt-ode':
            Uses fewer `fprime` calls per iteration at the cost of a
            reduced order of convergence. Also has less arithmetic
            cost compared to other methods. Recommended if the
            cost of the algorithm itself is significantly more
            expensive than function calls.
        'rk45-ode':
            Uses more `fprime` calls per iteration to gain increased
            order of convergence. Recommended if `fprime(x, y)` can
            be computed relatively cheaply compared to `f`.
    """
    # Generate initial point using the Bisection method.
    if x is None:
        x = mean(x1, x2)
    # Track how many times convergence fails in a row before using the Illinois method.
    bisect_fails = 0
    # Track if the Illinois method was used to use the Illinois-Bisection method next.
    used_illinois = False
    while abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        # Tighten the probe perturbation once the bracket is nearly converged.
        if abs(x1 - x2) < 16 * (abs_tol + rel_tol * abs(x)):
            abs_tol = abs_err
            rel_tol = rel_err
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        # Swap points to ensure x replaces x2.
        if sign(y) == sign(y1):
            x1, x2, y1, y2 = x2, x1, y2, y1
        # Track how many times convergence fails in a row before using the Illinois method.
        if abs(x - x1) > 0.75 * abs(x1 - x2):
            bisect_fails += 1
        else:
            bisect_fails = 0
        # Normalized position of x within the bracket, measured from the
        # nearer endpoint for numerical stability.
        if abs(x - x1) < abs(x - x2):
            x_ratio = (x - x1) / (x2 - x1)
            y_ratio = (y - y1) / (y2 - y1)
        else:
            x_ratio = (x - x2) / (x1 - x2)
            y_ratio = (y - y2) / (y1 - y2)
        x2 = x
        y2 = y
        # Use the bisection method if not linear-ish.
        if not x_ratio > (x_ratio * x_ratio + y_ratio * y_ratio - 1) / 2 < y_ratio:
            x = float_mean(x1, x2)
            used_illinois = False
        # Follow-up the Illinois method with the Illinois-Bisection method.
        elif used_illinois:
            x = secant(x1, x2, y1, y2)
            # Bias the secant estimate toward the bracket midpoint.
            dx = mean(x1, x2) - x
            x += dx * (dx / (x1 - x2)) ** 2
            used_illinois = False
        # Force the Illinois method if convergence fails at least thrice in a row.
        elif bisect_fails >= 3:
            # Illinois trick: halve the stale endpoint's y to unstick the secant.
            y1 /= 2
            x = secant(x1, x2, y1, y2)
            used_illinois = True
        # Use Heun's ODE method.
        else:
            # k1: full Newton step from the current point.
            yprime = fprime(x, y)
            if yprime == 0:
                k1 = inf
            else:
                k1 = y / yprime
            if not is_between(x1, x - k1, x2):
                x = secant(x1, x2, y1, y2)
                continue
            # k2: Newton step using the slope at the estimated root (y ~ 0).
            yprime = fprime(x - k1, 0)
            if yprime == 0:
                k2 = inf
            else:
                k2 = y / yprime
            # Trapezoidal (Heun) combination of both slopes.
            x -= (k1 + k2) / 2
        # Fall-back to the secant method if convergence fails.
        if not is_between(x1, x, x2):
            x = secant(x1, x2, y1, y2)
    # Use the secant method on the final iteration for high precision.
    return secant(x1, x2, y1, y2)
def heun_ode_iter(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    fprime: Callable[[float, float], float],
    x: Optional[float],
    y1: float,
    y2: float,
    abs_err: float,
    rel_err: float,
    abs_tol: float,
    rel_tol: float,
) -> Iterator[float]:
    """
    Heun's ODE method is the ODE equivalent of the trapezoidal method
    for numerical integration.

    Heun's ODE method uses 2 separate `fprime` evaluations per
    iteration to produce a more accurate estimate of the root compared
    to the 'newt-ode' method. The additional `fprime` evaluation also
    makes Heun's ODE method more robust than the 'newt-ode' method,
    giving it more accurate estimates during initial estimates. Similar
    to the 'newt-safe' method, robust measures are taken to ensure
    tight brackets and worst-case convergence.

    Generator variant of heun_ode_in: yields the bracket endpoints, then
    every probe point, and finally the secant-refined root estimate.

    Order of Convergence:
        3:
            Cubic convergence, similar to Halley's method.
    See also:
        'midpoint-ode':
            Uses fewer `fprime` calls per iteration at the cost of a
            reduced order of convergence. Recommended if `fprime` calls
            are relatively expensive compared to `f` calls.
        'newt-ode':
            Uses fewer `fprime` calls per iteration at the cost of a
            reduced order of convergence. Also has less arithmetic
            cost compared to other methods. Recommended if the
            cost of the algorithm itself is significantly more
            expensive than function calls.
        'rk45-ode':
            Uses more `fprime` calls per iteration to gain increased
            order of convergence. Recommended if `fprime(x, y)` can
            be computed relatively cheaply compared to `f`.
    """
    yield x1
    yield x2
    # Generate initial point using the Bisection method.
    if x is None:
        x = mean(x1, x2)
    # Track how many times convergence fails in a row before using the Illinois method.
    bisect_fails = 0
    # Track if the Illinois method was used to use the Illinois-Bisection method next.
    used_illinois = False
    while abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        # Tighten the probe perturbation once the bracket is nearly converged.
        if abs(x1 - x2) < 16 * (abs_tol + rel_tol * abs(x)):
            abs_tol = abs_err
            rel_tol = rel_err
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        yield x
        # Swap points to ensure x replaces x2.
        if sign(y) == sign(y1):
            x1, x2, y1, y2 = x2, x1, y2, y1
        # Track how many times convergence fails in a row before using the Illinois method.
        if abs(x - x1) > 0.75 * abs(x1 - x2):
            bisect_fails += 1
        else:
            bisect_fails = 0
        # Normalized position of x within the bracket, measured from the
        # nearer endpoint for numerical stability.
        if abs(x - x1) < abs(x - x2):
            x_ratio = (x - x1) / (x2 - x1)
            y_ratio = (y - y1) / (y2 - y1)
        else:
            x_ratio = (x - x2) / (x1 - x2)
            y_ratio = (y - y2) / (y1 - y2)
        x2 = x
        y2 = y
        # Use the bisection method if not linear-ish.
        if not x_ratio > (x_ratio * x_ratio + y_ratio * y_ratio - 1) / 2 < y_ratio:
            x = float_mean(x1, x2)
            used_illinois = False
        # Follow-up the Illinois method with the Illinois-Bisection method.
        elif used_illinois:
            x = secant(x1, x2, y1, y2)
            # Bias the secant estimate toward the bracket midpoint.
            dx = mean(x1, x2) - x
            x += dx * (dx / (x1 - x2)) ** 2
            used_illinois = False
        # Force the Illinois method if convergence fails at least thrice in a row.
        elif bisect_fails >= 3:
            # Illinois trick: halve the stale endpoint's y to unstick the secant.
            y1 /= 2
            x = secant(x1, x2, y1, y2)
            used_illinois = True
        # Use Heun's ODE method.
        else:
            # k1: full Newton step from the current point.
            yprime = fprime(x, y)
            if yprime == 0:
                k1 = inf
            else:
                k1 = y / yprime
            if not is_between(x1, x - k1, x2):
                x = secant(x1, x2, y1, y2)
                continue
            # k2: Newton step using the slope at the estimated root (y ~ 0).
            yprime = fprime(x - k1, 0)
            if yprime == 0:
                k2 = inf
            else:
                k2 = y / yprime
            # Trapezoidal (Heun) combination of both slopes.
            x -= (k1 + k2) / 2
        # Fall-back to the secant method if convergence fails.
        if not is_between(x1, x, x2):
            x = secant(x1, x2, y1, y2)
    # Use the secant method on the final iteration for high precision.
    yield secant(x1, x2, y1, y2)
def midpoint_ode_in(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    fprime: Callable[[float, float], float],
    x: Optional[float],
    y1: float,
    y2: float,
    abs_err: float,
    rel_err: float,
    abs_tol: float,
    rel_tol: float,
) -> float:
    """
    The midpoint ODE method is the ODE equivalent of the midpoint
    method for numerical integration.

    The midpoint ODE method uses only 1 `fprime` evaluation per
    iteration, similar to the 'newt-ode' method, but does not use the
    derivative at the current point. Instead, a secant estimate of the
    root is made, and then the derivative is evaluated between that and
    the current estimate i.e. the derivative is evaluated at the
    midpoint. This gives faster and more robust convergence than
    the 'newt-ode' method.

    Order of Convergence:
        2.414:
            Between quadratic and cubic orders of convergence. Faster
            than the 'newt-ode' method, but slower than the 'heun-ode'
            method.
            The exact value is given by the root of:
                x^2 - 2x - 1
    See also:
        'heun-ode':
            Uses more `fprime` calls per iteration to gain increased
            order of convergence. Recommended if `fprime(x, y)` can
            be computed relatively cheaply compared to `f`.
        'newt-ode':
            Uses fewer arithmetic operations per iteration. Recommended
            if the cost of the algorithm itself is significantly more
            expensive than function calls.
        'rk45-ode':
            Uses more `fprime` calls per iteration to gain increased
            order of convergence. Recommended if `fprime(x, y)` can
            be computed extremely cheaply compared to `f`.
    """
    # Generate initial point using the Bisection method.
    if x is None:
        x = mean(x1, x2)
    # Track how many times convergence fails in a row before using the Illinois method.
    bisect_fails = 0
    # Track if the Illinois method was used to use the Illinois-Bisection method next.
    used_illinois = False
    while abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        # Tighten the probe perturbation once the bracket is nearly converged.
        if abs(x1 - x2) < 16 * (abs_tol + rel_tol * abs(x)):
            abs_tol = abs_err
            rel_tol = rel_err
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        # Swap points to ensure x replaces x2.
        if sign(y) == sign(y1):
            x1, x2, y1, y2 = x2, x1, y2, y1
        # Track how many times convergence fails in a row before using the Illinois method.
        if abs(x - x1) > 0.75 * abs(x1 - x2):
            bisect_fails += 1
        else:
            bisect_fails = 0
        # Normalized position of x within the bracket, measured from the
        # nearer endpoint for numerical stability.
        if abs(x - x1) < abs(x - x2):
            x_ratio = (x - x1) / (x2 - x1)
            y_ratio = (y - y1) / (y2 - y1)
        else:
            x_ratio = (x - x2) / (x1 - x2)
            y_ratio = (y - y2) / (y1 - y2)
        # NOTE(review): x2/y2 are replaced here, BEFORE the branch below, so in
        # the midpoint-ODE branch the condition `abs(x - x2) < abs(x - x1) and
        # abs(y) < abs(y2)` is degenerate (x == x2, y == y2) and always takes
        # the `x1` secant. midpoint_ode_iter updates x2/y2 inside the branches
        # instead — confirm which placement is the intended behavior.
        x2 = x
        y2 = y
        # Use the bisection method if not linear-ish.
        if not x_ratio > (x_ratio * x_ratio + y_ratio * y_ratio - 1) / 2 < y_ratio:
            x = float_mean(x1, x2)
            used_illinois = False
        # Follow-up the Illinois method with the Illinois-Bisection method.
        elif used_illinois:
            x = secant(x1, x2, y1, y2)
            # Bias the secant estimate toward the bracket midpoint.
            dx = mean(x1, x2) - x
            x += dx * (dx / (x1 - x2)) ** 2
            used_illinois = False
        # Force the Illinois method if convergence fails at least thrice in a row.
        elif bisect_fails >= 3:
            # Illinois trick: halve the stale endpoint's y to unstick the secant.
            y1 /= 2
            x = secant(x1, x2, y1, y2)
            used_illinois = True
        # Use the midpoint ODE method.
        else:
            # k1: secant step toward the estimated root.
            if abs(x - x2) < abs(x - x1) and abs(y) < abs(y2):
                k1 = secant(0.0, x - x2, y, y2)
            else:
                k1 = secant(0.0, x - x1, y, y1)
            if not is_between(x1, x - k1 / 2, x2):
                x = secant(x1, x2, y1, y2)
                continue
            # k2: Newton step using the slope at the midpoint (y ~ y/2).
            yprime = fprime(x - k1 / 2, y / 2)
            if yprime == 0:
                k2 = inf
            else:
                k2 = y / yprime
            x -= k2
        # Fall-back to the secant method if convergence fails.
        if not is_between(x1, x, x2):
            x = secant(x1, x2, y1, y2)
    # Use the secant method on the final iteration for high precision.
    return secant(x1, x2, y1, y2)
def midpoint_ode_iter(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    fprime: Callable[[float, float], float],
    x: Optional[float],
    y1: float,
    y2: float,
    abs_err: float,
    rel_err: float,
    abs_tol: float,
    rel_tol: float,
) -> Iterator[float]:
    """
    The midpoint ODE method is the ODE equivalent of the midpoint
    method for numerical integration.

    The midpoint ODE method uses only 1 `fprime` evaluation per
    iteration, similar to the 'newt-ode' method, but does not use the
    derivative at the current point. Instead, a secant estimate of the
    root is made, and then the derivative is evaluated between that and
    the current estimate i.e. the derivative is evaluated at the
    midpoint. This gives faster and more robust convergence than
    the 'newt-ode' method.

    Generator variant of midpoint_ode_in: yields the bracket endpoints,
    then every probe point, and finally the secant-refined root estimate.

    Order of Convergence:
        2.414:
            Between quadratic and cubic orders of convergence. Faster
            than the 'newt-ode' method, but slower than the 'heun-ode'
            method.
            The exact value is given by the root of:
                x^2 - 2x - 1
    See also:
        'heun-ode':
            Uses more `fprime` calls per iteration to gain increased
            order of convergence. Recommended if `fprime(x, y)` can
            be computed relatively cheaply compared to `f`.
        'newt-ode':
            Uses fewer arithmetic operations per iteration. Recommended
            if the cost of the algorithm itself is significantly more
            expensive than function calls.
        'rk45-ode':
            Uses more `fprime` calls per iteration to gain increased
            order of convergence. Recommended if `fprime(x, y)` can
            be computed extremely cheaply compared to `f`.
    """
    yield x1
    yield x2
    # Generate initial point using the Bisection method.
    if x is None:
        x = mean(x1, x2)
    # Track how many times convergence fails in a row before using the Illinois method.
    bisect_fails = 0
    # Track if the Illinois method was used to use the Illinois-Bisection method next.
    used_illinois = False
    while abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        # Tighten the probe perturbation once the bracket is nearly converged.
        if abs(x1 - x2) < 16 * (abs_tol + rel_tol * abs(x)):
            abs_tol = abs_err
            rel_tol = rel_err
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        yield x
        # Swap points to ensure x replaces x2.
        if sign(y) == sign(y1):
            x1, x2, y1, y2 = x2, x1, y2, y1
        # Track how many times convergence fails in a row before using the Illinois method.
        if abs(x - x1) > 0.75 * abs(x1 - x2):
            bisect_fails += 1
        else:
            bisect_fails = 0
        # Normalized position of x within the bracket, measured from the
        # nearer endpoint for numerical stability.
        if abs(x - x1) < abs(x - x2):
            x_ratio = (x - x1) / (x2 - x1)
            y_ratio = (y - y1) / (y2 - y1)
        else:
            x_ratio = (x - x2) / (x1 - x2)
            y_ratio = (y - y2) / (y1 - y2)
        # NOTE(review): unlike midpoint_ode_in, x2/y2 are updated inside the
        # first three branches only — the midpoint-ODE branch below keeps the
        # previous x2/y2, which makes its `k1` selection meaningful but leaves
        # the bracket endpoint stale. Confirm which variant is intended.
        # Use the bisection method if not linear-ish.
        if not x_ratio > (x_ratio * x_ratio + y_ratio * y_ratio - 1) / 2 < y_ratio:
            x2 = x
            y2 = y
            x = float_mean(x1, x2)
            used_illinois = False
        # Follow-up the Illinois method with the Illinois-Bisection method.
        elif used_illinois:
            x2 = x
            y2 = y
            x = secant(x1, x2, y1, y2)
            # Bias the secant estimate toward the bracket midpoint.
            dx = mean(x1, x2) - x
            x += dx * (dx / (x1 - x2)) ** 2
            used_illinois = False
        # Force the Illinois method if convergence fails at least thrice in a row.
        elif bisect_fails >= 3:
            x2 = x
            y2 = y
            # Illinois trick: halve the stale endpoint's y to unstick the secant.
            y1 /= 2
            x = secant(x1, x2, y1, y2)
            used_illinois = True
        # Use the midpoint ODE method.
        else:
            # k1: secant step toward the estimated root, from whichever
            # endpoint is nearer and has the smaller residual.
            if abs(x - x2) < abs(x - x1) and abs(y) < abs(y2):
                k1 = secant(0.0, x - x2, y, y2)
            else:
                k1 = secant(0.0, x - x1, y, y1)
            if not is_between(x1, x - k1 / 2, x2):
                x = secant(x1, x2, y1, y2)
                continue
            # k2: Newton step using the slope at the midpoint (y ~ y/2).
            yprime = fprime(x - k1 / 2, y / 2)
            if yprime == 0:
                k2 = inf
            else:
                k2 = y / yprime
            x -= k2
        # Fall-back to the secant method if convergence fails.
        if not is_between(x1, x, x2):
            x = secant(x1, x2, y1, y2)
    # Use the secant method on the final iteration for high precision.
    yield secant(x1, x2, y1, y2)
def newt_ode_in(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    fprime: Callable[[float, float], float],
    x: Optional[float],
    y1: float,
    y2: float,
    abs_err: float,
    rel_err: float,
    abs_tol: float,
    rel_tol: float,
) -> float:
    """
    Bracketed root-finder driven by Newton-Raphson ODE steps.

    The Newton-Raphson ODE method is the ODE analogue of Riemann sums
    for numerical integration. It spends a single `fprime(x, y)`
    evaluation per iteration on a very simple update formula, so the
    arithmetic overhead per iteration is slightly lower than that of
    comparable methods.

    Order of Convergence:
        2:
            Quadratic convergence.

    See also:
        'heun-ode':
            More `fprime` calls per iteration buy a higher order of
            convergence. Prefer it when `fprime(x, y)` is relatively
            cheap compared to `f`.
        'midpoint-ode':
            More arithmetic per iteration buys a higher order of
            convergence and extra robustness. Prefer it when function
            calls are somewhat expensive.
        'rk45-ode':
            Many `fprime` calls per iteration buy a much higher order
            of convergence. Prefer it when `fprime(x, y)` is extremely
            cheap compared to `f`.
    """
    # No initial estimate supplied: start from the bracket midpoint.
    if x is None:
        x = mean(x1, x2)
    # Consecutive iterations that failed to shrink the bracket enough.
    fail_streak = 0
    # True right after an Illinois step; the next step is then an
    # Illinois-Bisection step.
    illinois_pending = False
    while abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        # Once the bracket is nearly tight, drop to the final errors.
        if abs(x1 - x2) < 16 * (abs_tol + rel_tol * abs(x)):
            abs_tol = abs_err
            rel_tol = rel_err
        # Nudge the estimate towards the interior of the bracket.
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        # Re-orient the bracket so that x always replaces (x2, y2).
        if sign(y) == sign(y1):
            x1, x2, y1, y2 = x2, x1, y2, y1
        # Update the streak of insufficient bracket reductions.
        if abs(x - x1) > 0.75 * abs(x1 - x2):
            fail_streak += 1
        else:
            fail_streak = 0
        # Coordinates of (x, y) normalized against the nearer endpoint.
        if abs(x - x1) < abs(x - x2):
            t = (x - x1) / (x2 - x1)
            u = (y - y1) / (y2 - y1)
        else:
            t = (x - x2) / (x1 - x2)
            u = (y - y2) / (y1 - y2)
        x2, y2 = x, y
        # Not linear-ish: take a bisection step instead.
        if not t > (t * t + u * u - 1) / 2 < u:
            x = float_mean(x1, x2)
            illinois_pending = False
        # Chase the previous Illinois step with an Illinois-Bisection step.
        elif illinois_pending:
            x = secant(x1, x2, y1, y2)
            gap = mean(x1, x2) - x
            x += gap * (gap / (x1 - x2)) ** 2
            illinois_pending = False
        # Three failures in a row: force an Illinois step.
        elif fail_streak >= 3:
            y1 /= 2
            x = secant(x1, x2, y1, y2)
            illinois_pending = True
        # Otherwise take the Newton-Raphson ODE step.
        else:
            slope = fprime(x, y)
            x = inf if slope == 0 else x - y / slope
        # Fall back to a secant step if the step escaped the bracket.
        if not is_between(x1, x, x2):
            x = secant(x1, x2, y1, y2)
    # Finish with one secant step for extra precision.
    return secant(x1, x2, y1, y2)
def newt_ode_iter(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    fprime: Callable[[float, float], float],
    x: Optional[float],
    y1: float,
    y2: float,
    abs_err: float,
    rel_err: float,
    abs_tol: float,
    rel_tol: float,
) -> Iterator[float]:
    """
    Generator form of the Newton-Raphson ODE bracketed root-finder,
    yielding every estimate of the root as it is produced.

    The Newton-Raphson ODE method is the ODE analogue of Riemann sums
    for numerical integration. It spends a single `fprime(x, y)`
    evaluation per iteration on a very simple update formula, so the
    arithmetic overhead per iteration is slightly lower than that of
    comparable methods.

    Order of Convergence:
        2:
            Quadratic convergence.

    See also:
        'heun-ode':
            More `fprime` calls per iteration buy a higher order of
            convergence. Prefer it when `fprime(x, y)` is relatively
            cheap compared to `f`.
        'midpoint-ode':
            More arithmetic per iteration buys a higher order of
            convergence and extra robustness. Prefer it when function
            calls are somewhat expensive.
        'rk45-ode':
            Many `fprime` calls per iteration buy a much higher order
            of convergence. Prefer it when `fprime(x, y)` is extremely
            cheap compared to `f`.
    """
    # The initial bracket endpoints are reported first.
    yield x1
    yield x2
    # No initial estimate supplied: start from the bracket midpoint.
    if x is None:
        x = mean(x1, x2)
    # Consecutive iterations that failed to shrink the bracket enough.
    fail_streak = 0
    # True right after an Illinois step; the next step is then an
    # Illinois-Bisection step.
    illinois_pending = False
    while abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        # Once the bracket is nearly tight, drop to the final errors.
        if abs(x1 - x2) < 16 * (abs_tol + rel_tol * abs(x)):
            abs_tol = abs_err
            rel_tol = rel_err
        # Nudge the estimate towards the interior of the bracket.
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        yield x
        # Re-orient the bracket so that x always replaces (x2, y2).
        if sign(y) == sign(y1):
            x1, x2, y1, y2 = x2, x1, y2, y1
        # Update the streak of insufficient bracket reductions.
        if abs(x - x1) > 0.75 * abs(x1 - x2):
            fail_streak += 1
        else:
            fail_streak = 0
        # Coordinates of (x, y) normalized against the nearer endpoint.
        if abs(x - x1) < abs(x - x2):
            t = (x - x1) / (x2 - x1)
            u = (y - y1) / (y2 - y1)
        else:
            t = (x - x2) / (x1 - x2)
            u = (y - y2) / (y1 - y2)
        x2, y2 = x, y
        # Not linear-ish: take a bisection step instead.
        if not t > (t * t + u * u - 1) / 2 < u:
            x = float_mean(x1, x2)
            illinois_pending = False
        # Chase the previous Illinois step with an Illinois-Bisection step.
        elif illinois_pending:
            x = secant(x1, x2, y1, y2)
            gap = mean(x1, x2) - x
            x += gap * (gap / (x1 - x2)) ** 2
            illinois_pending = False
        # Three failures in a row: force an Illinois step.
        elif fail_streak >= 3:
            y1 /= 2
            x = secant(x1, x2, y1, y2)
            illinois_pending = True
        # Otherwise take the Newton-Raphson ODE step.
        else:
            slope = fprime(x, y)
            x = inf if slope == 0 else x - y / slope
        # Fall back to a secant step if the step escaped the bracket.
        if not is_between(x1, x, x2):
            x = secant(x1, x2, y1, y2)
    # Finish with one secant step for extra precision.
    yield secant(x1, x2, y1, y2)
def newt_safe_in(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    fprime: Callable[[float], float],
    x: Optional[float],
    y1: float,
    y2: float,
    abs_err: float,
    rel_err: float,
    abs_tol: float,
    rel_tol: float,
) -> float:
    """
    Bracketed root-finder driven by safeguarded Newton-Raphson steps.

    The Newton-Raphson method resembles the secant method, except that
    a caller-supplied derivative replaces the finite-difference
    estimate. Unlike the ODE variants, `fprime` is called with `x`
    alone.

    Order of Convergence:
        2:
            Quadratic convergence.

    See also:
        'newt-ode':
            The ODE equivalent. Contains more information. Recommended
            if `fprime(x)` can be computed more efficiently if `f(x)` is
            given.
    """
    # No initial estimate supplied: start from the bracket midpoint.
    if x is None:
        x = mean(x1, x2)
    # Consecutive iterations that failed to shrink the bracket enough.
    fail_streak = 0
    # True right after an Illinois step; the next step is then an
    # Illinois-Bisection step.
    illinois_pending = False
    while abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        # Once the bracket is nearly tight, drop to the final errors.
        if abs(x1 - x2) < 16 * (abs_tol + rel_tol * abs(x)):
            abs_tol = abs_err
            rel_tol = rel_err
        # Nudge the estimate towards the interior of the bracket.
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        # Re-orient the bracket so that x always replaces (x2, y2).
        if sign(y) == sign(y1):
            x1, x2, y1, y2 = x2, x1, y2, y1
        # Update the streak of insufficient bracket reductions.
        if abs(x - x1) > 0.75 * abs(x1 - x2):
            fail_streak += 1
        else:
            fail_streak = 0
        # Coordinates of (x, y) normalized against the nearer endpoint.
        if abs(x - x1) < abs(x - x2):
            t = (x - x1) / (x2 - x1)
            u = (y - y1) / (y2 - y1)
        else:
            t = (x - x2) / (x1 - x2)
            u = (y - y2) / (y1 - y2)
        x2, y2 = x, y
        # Not linear-ish: take a bisection step instead.
        if not t > (t * t + u * u - 1) / 2 < u:
            x = float_mean(x1, x2)
            illinois_pending = False
        # Chase the previous Illinois step with an Illinois-Bisection step.
        elif illinois_pending:
            x = secant(x1, x2, y1, y2)
            gap = mean(x1, x2) - x
            x += gap * (gap / (x1 - x2)) ** 2
            illinois_pending = False
        # Three failures in a row: force an Illinois step.
        elif fail_streak >= 3:
            y1 /= 2
            x = secant(x1, x2, y1, y2)
            illinois_pending = True
        # Otherwise take the plain Newton-Raphson step.
        else:
            slope = fprime(x)
            x = inf if slope == 0 else x - y / slope
        # Fall back to a secant step if the step escaped the bracket.
        if not is_between(x1, x, x2):
            x = secant(x1, x2, y1, y2)
    # Finish with one secant step for extra precision.
    return secant(x1, x2, y1, y2)
def newt_safe_iter(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    fprime: Callable[[float], float],
    x: Optional[float],
    y1: float,
    y2: float,
    abs_err: float,
    rel_err: float,
    abs_tol: float,
    rel_tol: float,
) -> Iterator[float]:
    """
    Generator form of the safeguarded Newton-Raphson bracketed
    root-finder, yielding every estimate of the root as it is produced.

    The Newton-Raphson method resembles the secant method, except that
    a caller-supplied derivative replaces the finite-difference
    estimate. Unlike the ODE variants, `fprime` is called with `x`
    alone.

    Order of Convergence:
        2:
            Quadratic convergence.

    See also:
        'newt-ode':
            The ODE equivalent. Contains more information. Recommended
            if `fprime(x)` can be computed more efficiently if `f(x)` is
            given.
    """
    # The initial bracket endpoints are reported first.
    yield x1
    yield x2
    # No initial estimate supplied: start from the bracket midpoint.
    if x is None:
        x = mean(x1, x2)
    # Consecutive iterations that failed to shrink the bracket enough.
    fail_streak = 0
    # True right after an Illinois step; the next step is then an
    # Illinois-Bisection step.
    illinois_pending = False
    while abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        # Once the bracket is nearly tight, drop to the final errors.
        if abs(x1 - x2) < 16 * (abs_tol + rel_tol * abs(x)):
            abs_tol = abs_err
            rel_tol = rel_err
        # Nudge the estimate towards the interior of the bracket.
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        yield x
        # Re-orient the bracket so that x always replaces (x2, y2).
        if sign(y) == sign(y1):
            x1, x2, y1, y2 = x2, x1, y2, y1
        # Update the streak of insufficient bracket reductions.
        if abs(x - x1) > 0.75 * abs(x1 - x2):
            fail_streak += 1
        else:
            fail_streak = 0
        # Coordinates of (x, y) normalized against the nearer endpoint.
        if abs(x - x1) < abs(x - x2):
            t = (x - x1) / (x2 - x1)
            u = (y - y1) / (y2 - y1)
        else:
            t = (x - x2) / (x1 - x2)
            u = (y - y2) / (y1 - y2)
        x2, y2 = x, y
        # Not linear-ish: take a bisection step instead.
        if not t > (t * t + u * u - 1) / 2 < u:
            x = float_mean(x1, x2)
            illinois_pending = False
        # Chase the previous Illinois step with an Illinois-Bisection step.
        elif illinois_pending:
            x = secant(x1, x2, y1, y2)
            gap = mean(x1, x2) - x
            x += gap * (gap / (x1 - x2)) ** 2
            illinois_pending = False
        # Three failures in a row: force an Illinois step.
        elif fail_streak >= 3:
            y1 /= 2
            x = secant(x1, x2, y1, y2)
            illinois_pending = True
        # Otherwise take the plain Newton-Raphson step.
        else:
            slope = fprime(x)
            x = inf if slope == 0 else x - y / slope
        # Fall back to a secant step if the step escaped the bracket.
        if not is_between(x1, x, x2):
            x = secant(x1, x2, y1, y2)
    # Finish with one secant step for extra precision.
    yield secant(x1, x2, y1, y2)
def nonsimple_in(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    power: float,
    x: Optional[float],
    y1: float,
    y2: float,
    abs_err: float,
    rel_err: float,
    abs_tol: float,
    rel_tol: float,
) -> float:
    """
    The non-simple method excels at finding non-simple roots.

    Non-simple roots occur when `|f(x)| ~ C * |x - root| ^ power` when
    the `power` is not `1`. This is done by estimating the `power` each
    iteration and rescaling f(x) to make it linear. The secant method
    is then applied to the rescaled function values.

    Although superlinear convergence is eventually reached, many
    initial iterations may be spent performing slow approximations
    while the `power` is still inaccurate. Consider providing the
    `power` if it is known ahead of time.

    Order of Convergence:
        1.618:
            The same order of convergence as the secant method, but
            applies to non-simple roots such as `|x - 5| * (x - 5)`.
            The exact value is given by the root of:
                x^2 - x - 1

    See also:
        'secant':
            Equivalent method for simple roots.
    """
    # Generate initial point using the Bisection method.
    if x is None:
        x = mean(x1, x2)
    # Track how many times convergence fails in a row before using the Illinois method.
    bisect_fails = 0
    # Track if the Illinois method was used to use the Illinois-Bisection method next.
    used_illinois = False
    while abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        # Switch to the final tolerances once the bracket is nearly tight.
        if abs(x1 - x2) < 16 * (abs_tol + rel_tol * abs(x)):
            abs_tol = abs_err
            rel_tol = rel_err
        # Nudge the estimate towards the interior of the bracket.
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        # Estimate the order of the root.
        power = power_estimate(x1, x, x2, y1, y, y2, power)
        # Swap points to ensure x replaces x2.
        if sign(y) == sign(y1):
            x1, x2, y1, y2 = x2, x1, y2, y1
        # Track how many times convergence fails in a row before using the Illinois method.
        if abs(x - x1) > 0.75 * abs(x1 - x2):
            bisect_fails += 1
        else:
            bisect_fails = 0
        # Rescale the y values by the estimated power so the secant model
        # becomes linear; normalize by y_max (or y_min for negative
        # powers) to reduce overflow/underflow in signed_pow.
        y_min, _, y_max = sorted([abs(y), abs(y1), abs(y2)])
        if abs(x - x1) < abs(x - x2):
            x_ratio = (x - x1) / (x2 - x1)
            if 0.6 < power < 1.4:
                numerator = y / y_max - y1 / y_max
                denominator = y2 / y_max - y1 / y_max
            elif power > 0:
                numerator = signed_pow(y / y_max, power) - signed_pow(y1 / y_max, power)
                denominator = signed_pow(y2 / y_max, power) - signed_pow(y1 / y_max, power)
            else:
                numerator = signed_pow(y / y_min, power) - signed_pow(y1 / y_min, power)
                denominator = signed_pow(y2 / y_min, power) - signed_pow(y1 / y_min, power)
        else:
            x_ratio = (x - x2) / (x1 - x2)
            if 0.6 < power < 1.4:
                numerator = y / y_max - y2 / y_max
                denominator = y1 / y_max - y2 / y_max
            elif power > 0:
                numerator = signed_pow(y / y_max, power) - signed_pow(y2 / y_max, power)
                denominator = signed_pow(y1 / y_max, power) - signed_pow(y2 / y_max, power)
            else:
                numerator = signed_pow(y / y_min, power) - signed_pow(y2 / y_min, power)
                denominator = signed_pow(y1 / y_min, power) - signed_pow(y2 / y_min, power)
        y_ratio = numerator / denominator
        # Use the bisection method if not linear-ish.
        if not x_ratio > (x_ratio * x_ratio + y_ratio * y_ratio - 1) / 2 < y_ratio:
            x2 = x
            y2 = y
            x = float_mean(x1, x2)
            # Bug fix: clear the Illinois flag here, consistent with the
            # sibling methods; otherwise a stale flag from a previous
            # Illinois step triggers a spurious Illinois-Bisection step
            # on a later iteration.
            used_illinois = False
        # Follow-up the Illinois method with the Illinois-Bisection method.
        elif used_illinois:
            x2 = x
            y2 = y
            x = secant(x1, x2, y1, y2, power)
            dx = mean(x1, x2) - x
            x += dx * (dx / (x1 - x2)) ** 2
            used_illinois = False
        # Force the Illinois method if convergence fails at least twice in a row.
        elif bisect_fails >= 2:
            x2 = x
            y2 = y
            y = y1 / 2 ** min(10 * sign(power), 1 / power, key=abs)
            x = secant(x1, x2, y, y2, power)
            used_illinois = True
        # Use the secant method with x and x2.
        elif abs(x2 - x) < 1.25 * abs(x1 - x) and abs(y) < abs(y2) < 1.25 * abs(y1) and is_between(mean(x1, x), secant(x, x2, y, y2), x):
            x2, x = x, secant(x, x2, y, y2, power)
            y2 = y
            if not is_between(x1, x, x2):
                x = secant(x1, x2, y1, y2, power)
        # Use the secant method with x and x1.
        else:
            x2 = x
            y2 = y
            x = secant(x1, x2, y1, y2, power)
    # Use the secant method on the final iteration for high precision.
    if 0.6 < power < 1.4:
        return secant(x1, x2, y1, y2)
    else:
        return secant(x1, x2, y1, y2, power)
def nonsimple_iter(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    power: float,
    x: Optional[float],
    y1: float,
    y2: float,
    abs_err: float,
    rel_err: float,
    abs_tol: float,
    rel_tol: float,
) -> Iterator[float]:
    """
    The non-simple method excels at finding non-simple roots.

    Non-simple roots occur when `|f(x)| ~ C * |x - root| ^ power` when
    the `power` is not `1`. This is done by estimating the `power` each
    iteration and rescaling f(x) to make it linear. The secant method
    is then applied to the rescaled function values.

    Although superlinear convergence is eventually reached, many
    initial iterations may be spent performing slow approximations
    while the `power` is still inaccurate. Consider providing the
    `power` if it is known ahead of time.

    Order of Convergence:
        1.618:
            The same order of convergence as the secant method, but
            applies to non-simple roots such as `|x - 5| * (x - 5)`.
            The exact value is given by the root of:
                x^2 - x - 1

    See also:
        'secant':
            Equivalent method for simple roots.
    """
    # The initial bracket endpoints are reported first.
    yield x1
    yield x2
    # Generate initial point using the Bisection method.
    if x is None:
        x = mean(x1, x2)
    # Track how many times convergence fails in a row before using the Illinois method.
    bisect_fails = 0
    # Track if the Illinois method was used to use the Illinois-Bisection method next.
    used_illinois = False
    while abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        # Switch to the final tolerances once the bracket is nearly tight.
        if abs(x1 - x2) < 16 * (abs_tol + rel_tol * abs(x)):
            abs_tol = abs_err
            rel_tol = rel_err
        # Nudge the estimate towards the interior of the bracket.
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        yield x
        # Estimate the order of the root.
        power = power_estimate(x1, x, x2, y1, y, y2, power)
        # Swap points to ensure x replaces x2.
        if sign(y) == sign(y1):
            x1, x2, y1, y2 = x2, x1, y2, y1
        # Track how many times convergence fails in a row before using the Illinois method.
        if abs(x - x1) > 0.75 * abs(x1 - x2):
            bisect_fails += 1
        else:
            bisect_fails = 0
        # Rescale the y values by the estimated power so the secant model
        # becomes linear; normalize by y_max (or y_min for negative
        # powers) to reduce overflow/underflow in signed_pow.
        y_min, _, y_max = sorted([abs(y), abs(y1), abs(y2)])
        if abs(x - x1) < abs(x - x2):
            x_ratio = (x - x1) / (x2 - x1)
            if 0.6 < power < 1.4:
                numerator = y / y_max - y1 / y_max
                denominator = y2 / y_max - y1 / y_max
            elif power > 0:
                numerator = signed_pow(y / y_max, power) - signed_pow(y1 / y_max, power)
                denominator = signed_pow(y2 / y_max, power) - signed_pow(y1 / y_max, power)
            else:
                numerator = signed_pow(y / y_min, power) - signed_pow(y1 / y_min, power)
                denominator = signed_pow(y2 / y_min, power) - signed_pow(y1 / y_min, power)
        else:
            x_ratio = (x - x2) / (x1 - x2)
            if 0.6 < power < 1.4:
                numerator = y / y_max - y2 / y_max
                denominator = y1 / y_max - y2 / y_max
            elif power > 0:
                numerator = signed_pow(y / y_max, power) - signed_pow(y2 / y_max, power)
                denominator = signed_pow(y1 / y_max, power) - signed_pow(y2 / y_max, power)
            else:
                numerator = signed_pow(y / y_min, power) - signed_pow(y2 / y_min, power)
                denominator = signed_pow(y1 / y_min, power) - signed_pow(y2 / y_min, power)
        y_ratio = numerator / denominator
        # Use the bisection method if not linear-ish.
        if not x_ratio > (x_ratio * x_ratio + y_ratio * y_ratio - 1) / 2 < y_ratio:
            x2 = x
            y2 = y
            x = float_mean(x1, x2)
            # Bug fix: clear the Illinois flag here, consistent with the
            # sibling methods; otherwise a stale flag from a previous
            # Illinois step triggers a spurious Illinois-Bisection step
            # on a later iteration.
            used_illinois = False
        # Follow-up the Illinois method with the Illinois-Bisection method.
        elif used_illinois:
            x2 = x
            y2 = y
            x = secant(x1, x2, y1, y2, power)
            dx = mean(x1, x2) - x
            x += dx * (dx / (x1 - x2)) ** 2
            used_illinois = False
        # Force the Illinois method if convergence fails at least twice in a row.
        elif bisect_fails >= 2:
            x2 = x
            y2 = y
            y = y1 / 2 ** min(10 * sign(power), 1 / power, key=abs)
            x = secant(x1, x2, y, y2, power)
            used_illinois = True
        # Use the secant method with x and x2.
        elif abs(x2 - x) < 1.25 * abs(x1 - x) and abs(y) < abs(y2) < 1.25 * abs(y1) and is_between(mean(x1, x), secant(x, x2, y, y2), x):
            x2, x = x, secant(x, x2, y, y2, power)
            y2 = y
            if not is_between(x1, x, x2):
                x = secant(x1, x2, y1, y2, power)
        # Use the secant method with x and x1.
        else:
            x2 = x
            y2 = y
            x = secant(x1, x2, y1, y2, power)
    # Use the secant method on the final iteration for high precision.
    if 0.6 < power < 1.4:
        yield secant(x1, x2, y1, y2)
    else:
        yield secant(x1, x2, y1, y2, power)
def rk45_ode_in(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    fprime: Callable[[float, float], float],
    x: Optional[float],
    y1: float,
    y2: float,
    abs_err: float,
    rel_err: float,
    abs_tol: float,
    rel_tol: float,
) -> float:
    """
    The RK45 ODE method is the ODE equivalent of Simpson's method for
    numerical integration.

    The RK45 ODE method uses 4 `fprime` evaluations per iteration to
    reach extremely high orders of convergence. The RK45 ODE method
    is recommended if `fprime` evaluations are exceptionally cheap to
    compute compared to `f` evaluations, such as for special functions
    like the error function.

    Order of Convergence:
        5:
            Quintic convergence.

    See also:
        'heun-ode':
            Uses fewer `fprime` calls per iteration to gain increased
            order of convergence. Recommended if `fprime(x, y)` can
            be computed relatively cheaply compared to `f`.
        'midpoint-ode':
            Uses significantly fewer `fprime` calls per iteration, but
            more arithmetic operations to compensate the order of
            convergence and its robustness. Recommended if function
            calls are somewhat expensive.
        'newt-ode':
            Uses significantly fewer `fprime` calls per iteration as
            well as fewer arithmetic operations per iteration.
            Recommended if the cost of the algorithm itself is
            significantly more expensive than function calls.
    """
    # Generate initial point using the Bisection method.
    if x is None:
        x = mean(x1, x2)
    # Track how many times convergence fails in a row before using the Illinois method.
    bisect_fails = 0
    # Track if the Illinois method was used to use the Illinois-Bisection method next.
    used_illinois = False
    while abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        # Switch to the final tolerances once the bracket is nearly tight.
        if abs(x1 - x2) < 16 * (abs_tol + rel_tol * abs(x)):
            abs_tol = abs_err
            rel_tol = rel_err
        # Nudge the estimate towards the interior of the bracket.
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        # Swap points to ensure x replaces x2.
        if sign(y) == sign(y1):
            x1, x2, y1, y2 = x2, x1, y2, y1
        # Track how many times convergence fails in a row before using the Illinois method.
        if abs(x - x1) > 0.75 * abs(x1 - x2):
            bisect_fails += 1
        else:
            bisect_fails = 0
        if abs(x - x1) < abs(x - x2):
            x_ratio = (x - x1) / (x2 - x1)
            y_ratio = (y - y1) / (y2 - y1)
        else:
            x_ratio = (x - x2) / (x1 - x2)
            y_ratio = (y - y2) / (y1 - y2)
        x2 = x
        y2 = y
        # Use the bisection method if not linear-ish.
        if not x_ratio > (x_ratio * x_ratio + y_ratio * y_ratio - 1) / 2 < y_ratio:
            x = float_mean(x1, x2)
            used_illinois = False
        # Follow-up the Illinois method with the Illinois-Bisection method.
        elif used_illinois:
            x = secant(x1, x2, y1, y2)
            dx = mean(x1, x2) - x
            x += dx * (dx / (x1 - x2)) ** 2
            used_illinois = False
        # Force the Illinois method if convergence fails at least thrice in a row.
        elif bisect_fails >= 3:
            y1 /= 2
            x = secant(x1, x2, y1, y2)
            used_illinois = True
        # Use the RK45 ODE method.
        else:
            # Bug fix: a zero derivative at any stage used to raise an
            # unhandled ZeroDivisionError; treat it like the zero-slope
            # guard in 'newt-ode' by forcing x = inf so the secant
            # fall-back below fires instead.
            try:
                k1 = y / fprime(x, y)
                k2 = y / fprime(x - k1 / 2, y / 2)
                k3 = y / fprime(x - k2 / 2, y / 2)
                k4 = y / fprime(x - k3, 0)
                x -= (k1 + 2 * k2 + 2 * k3 + k4) / 6
            except ZeroDivisionError:
                x = inf
        # Fall-back to the secant method if convergence fails.
        if not is_between(x1, x, x2):
            x = secant(x1, x2, y1, y2)
    # Use the secant method on the final iteration for high precision.
    return secant(x1, x2, y1, y2)
def rk45_ode_iter(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    fprime: Callable[[float, float], float],
    x: Optional[float],
    y1: float,
    y2: float,
    abs_err: float,
    rel_err: float,
    abs_tol: float,
    rel_tol: float,
) -> Iterator[float]:
    """
    The RK45 ODE method is the ODE equivalent of Simpson's method for
    numerical integration.

    The RK45 ODE method uses 4 `fprime` evaluations per iteration to
    reach extremely high orders of convergence. The RK45 ODE method
    is recommended if `fprime` evaluations are exceptionally cheap to
    compute compared to `f` evaluations, such as for special functions
    like the error function.

    Order of Convergence:
        5:
            Quintic convergence.

    See also:
        'heun-ode':
            Uses fewer `fprime` calls per iteration to gain increased
            order of convergence. Recommended if `fprime(x, y)` can
            be computed relatively cheaply compared to `f`.
        'midpoint-ode':
            Uses significantly fewer `fprime` calls per iteration, but
            more arithmetic operations to compensate the order of
            convergence and its robustness. Recommended if function
            calls are somewhat expensive.
        'newt-ode':
            Uses significantly fewer `fprime` calls per iteration as
            well as fewer arithmetic operations per iteration.
            Recommended if the cost of the algorithm itself is
            significantly more expensive than function calls.
    """
    # The initial bracket endpoints are reported first.
    yield x1
    yield x2
    # Generate initial point using the Bisection method.
    if x is None:
        x = mean(x1, x2)
    # Track how many times convergence fails in a row before using the Illinois method.
    bisect_fails = 0
    # Track if the Illinois method was used to use the Illinois-Bisection method next.
    used_illinois = False
    while abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        # Switch to the final tolerances once the bracket is nearly tight.
        if abs(x1 - x2) < 16 * (abs_tol + rel_tol * abs(x)):
            abs_tol = abs_err
            rel_tol = rel_err
        # Nudge the estimate towards the interior of the bracket.
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        yield x
        # Swap points to ensure x replaces x2.
        if sign(y) == sign(y1):
            x1, x2, y1, y2 = x2, x1, y2, y1
        # Track how many times convergence fails in a row before using the Illinois method.
        if abs(x - x1) > 0.75 * abs(x1 - x2):
            bisect_fails += 1
        else:
            bisect_fails = 0
        if abs(x - x1) < abs(x - x2):
            x_ratio = (x - x1) / (x2 - x1)
            y_ratio = (y - y1) / (y2 - y1)
        else:
            x_ratio = (x - x2) / (x1 - x2)
            y_ratio = (y - y2) / (y1 - y2)
        x2 = x
        y2 = y
        # Use the bisection method if not linear-ish.
        if not x_ratio > (x_ratio * x_ratio + y_ratio * y_ratio - 1) / 2 < y_ratio:
            x = float_mean(x1, x2)
            used_illinois = False
        # Follow-up the Illinois method with the Illinois-Bisection method.
        elif used_illinois:
            x = secant(x1, x2, y1, y2)
            dx = mean(x1, x2) - x
            x += dx * (dx / (x1 - x2)) ** 2
            used_illinois = False
        # Force the Illinois method if convergence fails at least thrice in a row.
        elif bisect_fails >= 3:
            y1 /= 2
            x = secant(x1, x2, y1, y2)
            used_illinois = True
        # Use the RK45 ODE method.
        else:
            # Bug fix: a zero derivative at any stage used to raise an
            # unhandled ZeroDivisionError; treat it like the zero-slope
            # guard in 'newt-ode' by forcing x = inf so the secant
            # fall-back below fires instead.
            try:
                k1 = y / fprime(x, y)
                k2 = y / fprime(x - k1 / 2, y / 2)
                k3 = y / fprime(x - k2 / 2, y / 2)
                k4 = y / fprime(x - k3, 0)
                x -= (k1 + 2 * k2 + 2 * k3 + k4) / 6
            except ZeroDivisionError:
                x = inf
        # Fall-back to the secant method if convergence fails.
        if not is_between(x1, x, x2):
            x = secant(x1, x2, y1, y2)
    # Use the secant method on the final iteration for high precision.
    yield secant(x1, x2, y1, y2)
def secant_in(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    x: Optional[float],
    y1: float,
    y2: float,
    abs_err: float,
    rel_err: float,
    abs_tol: float,
    rel_tol: float,
) -> float:
    """
    Bracketed root-finder based on linear (secant) interpolation.

    Unlike the textbook secant method, this implementation keeps a
    tight bracket around the root, which guarantees convergence.
    Unlike Regula Falsi or Illinois, keeping a tight bracket does not
    reduce the order of convergence, and unlike Dekker's method a
    worst-case linear rate is guaranteed; the added robustness costs
    a few extra arithmetic operations per iteration via a modification
    of Chandrupatla's method.

    Order of Convergence:
        1.618:
            The same order of convergence as the original secant
            method.
            The exact value is given by the root of:
                x^2 - x - 1

    See also:
        'chandrupatla':
            Higher-order interpolation buys a higher order of
            convergence at the cost of a bit more arithmetic per
            iteration.
        'newt-safe':
            Uses a supplied derivative instead of finite-difference
            estimates of it.
    """
    # No initial estimate supplied: start from the bracket midpoint.
    if x is None:
        x = mean(x1, x2)
    # Consecutive iterations that failed to shrink the bracket enough.
    fail_streak = 0
    # True right after an Illinois step; the next step is then an
    # Illinois-Bisection step.
    illinois_pending = False
    while abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        # Once the bracket is nearly tight, drop to the final errors.
        if abs(x1 - x2) < 16 * (abs_tol + rel_tol * abs(x)):
            abs_tol = abs_err
            rel_tol = rel_err
        # Nudge the estimate towards the interior of the bracket.
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        # Re-orient the bracket so that x always replaces (x2, y2).
        if sign(y) == sign(y1):
            x1, x2, y1, y2 = x2, x1, y2, y1
        # Update the streak of insufficient bracket reductions.
        if abs(x - x1) > 0.75 * abs(x1 - x2):
            fail_streak += 1
        else:
            fail_streak = 0
        # Coordinates of (x, y) normalized against the nearer endpoint.
        if abs(x - x1) < abs(x - x2):
            t = (x - x1) / (x2 - x1)
            u = (y - y1) / (y2 - y1)
        else:
            t = (x - x2) / (x1 - x2)
            u = (y - y2) / (y1 - y2)
        # Not linear-ish: take a bisection step instead.
        if not t > (t * t + u * u - 1) / 2 < u:
            x2, y2 = x, y
            x = float_mean(x1, x2)
            illinois_pending = False
        # Chase the previous Illinois step with an Illinois-Bisection step.
        elif illinois_pending:
            x2, y2 = x, y
            x = secant(x1, x2, y1, y2)
            gap = mean(x1, x2) - x
            x += gap * (gap / (x1 - x2)) ** 2
            illinois_pending = False
        # Two failures in a row: force an Illinois step.
        elif fail_streak >= 2:
            x2, y2 = x, y
            y1 /= 2
            x = secant(x1, x2, y1, y2)
            illinois_pending = True
        # Interpolate through x and the old x2 when x is clearly the
        # better endpoint and the interpolant lands acceptably.
        elif abs(x2 - x) < 1.25 * abs(x1 - x) and abs(y) < abs(y2) < 1.25 * abs(y1) and is_between(mean(x1, x), secant(x, x2, y, y2), x):
            x2, x = x, secant(x, x2, y, y2)
            y2 = y
        # Otherwise interpolate through x and x1.
        else:
            x2, y2 = x, y
            x = secant(x1, x2, y1, y2)
    # Finish with one secant step for extra precision.
    return secant(x1, x2, y1, y2)
def secant_iter(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    x: Optional[float],
    y1: float,
    y2: float,
    abs_err: float,
    rel_err: float,
    abs_tol: float,
    rel_tol: float,
) -> Iterator[float]:
    """
    Generator form of the bracketed secant root-finder, yielding every
    estimate of the root as it is produced.

    Unlike the textbook secant method, this implementation keeps a
    tight bracket around the root, which guarantees convergence.
    Unlike Regula Falsi or Illinois, keeping a tight bracket does not
    reduce the order of convergence, and unlike Dekker's method a
    worst-case linear rate is guaranteed; the added robustness costs
    a few extra arithmetic operations per iteration via a modification
    of Chandrupatla's method.

    Order of Convergence:
        1.618:
            The same order of convergence as the original secant
            method.
            The exact value is given by the root of:
                x^2 - x - 1

    See also:
        'chandrupatla':
            Higher-order interpolation buys a higher order of
            convergence at the cost of a bit more arithmetic per
            iteration.
        'newt-safe':
            Uses a supplied derivative instead of finite-difference
            estimates of it.
    """
    # The initial bracket endpoints are reported first.
    yield x1
    yield x2
    # No initial estimate supplied: start from the bracket midpoint.
    if x is None:
        x = mean(x1, x2)
    # Consecutive iterations that failed to shrink the bracket enough.
    fail_streak = 0
    # True right after an Illinois step; the next step is then an
    # Illinois-Bisection step.
    illinois_pending = False
    while abs(x1 - x2) > abs_err + rel_err * abs(x2) and y2 != 0:
        # Once the bracket is nearly tight, drop to the final errors.
        if abs(x1 - x2) < 16 * (abs_tol + rel_tol * abs(x)):
            abs_tol = abs_err
            rel_tol = rel_err
        # Nudge the estimate towards the interior of the bracket.
        x += 0.25 * (abs_tol + rel_tol * abs(x)) * sign((x1 - x) + (x2 - x))
        y = f(x)
        yield x
        # Re-orient the bracket so that x always replaces (x2, y2).
        if sign(y) == sign(y1):
            x1, x2, y1, y2 = x2, x1, y2, y1
        # Update the streak of insufficient bracket reductions.
        if abs(x - x1) > 0.75 * abs(x1 - x2):
            fail_streak += 1
        else:
            fail_streak = 0
        # Coordinates of (x, y) normalized against the nearer endpoint.
        if abs(x - x1) < abs(x - x2):
            t = (x - x1) / (x2 - x1)
            u = (y - y1) / (y2 - y1)
        else:
            t = (x - x2) / (x1 - x2)
            u = (y - y2) / (y1 - y2)
        # Not linear-ish: take a bisection step instead.
        if not t > (t * t + u * u - 1) / 2 < u:
            x2, y2 = x, y
            x = float_mean(x1, x2)
            illinois_pending = False
        # Chase the previous Illinois step with an Illinois-Bisection step.
        elif illinois_pending:
            x2, y2 = x, y
            x = secant(x1, x2, y1, y2)
            gap = mean(x1, x2) - x
            x += gap * (gap / (x1 - x2)) ** 2
            illinois_pending = False
        # Two failures in a row: force an Illinois step.
        elif fail_streak >= 2:
            x2, y2 = x, y
            y1 /= 2
            x = secant(x1, x2, y1, y2)
            illinois_pending = True
        # Interpolate through x and the old x2 when x is clearly the
        # better endpoint and the interpolant lands acceptably.
        elif abs(x2 - x) < 1.25 * abs(x1 - x) and abs(y) < abs(y2) < 1.25 * abs(y1) and is_between(mean(x1, x), secant(x, x2, y, y2), x):
            x2, x = x, secant(x, x2, y, y2)
            y2 = y
        # Otherwise interpolate through x and x1.
        else:
            x2, y2 = x, y
            x = secant(x1, x2, y1, y2)
    # Finish with one secant step for extra precision.
    yield secant(x1, x2, y1, y2)
@overload
def root_in(
f: Callable[[float], float],
x1: float,
x2: float,
*,
x: Optional[float] = ...,
y1: Optional[float] = ...,
y2: Optional[float] = ...,
abs_err: float = ...,
rel_err: float = ...,
abs_tol: Optional[float] = ...,
rel_tol: Optional[float] = ...,
method: DerivativeFreeMethod = ...,
) -> float:
...
@overload
def root_in(
f: Callable[[float], float],
x1: float,
x2: float,
fprime: Callable[[float, float], float],
*,
x: Optional[float] = ...,
y1: Optional[float] = ...,
y2: Optional[float] = ...,
abs_err: float = ...,
rel_err: float = ...,
abs_tol: Optional[float] = ...,
rel_tol: Optional[float] = ...,
method: ODEMethod,
) -> float:
...
@overload
def root_in(
f: Callable[[float], float],
x1: float,
x2: float,
fprime: Callable[[float], float],
*,
x: Optional[float] = ...,
y1: Optional[float] = ...,
y2: Optional[float] = ...,
abs_err: float = ...,
rel_err: float = ...,
abs_tol: Optional[float] = ...,
rel_tol: Optional[float] = ...,
method: Literal["newt-safe"],
) -> float:
...
# Overload: 'non-simple' roots; `power` estimates the multiplicity/order of
# the root instead of a derivative.
@overload
def root_in(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    power: float = ...,
    *,
    x: Optional[float] = ...,
    y1: Optional[float] = ...,
    y2: Optional[float] = ...,
    abs_err: float = ...,
    rel_err: float = ...,
    abs_tol: Optional[float] = ...,
    rel_tol: Optional[float] = ...,
    method: Literal["non-simple"],
) -> float:
    ...
def root_in(
    f,
    x1,
    x2,
    fprime=None,
    power=1.0,
    *,
    x=None,
    y1=None,
    y2=None,
    abs_err=0.0,
    rel_err=32 * FLOAT_EPSILON,
    abs_tol=None,
    rel_tol=None,
    method="chandrupatla",
):
    """
    Finds a bracketed root of a function.
    Bracketing methods allow roots of a function to be found quickly
    provided two initial points where one yields a negative value and
    the other yields a positive value.
    Parameters:
        f:
            The objective function for which the `x` in `f(x) = 0` is
            solved for.
        x1, x2:
            Two initial points where `sign(f(x1)) == -sign(f(x2))`.
        fprime:
            The derivative of `f` for 'newt-safe' or an ODE method.
        power, default 1:
            An estimate of the order of the root for 'non-simple'.
        x:
            An initial estimate of the root which can be used to
            jump-start the initial convergence.
        y1, y2:
            Pre-computed initial values for `f(x1)` and `f(x2)`.
        abs_err, default 0:
            The desired absolute error.
        rel_err, default 32 * FLOAT_EPSILON:
            The desired relative error, default near machine precision.
        abs_tol, rel_tol:
            An initial tolerance to help jump-start an initially tight
            bracket.
        method, default 'chandrupatla':
            The method used to find the root. One of 'bisect',
            'chandrupatla', 'heun-ode', 'midpoint-ode', 'newt-ode',
            'newt-safe', 'non-simple', 'rk45-ode', or 'secant'.
            Different methods have different advantages and
            disadvantages, which should be considered. The default
            method is highly robust.
    Returns:
        x:
            The estimate of the root.
    Raises:
        ValueError:
            If `sign(f(x1))` is not the opposite of `sign(f(x2))`.
    """
    exception = type_check(f, x1, x2, fprime, power, x, y1, y2, abs_err, rel_err, abs_tol, rel_tol, method)
    if exception is not None:
        raise exception
    x1 = float(x1)
    x2 = float(x2)
    # Discard the initial estimate if it does not lie inside the bracket.
    if x is not None:
        x = float(x)
        if not is_between(x1, x, x2):
            x = None
    # Clamp infinite endpoints to the largest finite float.
    if isinf(x1):
        x1 = sign(x1) * FLOAT_MAX
    if isinf(x2):
        x2 = sign(x2) * FLOAT_MAX
    if y1 is None:
        y1 = f(x1)
    else:
        y1 = float(y1)
    if y2 is None:
        y2 = f(x2)
    else:
        y2 = float(y2)
    if sign(y1) != -sign(y2):
        raise ValueError("sign(f(x1)) is not the opposite of sign(f(x2))")
    # Order the points so that (x1, y1) has the smaller residual.
    if abs(y1) < abs(y2):
        x1, x2, y1, y2 = x2, x1, y2, y1
    # Normalize the error/tolerance settings to safe, usable ranges.
    abs_err = max(float(abs_err), FLOAT_MIN)
    rel_err = max(float(rel_err), 32 * FLOAT_EPSILON)
    if rel_tol is None:
        rel_tol = rel_err / (1024 * FLOAT_EPSILON)
    else:
        rel_tol = max(float(rel_tol), rel_err)
    if abs_tol is None:
        abs_tol = rel_tol * min(1, abs(x1 - x2))
    else:
        abs_tol = max(float(abs_tol), abs_err)
    rel_err = min(rel_err, 0.5)
    rel_tol = min(rel_tol, 0.5)
    # Dispatch to the solver implementing the requested method.
    if method == "bisect":
        return bisect_in(f, x1, x2, x, y1, y2, abs_err, rel_err, abs_tol, rel_tol)
    elif method == "chandrupatla":
        return chandrupatla_in(f, x1, x2, x, y1, y2, abs_err, rel_err, abs_tol, rel_tol)
    elif method == "heun-ode":
        return heun_ode_in(f, x1, x2, fprime, x, y1, y2, abs_err, rel_err, abs_tol, rel_tol)
    elif method == "midpoint-ode":
        return midpoint_ode_in(f, x1, x2, fprime, x, y1, y2, abs_err, rel_err, abs_tol, rel_tol)
    # BUG FIX: both of the next two branches previously tested "newt-safe",
    # which made newt_safe_in unreachable and mis-dispatched "newt-safe" to
    # the ODE-based solver. The ODE-based Newton variant is selected by
    # "newt-ode", consistent with the other "*-ode" method names.
    elif method == "newt-ode":
        return newt_ode_in(f, x1, x2, fprime, x, y1, y2, abs_err, rel_err, abs_tol, rel_tol)
    elif method == "newt-safe":
        return newt_safe_in(f, x1, x2, fprime, x, y1, y2, abs_err, rel_err, abs_tol, rel_tol)
    elif method == "non-simple":
        # `power` is inverted once here so the solver works with 1/order.
        return nonsimple_in(f, x1, x2, 1 / float(power), x, y1, y2, abs_err, rel_err, abs_tol, rel_tol)
    elif method == "rk45-ode":
        return rk45_ode_in(f, x1, x2, fprime, x, y1, y2, abs_err, rel_err, abs_tol, rel_tol)
    elif method == "secant":
        return secant_in(f, x1, x2, x, y1, y2, abs_err, rel_err, abs_tol, rel_tol)
    else:
        assert False, f"Missing method {method!r}"
# Overload: derivative-free bracketing methods (the default family); no
# `fprime` argument is accepted.
@overload
def root_iter(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    *,
    x: Optional[float] = ...,
    y1: Optional[float] = ...,
    y2: Optional[float] = ...,
    abs_err: float = ...,
    rel_err: float = ...,
    abs_tol: Optional[float] = ...,
    rel_tol: Optional[float] = ...,
    method: DerivativeFreeMethod = ...,
) -> Iterator[float]:
    ...
# Overload: ODE-based methods; `fprime` takes two arguments (x, y) and
# `method` must be given explicitly.
@overload
def root_iter(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    fprime: Callable[[float, float], float],
    *,
    x: Optional[float] = ...,
    y1: Optional[float] = ...,
    y2: Optional[float] = ...,
    abs_err: float = ...,
    rel_err: float = ...,
    abs_tol: Optional[float] = ...,
    rel_tol: Optional[float] = ...,
    method: ODEMethod,
) -> Iterator[float]:
    ...
# Overload: safeguarded Newton ('newt-safe'); `fprime` is the plain
# one-argument derivative of `f`.
@overload
def root_iter(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    fprime: Callable[[float], float],
    *,
    x: Optional[float] = ...,
    y1: Optional[float] = ...,
    y2: Optional[float] = ...,
    abs_err: float = ...,
    rel_err: float = ...,
    abs_tol: Optional[float] = ...,
    rel_tol: Optional[float] = ...,
    method: Literal["newt-safe"],
) -> Iterator[float]:
    ...
# Overload: 'non-simple' roots; `power` estimates the multiplicity/order of
# the root instead of a derivative.
@overload
def root_iter(
    f: Callable[[float], float],
    x1: float,
    x2: float,
    power: float = ...,
    *,
    x: Optional[float] = ...,
    y1: Optional[float] = ...,
    y2: Optional[float] = ...,
    abs_err: float = ...,
    rel_err: float = ...,
    abs_tol: Optional[float] = ...,
    rel_tol: Optional[float] = ...,
    method: Literal["non-simple"],
) -> Iterator[float]:
    ...
def root_iter(
    f,
    x1,
    x2,
    fprime=None,
    power=1.0,
    *,
    x=None,
    y1=None,
    y2=None,
    abs_err=0.0,
    rel_err=32 * FLOAT_EPSILON,
    abs_tol=None,
    rel_tol=None,
    method="chandrupatla",
):
    """
    Finds a bracketed root of a function.
    Bracketing methods allow roots of a function to be found quickly
    provided two initial points where one yields a negative value and
    the other yields a positive value.
    Parameters:
        f:
            The objective function for which the `x` in `f(x) = 0` is
            solved for.
        x1, x2:
            Two initial points where `sign(f(x1)) == -sign(f(x2))`.
        fprime:
            The derivative of `f` for 'newt-safe' or an ODE method.
        power, default 1:
            An estimate of the order of the root for 'non-simple'.
        x:
            An initial estimate of the root which can be used to
            jump-start the initial convergence.
        y1, y2:
            Pre-computed initial values for `f(x1)` and `f(x2)`.
        abs_err, default 0:
            The desired absolute error.
        rel_err, default 32 * FLOAT_EPSILON:
            The desired relative error, default near machine precision.
        abs_tol, rel_tol:
            An initial tolerance to help jump-start an initially tight
            bracket.
        method, default 'chandrupatla':
            The method used to find the root. One of 'bisect',
            'chandrupatla', 'heun-ode', 'midpoint-ode', 'newt-ode',
            'newt-safe', 'non-simple', 'rk45-ode', or 'secant'.
            Different methods have different advantages and
            disadvantages, which should be considered. The default
            method is highly robust.
    Yields:
        x:
            The current estimate of the root. Use `functools.lru_cache`
            to cache the most recent `f(x)` evaluation, if necessary.
    Raises:
        ValueError:
            If `sign(f(x1))` is not the opposite of `sign(f(x2))`.
    Note:
        The final iteration of `f(x)` is not evaluated by pyroot.
    """
    exception = type_check(f, x1, x2, fprime, power, x, y1, y2, abs_err, rel_err, abs_tol, rel_tol, method)
    if exception is not None:
        raise exception
    x1 = float(x1)
    x2 = float(x2)
    # Discard the initial estimate if it does not lie inside the bracket.
    if x is not None:
        x = float(x)
        if not is_between(x1, x, x2):
            x = None
    # Clamp infinite endpoints to the largest finite float.
    if isinf(x1):
        x1 = sign(x1) * FLOAT_MAX
    if isinf(x2):
        x2 = sign(x2) * FLOAT_MAX
    if y1 is None:
        y1 = f(x1)
    else:
        y1 = float(y1)
    if y2 is None:
        y2 = f(x2)
    else:
        y2 = float(y2)
    if sign(y1) != -sign(y2):
        raise ValueError("sign(f(x1)) is not the opposite of sign(f(x2))")
    # Order the points so that (x1, y1) has the smaller residual.
    if abs(y1) < abs(y2):
        x1, x2, y1, y2 = x2, x1, y2, y1
    # Normalize the error/tolerance settings to safe, usable ranges.
    abs_err = max(float(abs_err), FLOAT_MIN)
    rel_err = max(float(rel_err), 32 * FLOAT_EPSILON)
    if rel_tol is None:
        rel_tol = rel_err / (1024 * FLOAT_EPSILON)
    else:
        rel_tol = max(float(rel_tol), rel_err)
    if abs_tol is None:
        abs_tol = rel_tol * min(1, abs(x1 - x2))
    else:
        abs_tol = max(float(abs_tol), abs_err)
    rel_err = min(rel_err, 0.5)
    rel_tol = min(rel_tol, 0.5)
    # Dispatch to the iterator implementing the requested method.
    if method == "bisect":
        return bisect_iter(f, x1, x2, x, y1, y2, abs_err, rel_err, abs_tol, rel_tol)
    elif method == "chandrupatla":
        return chandrupatla_iter(f, x1, x2, x, y1, y2, abs_err, rel_err, abs_tol, rel_tol)
    elif method == "heun-ode":
        return heun_ode_iter(f, x1, x2, fprime, x, y1, y2, abs_err, rel_err, abs_tol, rel_tol)
    elif method == "midpoint-ode":
        return midpoint_ode_iter(f, x1, x2, fprime, x, y1, y2, abs_err, rel_err, abs_tol, rel_tol)
    # BUG FIX: both of the next two branches previously tested "newt-safe",
    # which made newt_safe_iter unreachable and mis-dispatched "newt-safe"
    # to the ODE-based solver. The ODE-based Newton variant is selected by
    # "newt-ode", consistent with the other "*-ode" method names.
    elif method == "newt-ode":
        return newt_ode_iter(f, x1, x2, fprime, x, y1, y2, abs_err, rel_err, abs_tol, rel_tol)
    elif method == "newt-safe":
        return newt_safe_iter(f, x1, x2, fprime, x, y1, y2, abs_err, rel_err, abs_tol, rel_tol)
    elif method == "non-simple":
        # `power` is inverted once here so the iterator works with 1/order.
        return nonsimple_iter(f, x1, x2, 1 / float(power), x, y1, y2, abs_err, rel_err, abs_tol, rel_tol)
    elif method == "rk45-ode":
        return rk45_ode_iter(f, x1, x2, fprime, x, y1, y2, abs_err, rel_err, abs_tol, rel_tol)
    elif method == "secant":
        return secant_iter(f, x1, x2, x, y1, y2, abs_err, rel_err, abs_tol, rel_tol)
    else:
        assert False, f"Missing method {method!r}"
| 36.647776
| 161
| 0.54493
| 13,402
| 93,122
| 3.706163
| 0.03492
| 0.024079
| 0.012321
| 0.016106
| 0.939219
| 0.930501
| 0.923777
| 0.911858
| 0.908959
| 0.905114
| 0
| 0.048538
| 0.349327
| 93,122
| 2,540
| 162
| 36.662205
| 0.771207
| 0.34007
| 0
| 0.898618
| 0
| 0
| 0.027293
| 0
| 0
| 0
| 0
| 0
| 0.001152
| 1
| 0.021313
| false
| 0.000576
| 0.003456
| 0
| 0.06682
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
70fec7da1cd7f19941b79b4e41b643113716a444
| 39,438
|
py
|
Python
|
tests/io/test_exclusions.py
|
sbiradarctr/pyTenable
|
2a6930cd7b29036780c291581d89ab33c0fd6679
|
[
"MIT"
] | null | null | null |
tests/io/test_exclusions.py
|
sbiradarctr/pyTenable
|
2a6930cd7b29036780c291581d89ab33c0fd6679
|
[
"MIT"
] | null | null | null |
tests/io/test_exclusions.py
|
sbiradarctr/pyTenable
|
2a6930cd7b29036780c291581d89ab33c0fd6679
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
from tenable.errors import *
from ..checker import check, single
from tests.io.test_networks import network
import uuid, pytest
@pytest.fixture
@pytest.mark.vcr()
def exclusion(request, api):
    # Creates a throwaway one-hour exclusion for 127.0.0.1 and registers a
    # finalizer that deletes it, tolerating the case where the test already
    # removed it.
    # NOTE(review): pytest marks applied to fixtures are ignored by pytest
    # itself -- presumably the vcr() mark here is a no-op; confirm against
    # the project's pytest-vcr configuration.
    excl = api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
        start_time=datetime.utcnow(),
        end_time=datetime.utcnow() + timedelta(hours=1))
    def teardown():
        try:
            api.exclusions.delete(excl['id'])
        except NotFoundError:
            pass
    request.addfinalizer(teardown)
    return excl
@pytest.mark.vcr()
def test_exclusions_create_name_typeerror(api):
    # A non-string name (int) must be rejected with a TypeError.
    with pytest.raises(TypeError):
        api.exclusions.create(1, ['127.0.0.1'],
            start_time=datetime.utcnow(),
            end_time=datetime.utcnow() + timedelta(hours=1))
@pytest.mark.vcr()
def test_exclusions_create_members_typeerror(api):
    # A bare string for `members` (must be a list) is rejected with TypeError.
    with pytest.raises(TypeError):
        # BUG FIX: was str(uuid.uuid4), which stringifies the function object
        # itself instead of generating a fresh UUID for the name.
        api.exclusions.create(str(uuid.uuid4()), '127.0.0.1',
            start_time=datetime.utcnow(),
            end_time=datetime.utcnow() + timedelta(hours=1))
@pytest.mark.vcr()
def test_exclusions_create_start_time_typeerror(api):
    # A non-datetime start_time is rejected with TypeError.
    with pytest.raises(TypeError):
        # BUG FIX: was str(uuid.uuid4) -- missing call parentheses, so the
        # function object itself was stringified rather than a fresh UUID.
        api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
            start_time='now',
            end_time=datetime.utcnow() + timedelta(hours=1))
@pytest.mark.vcr()
def test_exclusions_create_end_time_typeerror(api):
    # A non-datetime end_time is rejected with TypeError.
    with pytest.raises(TypeError):
        # BUG FIX: was str(uuid.uuid4) -- missing call parentheses, so the
        # function object itself was stringified rather than a fresh UUID.
        api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
            start_time=datetime.utcnow(),
            end_time='later')
# --- create(): argument-validation and authorization failures ---------------
@pytest.mark.vcr()
def test_exclusions_create_timezone_typeerror(api):
    with pytest.raises(TypeError):
        api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
            timezone=1,
            start_time=datetime.utcnow(),
            end_time=datetime.utcnow() + timedelta(hours=1))
@pytest.mark.vcr()
def test_exclusions_create_timezone_unexpectedvalue(api):
    # 'the zone of time' is not a recognized timezone name.
    with pytest.raises(UnexpectedValueError):
        api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
            timezone='the zone of time',
            start_time=datetime.utcnow(),
            end_time=datetime.utcnow() + timedelta(hours=1))
@pytest.mark.vcr()
def test_exclusions_create_description_typeerror(api):
    with pytest.raises(TypeError):
        api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
            description=1,
            start_time=datetime.utcnow(),
            end_time=datetime.utcnow() + timedelta(hours=1))
@pytest.mark.vcr()
def test_exclusions_create_frequency_typeerror(api):
    with pytest.raises(TypeError):
        api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
            frequency=1,
            start_time=datetime.utcnow(),
            end_time=datetime.utcnow() + timedelta(hours=1))
@pytest.mark.vcr()
def test_exclusions_create_frequency_unexpectedvalue(api):
    with pytest.raises(UnexpectedValueError):
        api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
            frequency='nope',
            start_time=datetime.utcnow(),
            end_time=datetime.utcnow() + timedelta(hours=1))
@pytest.mark.vcr()
def test_exclusions_create_interval_typeerror(api):
    with pytest.raises(TypeError):
        api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
            interval='nope',
            start_time=datetime.utcnow(),
            end_time=datetime.utcnow() + timedelta(hours=1))
@pytest.mark.vcr()
def test_exclusions_create_weekdays_typeerror(api):
    with pytest.raises(TypeError):
        api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
            weekdays='nope',
            frequency='weekly',
            start_time=datetime.utcnow(),
            end_time=datetime.utcnow() + timedelta(hours=1))
@pytest.mark.vcr()
def test_exclusions_create_weekdays_unexpectedvalue(api):
    with pytest.raises(UnexpectedValueError):
        api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
            weekdays=['MO', 'WE', 'nope'],
            frequency='weekly',
            start_time=datetime.utcnow(),
            end_time=datetime.utcnow() + timedelta(hours=1))
@pytest.mark.vcr()
def test_exclusions_create_day_of_month_typeerror(api):
    with pytest.raises(TypeError):
        api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
            day_of_month='nope',
            frequency='monthly',
            start_time=datetime.utcnow(),
            end_time=datetime.utcnow() + timedelta(hours=1))
@pytest.mark.vcr()
def test_exclusions_create_day_of_month_unexpectedvalue(api):
    # 82 is outside the valid day-of-month range.
    with pytest.raises(UnexpectedValueError):
        api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
            day_of_month=82,
            frequency='monthly',
            start_time=datetime.utcnow(),
            end_time=datetime.utcnow() + timedelta(hours=1))
@pytest.mark.vcr()
def test_exclusions_create_enabled_typeerror(api):
    with pytest.raises(TypeError):
        api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'], enabled='yup')
@pytest.mark.vcr()
def test_exclusions_create_standard_user_permissionerror(stdapi):
    # Standard (non-admin) users may not create exclusions.
    with pytest.raises(PermissionError):
        stdapi.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
            start_time=datetime.utcnow(),
            end_time=datetime.utcnow() + timedelta(hours=1))
@pytest.mark.vcr()
def test_exclusions_create_with_selected_network_unexpectedvalueerror(api):
    # network_id must look like a UUID.
    with pytest.raises(UnexpectedValueError):
        api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
            start_time=datetime.utcnow(),
            end_time=datetime.utcnow() + timedelta(hours=1),
            network_id='nope')
@pytest.mark.vcr()
def test_exclusions_create_with_selected_network_typeerror(api):
    with pytest.raises(TypeError):
        api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
            start_time=datetime.utcnow(),
            end_time=datetime.utcnow() + timedelta(hours=1),
            network_id=1)
@pytest.mark.vcr()
def test_exclusions_create_with_selected_network_notfounderror(api):
    # A well-formed but nonexistent network UUID yields NotFoundError.
    with pytest.raises(NotFoundError):
        api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
            start_time=datetime.utcnow(),
            end_time=datetime.utcnow() + timedelta(hours=1),
            network_id='00000000-0000-0000-0000-100000000001')
# --- create(): successful creation for each schedule frequency --------------
# Each test creates an exclusion, validates the response document's shape,
# then deletes the exclusion to clean up.
@pytest.mark.vcr()
def test_exclusions_create_onetime_exclusion(api):
    resp = api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
        start_time=datetime.utcnow(),
        end_time=datetime.utcnow() + timedelta(hours=1))
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    check(resp, 'id', int)
    check(resp, 'last_modification_date', int)
    check(resp, 'members', str)
    check(resp, 'name', str)
    check(resp, 'schedule', dict)
    check(resp['schedule'], 'enabled', bool)
    check(resp['schedule'], 'endtime', 'datetime')
    check(resp['schedule'], 'rrules', dict)
    check(resp['schedule']['rrules'], 'freq', str)
    check(resp['schedule']['rrules'], 'interval', int)
    check(resp['schedule'], 'starttime', 'datetime')
    check(resp['schedule'], 'timezone', str)
    api.exclusions.delete(resp['id'])
@pytest.mark.vcr()
def test_exclusions_create_daily_exclusion(api):
    resp = api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
        start_time=datetime.utcnow(),
        end_time=datetime.utcnow() + timedelta(hours=1),
        frequency='daily')
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    check(resp, 'id', int)
    check(resp, 'last_modification_date', int)
    check(resp, 'members', str)
    check(resp, 'name', str)
    check(resp, 'schedule', dict)
    check(resp['schedule'], 'enabled', bool)
    check(resp['schedule'], 'endtime', 'datetime')
    check(resp['schedule'], 'rrules', dict)
    check(resp['schedule']['rrules'], 'freq', str)
    check(resp['schedule']['rrules'], 'interval', int)
    check(resp['schedule'], 'starttime', 'datetime')
    check(resp['schedule'], 'timezone', str)
    api.exclusions.delete(resp['id'])
@pytest.mark.vcr()
def test_exclusions_create_weekly_exclusion(api):
    # Weekly schedules additionally report the `byweekday` rrule.
    resp = api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
        start_time=datetime.utcnow(),
        end_time=datetime.utcnow() + timedelta(hours=1),
        frequency='weekly',
        weekdays=['mo', 'we', 'fr'])
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    check(resp, 'id', int)
    check(resp, 'last_modification_date', int)
    check(resp, 'members', str)
    check(resp, 'name', str)
    check(resp, 'schedule', dict)
    check(resp['schedule'], 'enabled', bool)
    check(resp['schedule'], 'endtime', 'datetime')
    check(resp['schedule'], 'rrules', dict)
    check(resp['schedule']['rrules'], 'byweekday', str)
    check(resp['schedule']['rrules'], 'freq', str)
    check(resp['schedule']['rrules'], 'interval', int)
    check(resp['schedule'], 'starttime', 'datetime')
    check(resp['schedule'], 'timezone', str)
    api.exclusions.delete(resp['id'])
@pytest.mark.vcr()
def test_exclusions_create_monthly_exclusion(api):
    # Monthly schedules additionally report the `bymonthday` rrule.
    resp = api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
        start_time=datetime.utcnow(),
        end_time=datetime.utcnow() + timedelta(hours=1),
        frequency='monthly',
        day_of_month=15)
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    check(resp, 'id', int)
    check(resp, 'last_modification_date', int)
    check(resp, 'members', str)
    check(resp, 'name', str)
    check(resp, 'schedule', dict)
    check(resp['schedule'], 'enabled', bool)
    check(resp['schedule'], 'endtime', 'datetime')
    check(resp['schedule'], 'rrules', dict)
    check(resp['schedule']['rrules'], 'bymonthday', int)
    check(resp['schedule']['rrules'], 'freq', str)
    check(resp['schedule']['rrules'], 'interval', int)
    check(resp['schedule'], 'starttime', 'datetime')
    check(resp['schedule'], 'timezone', str)
    api.exclusions.delete(resp['id'])
@pytest.mark.vcr()
def test_exclusions_create_yearly_exclusion(api):
    resp = api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
        start_time=datetime.utcnow(),
        end_time=datetime.utcnow() + timedelta(hours=1),
        frequency='yearly')
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    check(resp, 'id', int)
    check(resp, 'last_modification_date', int)
    check(resp, 'members', str)
    check(resp, 'name', str)
    check(resp, 'schedule', dict)
    check(resp['schedule'], 'enabled', bool)
    check(resp['schedule'], 'endtime', 'datetime')
    check(resp['schedule'], 'rrules', dict)
    check(resp['schedule']['rrules'], 'freq', str)
    check(resp['schedule']['rrules'], 'interval', int)
    check(resp['schedule'], 'starttime', 'datetime')
    check(resp['schedule'], 'timezone', str)
    api.exclusions.delete(resp['id'])
@pytest.mark.vcr()
def test_exclusions_create_enabled_false_exclusion(api):
    # A disabled exclusion needs no schedule; verify the response shape and
    # that the schedule is reported as disabled.
    resp = api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'], enabled=False)
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    check(resp, 'id', int)
    check(resp, 'last_modification_date', int)
    check(resp, 'members', str)
    check(resp, 'name', str)
    check(resp, 'schedule', dict)
    check(resp['schedule'], 'enabled', bool)
    # FIX: compare booleans with `is`, not `==` (flake8 E712); the value is
    # already checked to be a bool above.
    assert resp['schedule']['enabled'] is False
    api.exclusions.delete(resp['id'])
# --- create(): network association ------------------------------------------
@pytest.mark.vcr()
def test_exclusions_create_with_selected_network_exclusion(api, network):
    # Creating with an explicit network_id must round-trip that UUID.
    resp = api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
        start_time=datetime.utcnow(),
        end_time=datetime.utcnow() + timedelta(hours=1),
        frequency='yearly',
        network_id=network['uuid'])
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    check(resp, 'id', int)
    check(resp, 'last_modification_date', int)
    check(resp, 'members', str)
    check(resp, 'name', str)
    check(resp, 'network_id', 'uuid')
    check(resp, 'schedule', dict)
    check(resp['schedule'], 'enabled', bool)
    check(resp['schedule'], 'endtime', 'datetime')
    check(resp['schedule'], 'rrules', dict)
    check(resp['schedule']['rrules'], 'freq', str)
    check(resp['schedule']['rrules'], 'interval', int)
    check(resp['schedule'], 'starttime', 'datetime')
    check(resp['schedule'], 'timezone', str)
    details = api.exclusions.details(resp['id'])
    assert details['network_id'] == network['uuid']
    api.exclusions.delete(resp['id'])
@pytest.mark.vcr()
def test_exclusions_create_with_default_network_exclusion(api):
    # Without network_id the exclusion lands on the all-zeros default network.
    resp = api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
        start_time=datetime.utcnow(),
        end_time=datetime.utcnow() + timedelta(hours=1),
        frequency='yearly')
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    check(resp, 'id', int)
    check(resp, 'last_modification_date', int)
    check(resp, 'members', str)
    check(resp, 'name', str)
    check(resp, 'network_id', 'uuid')
    check(resp, 'schedule', dict)
    check(resp['schedule'], 'enabled', bool)
    check(resp['schedule'], 'endtime', 'datetime')
    check(resp['schedule'], 'rrules', dict)
    check(resp['schedule']['rrules'], 'freq', str)
    check(resp['schedule']['rrules'], 'interval', int)
    check(resp['schedule'], 'starttime', 'datetime')
    check(resp['schedule'], 'timezone', str)
    details = api.exclusions.details(resp['id'])
    assert details['network_id'] == '00000000-0000-0000-0000-000000000000'
    api.exclusions.delete(resp['id'])
# --- delete() and edit(): error handling ------------------------------------
@pytest.mark.vcr()
def test_exclusions_delete_notfounderror(api):
    with pytest.raises(NotFoundError):
        api.exclusions.delete(999999)
@pytest.mark.vcr()
def test_exclusions_delete_exclusion(api, exclusion):
    api.exclusions.delete(exclusion['id'])
@pytest.mark.vcr()
def test_exclusions_delete_standard_user_fail(stdapi, exclusion):
    # Standard (non-admin) users may not delete exclusions.
    with pytest.raises(PermissionError):
        stdapi.exclusions.delete(exclusion['id'])
@pytest.mark.vcr()
def test_exclusions_edit_no_exclusion_id_typeerror(api):
    with pytest.raises(TypeError):
        api.exclusions.edit()
@pytest.mark.vcr()
def test_exclusions_edit_exclusion_id_typeerror(api):
    with pytest.raises(TypeError):
        api.exclusions.edit('nope')
@pytest.mark.vcr()
def test_exclusions_edit_members_typeerror(api, exclusion):
    with pytest.raises(TypeError):
        api.exclusions.edit(exclusion['id'], members='192.168.0.1')
@pytest.mark.vcr()
def test_exclusions_edit_name_typeerror(api, exclusion):
    with pytest.raises(TypeError):
        api.exclusions.edit(exclusion['id'], name=1.02)
@pytest.mark.vcr()
def test_exclusions_edit_starttime_typerror(api, exclusion):
    with pytest.raises(TypeError):
        api.exclusions.edit(exclusion['id'], start_time='nope')
@pytest.mark.vcr()
def test_exclusions_edit_timezone_typerror(api, exclusion):
    with pytest.raises(TypeError):
        api.exclusions.edit(exclusion['id'], timezone=1)
@pytest.mark.vcr()
def test_exclusions_edit_timezone_unexpectedvalue(api, exclusion):
    with pytest.raises(UnexpectedValueError):
        api.exclusions.edit(exclusion['id'], timezone='nope')
@pytest.mark.vcr()
def test_exclusions_edit_description_typerror(api, exclusion):
    with pytest.raises(TypeError):
        api.exclusions.edit(exclusion['id'], description=1)
@pytest.mark.vcr()
def test_exclusions_edit_frequency_typerror(api, exclusion):
    with pytest.raises(TypeError):
        api.exclusions.edit(exclusion['id'], frequency=1)
@pytest.mark.vcr()
def test_exclusions_edit_frequency_unexpectedvalue(api, exclusion):
    with pytest.raises(UnexpectedValueError):
        api.exclusions.edit(exclusion['id'], frequency='nope')
@pytest.mark.vcr()
def test_exclusions_edit_interval_typerror(api, exclusion):
    with pytest.raises(TypeError):
        api.exclusions.edit(exclusion['id'], interval='nope')
@pytest.mark.vcr()
def test_exclusions_edit_weekdays_typerror(api, exclusion):
    with pytest.raises(TypeError):
        api.exclusions.edit(exclusion['id'], frequency='Weekly', weekdays='nope')
@pytest.mark.vcr()
def test_exclusions_edit_weekdays_unexpectedvalue(api, exclusion):
    with pytest.raises(UnexpectedValueError):
        api.exclusions.edit(exclusion['id'], frequency='Weekly', weekdays=['MO', 'WE', 'nope'])
@pytest.mark.vcr()
def test_exclusions_edit_dayofmonth_typerror(api, exclusion):
    with pytest.raises(TypeError):
        api.exclusions.edit(exclusion['id'], frequency='monthly', day_of_month='nope')
@pytest.mark.vcr()
def test_exclusions_edit_dayofmonth_unexpectedvalue(api, exclusion):
    # 0 is below the valid day-of-month range.
    with pytest.raises(UnexpectedValueError):
        api.exclusions.edit(exclusion['id'], frequency='monthly', day_of_month=0)
@pytest.mark.vcr()
def test_exclusions_edit_standard_user_permission_error(stdapi, exclusion):
    with pytest.raises(PermissionError):
        stdapi.exclusions.edit(exclusion['id'], name=str(uuid.uuid4()))
@pytest.mark.vcr()
def test_exclusions_edit_network_select_notfounderror(api, exclusion):
    with pytest.raises(NotFoundError):
        api.exclusions.edit(exclusion['id'], network_id='00000000-0000-0000-0000-100000000001')
@pytest.mark.vcr()
def test_exclusions_edit_network_select_unexpectedvalueerror(api, exclusion):
    with pytest.raises(UnexpectedValueError):
        api.exclusions.edit(exclusion['id'], network_id='nope')
@pytest.mark.vcr()
def test_exclusions_edit_network_select_typeerror(api, exclusion):
    with pytest.raises(TypeError):
        api.exclusions.edit(exclusion['id'], network_id=1)
# --- edit(): successful updates ---------------------------------------------
@pytest.mark.vcr()
def test_exclusions_edit_success(api, exclusion):
    resp = api.exclusions.edit(exclusion['id'], name=str(uuid.uuid4()))
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    check(resp, 'id', int)
    check(resp, 'last_modification_date', int)
    check(resp, 'members', str)
    check(resp, 'name', str)
    check(resp, 'schedule', dict)
    check(resp['schedule'], 'enabled', bool)
    check(resp['schedule'], 'endtime', 'datetime')
    check(resp['schedule'], 'rrules', dict)
    check(resp['schedule']['rrules'], 'freq', str)
    check(resp['schedule']['rrules'], 'interval', int)
    check(resp['schedule'], 'starttime', 'datetime')
    check(resp['schedule'], 'timezone', str)
@pytest.mark.vcr()
def test_exclusions_edit_network_success(api, exclusion, network):
    resp = api.exclusions.edit(exclusion['id'], name=str(uuid.uuid4()), network_id=network['uuid'])
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    check(resp, 'id', int)
    check(resp, 'last_modification_date', int)
    check(resp, 'members', str)
    check(resp, 'name', str)
    check(resp, 'network_id', 'uuid')
    check(resp, 'schedule', dict)
    check(resp['schedule'], 'enabled', bool)
    check(resp['schedule'], 'endtime', 'datetime')
    check(resp['schedule'], 'rrules', dict)
    check(resp['schedule']['rrules'], 'freq', str)
    check(resp['schedule']['rrules'], 'interval', int)
    check(resp['schedule'], 'starttime', 'datetime')
    check(resp['schedule'], 'timezone', str)
    assert resp['network_id'] == network['uuid']
@pytest.mark.vcr()
def test_exclusions_edit_freq_onetime_to_daily(api, exclusion):
    # Switching a one-time exclusion to a daily schedule.
    resp = api.exclusions.edit(exclusion['id'], name=str(uuid.uuid4()),
        frequency='daily',
        interval=2)
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    check(resp, 'id', int)
    check(resp, 'last_modification_date', int)
    check(resp, 'members', str)
    check(resp, 'name', str)
    check(resp, 'schedule', dict)
    check(resp['schedule'], 'enabled', bool)
    check(resp['schedule'], 'endtime', 'datetime')
    check(resp['schedule'], 'rrules', dict)
    check(resp['schedule']['rrules'], 'freq', str)
    check(resp['schedule']['rrules'], 'interval', int)
    check(resp['schedule'], 'starttime', 'datetime')
    check(resp['schedule'], 'timezone', str)
    assert resp['schedule']['rrules']['freq'] == 'DAILY'
    assert resp['schedule']['rrules']['interval'] == 2
@pytest.mark.vcr()
def test_exclusions_edit_freq_onetime_to_weekly_valdefault(api, exclusion):
    # With no weekdays supplied, the API defaults byweekday to every day.
    resp = api.exclusions.edit(exclusion['id'], name=str(uuid.uuid4()),
        frequency='weekly',
        interval=2)
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    check(resp, 'id', int)
    check(resp, 'last_modification_date', int)
    check(resp, 'members', str)
    check(resp, 'name', str)
    check(resp, 'schedule', dict)
    check(resp['schedule'], 'enabled', bool)
    check(resp['schedule'], 'endtime', 'datetime')
    check(resp['schedule'], 'rrules', dict)
    check(resp['schedule']['rrules'], 'freq', str)
    check(resp['schedule']['rrules'], 'interval', int)
    check(resp['schedule'], 'starttime', 'datetime')
    check(resp['schedule'], 'timezone', str)
    assert resp['schedule']['rrules']['freq'] == 'WEEKLY'
    assert resp['schedule']['rrules']['interval'] == 2
    assert resp['schedule']['rrules']['byweekday'] == 'SU,MO,TU,WE,TH,FR,SA'
@pytest.mark.vcr()
def test_exclusions_edit_freq_onetime_to_weekly_valassigned(api, exclusion):
    # Explicitly supplied weekdays must be echoed back.
    resp = api.exclusions.edit(exclusion['id'], name=str(uuid.uuid4()),
        frequency='weekly',
        interval=2,
        weekdays=['TH', 'FR'])
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    check(resp, 'id', int)
    check(resp, 'last_modification_date', int)
    check(resp, 'members', str)
    check(resp, 'name', str)
    check(resp, 'schedule', dict)
    check(resp['schedule'], 'enabled', bool)
    check(resp['schedule'], 'endtime', 'datetime')
    check(resp['schedule'], 'rrules', dict)
    check(resp['schedule']['rrules'], 'freq', str)
    check(resp['schedule']['rrules'], 'interval', int)
    check(resp['schedule'], 'starttime', 'datetime')
    check(resp['schedule'], 'timezone', str)
    assert resp['schedule']['rrules']['freq'] == 'WEEKLY'
    assert resp['schedule']['rrules']['interval'] == 2
    assert resp['schedule']['rrules']['byweekday'] == 'TH,FR'
@pytest.mark.vcr()
def test_exclusions_edit_freq_onetime_to_weekly_valavailable(api):
exclusion = api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'],
start_time = datetime.utcnow(),
end_time = datetime.utcnow() + timedelta(hours=1),
frequency='weekly', weekdays=['TH', 'FR'])
resp = api.exclusions.edit(exclusion['id'],
frequency='weekly',
interval=2)
assert isinstance(resp, dict)
check(resp, 'description', str, allow_none=True)
check(resp, 'id', int)
check(resp, 'last_modification_date', int)
check(resp, 'members', str)
check(resp, 'name', str)
check(resp, 'schedule', dict)
check(resp['schedule'], 'enabled', bool)
check(resp['schedule'], 'endtime', 'datetime')
check(resp['schedule'], 'rrules', dict)
check(resp['schedule']['rrules'], 'freq', str)
check(resp['schedule']['rrules'], 'interval', int)
check(resp['schedule'], 'starttime', 'datetime')
check(resp['schedule'], 'timezone', str)
assert resp['schedule']['rrules']['freq'] == 'WEEKLY'
assert resp['schedule']['rrules']['interval'] == 2
assert resp['schedule']['rrules']['byweekday'] == 'TH,FR'
api.exclusions.delete(resp['id'])
@pytest.mark.vcr()
def test_exclusions_edit_enable_false_to_weekly_valdefault(api):
exclusion = api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'], enabled=False)
resp = api.exclusions.edit(exclusion['id'],
enabled=True,
start_time=datetime.utcnow(),
end_time=datetime.utcnow() + timedelta(hours=1),
frequency='weekly',
interval=2)
assert isinstance(resp, dict)
check(resp, 'description', str, allow_none=True)
check(resp, 'id', int)
check(resp, 'last_modification_date', int)
check(resp, 'members', str)
check(resp, 'name', str)
check(resp, 'schedule', dict)
check(resp['schedule'], 'enabled', bool)
check(resp['schedule'], 'endtime', 'datetime')
check(resp['schedule'], 'rrules', dict)
check(resp['schedule']['rrules'], 'freq', str)
check(resp['schedule']['rrules'], 'interval', int)
check(resp['schedule'], 'starttime', 'datetime')
check(resp['schedule'], 'timezone', str)
assert resp['schedule']['rrules']['freq'] == 'WEEKLY'
assert resp['schedule']['rrules']['interval'] == 2
assert resp['schedule']['rrules']['byweekday'] == 'SU,MO,TU,WE,TH,FR,SA'
api.exclusions.delete(resp['id'])
@pytest.mark.vcr()
def test_exclusions_edit_enable_false_to_weekly_valassigned(api):
    """Enabling a disabled exclusion as weekly keeps the explicitly assigned weekdays."""
    exclusion = api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'], enabled=False)
    resp = api.exclusions.edit(
        exclusion['id'],
        enabled=True,
        start_time=datetime.utcnow(),
        end_time=datetime.utcnow() + timedelta(hours=1),
        frequency='weekly',
        interval=2,
        weekdays=['TH', 'FR'],
    )
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    for key, expected in (('id', int), ('last_modification_date', int),
                          ('members', str), ('name', str), ('schedule', dict)):
        check(resp, key, expected)
    schedule = resp['schedule']
    for key, expected in (('enabled', bool), ('endtime', 'datetime'),
                          ('rrules', dict), ('starttime', 'datetime'),
                          ('timezone', str)):
        check(schedule, key, expected)
    rrules = schedule['rrules']
    check(rrules, 'freq', str)
    check(rrules, 'interval', int)
    assert rrules['freq'] == 'WEEKLY'
    assert rrules['interval'] == 2
    assert rrules['byweekday'] == 'TH,FR'
    api.exclusions.delete(resp['id'])
@pytest.mark.vcr()
def test_exclusions_edit_freq_onetime_to_monthly_valddefault(api, exclusion):
    """Switching a one-time exclusion to monthly uses default rrule values."""
    resp = api.exclusions.edit(
        exclusion['id'],
        name=str(uuid.uuid4()),
        frequency='monthly',
        interval=2,
    )
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    for key, expected in (('id', int), ('last_modification_date', int),
                          ('members', str), ('name', str), ('schedule', dict)):
        check(resp, key, expected)
    schedule = resp['schedule']
    for key, expected in (('enabled', bool), ('endtime', 'datetime'),
                          ('rrules', dict), ('starttime', 'datetime'),
                          ('timezone', str)):
        check(schedule, key, expected)
    rrules = schedule['rrules']
    check(rrules, 'freq', str)
    check(rrules, 'interval', int)
    assert rrules['freq'] == 'MONTHLY'
    assert rrules['interval'] == 2
@pytest.mark.vcr()
def test_exclusions_edit_freq_onetime_to_monthly_valassigned(api, exclusion):
    """Switching a one-time exclusion to monthly keeps the assigned day of month."""
    resp = api.exclusions.edit(
        exclusion['id'],
        name=str(uuid.uuid4()),
        frequency='monthly',
        interval=2,
        day_of_month=8,
    )
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    for key, expected in (('id', int), ('last_modification_date', int),
                          ('members', str), ('name', str), ('schedule', dict)):
        check(resp, key, expected)
    schedule = resp['schedule']
    for key, expected in (('enabled', bool), ('endtime', 'datetime'),
                          ('rrules', dict), ('starttime', 'datetime'),
                          ('timezone', str)):
        check(schedule, key, expected)
    rrules = schedule['rrules']
    check(rrules, 'freq', str)
    check(rrules, 'interval', int)
    assert rrules['freq'] == 'MONTHLY'
    assert rrules['interval'] == 2
    assert rrules['bymonthday'] == 8
@pytest.mark.vcr()
def test_exclusions_edit_freq_onetime_to_monthly_valavailable(api):
    """Editing a monthly exclusion without day_of_month preserves the existing value."""
    exclusion = api.exclusions.create(
        str(uuid.uuid4()),
        ['127.0.0.1'],
        start_time=datetime.utcnow(),
        end_time=datetime.utcnow() + timedelta(hours=1),
        frequency='monthly',
        day_of_month=8,
    )
    resp = api.exclusions.edit(exclusion['id'], frequency='monthly', interval=2)
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    for key, expected in (('id', int), ('last_modification_date', int),
                          ('members', str), ('name', str), ('schedule', dict)):
        check(resp, key, expected)
    schedule = resp['schedule']
    for key, expected in (('enabled', bool), ('endtime', 'datetime'),
                          ('rrules', dict), ('starttime', 'datetime'),
                          ('timezone', str)):
        check(schedule, key, expected)
    rrules = schedule['rrules']
    check(rrules, 'freq', str)
    check(rrules, 'interval', int)
    assert rrules['freq'] == 'MONTHLY'
    assert rrules['interval'] == 2
    assert rrules['bymonthday'] == 8
    api.exclusions.delete(resp['id'])
@pytest.mark.vcr()
def test_exclusions_edit_enable_false_to_monthly_valdefault(api):
    """Enabling a disabled exclusion as monthly without a day of month succeeds."""
    exclusion = api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'], enabled=False)
    resp = api.exclusions.edit(
        exclusion['id'],
        enabled=True,
        start_time=datetime.utcnow(),
        end_time=datetime.utcnow() + timedelta(hours=1),
        frequency='monthly',
        interval=2,
    )
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    for key, expected in (('id', int), ('last_modification_date', int),
                          ('members', str), ('name', str), ('schedule', dict)):
        check(resp, key, expected)
    schedule = resp['schedule']
    for key, expected in (('enabled', bool), ('endtime', 'datetime'),
                          ('rrules', dict), ('starttime', 'datetime'),
                          ('timezone', str)):
        check(schedule, key, expected)
    rrules = schedule['rrules']
    check(rrules, 'freq', str)
    check(rrules, 'interval', int)
    assert rrules['freq'] == 'MONTHLY'
    assert rrules['interval'] == 2
    api.exclusions.delete(resp['id'])
@pytest.mark.vcr()
def test_exclusions_edit_enable_false_to_monthly_valassigned(api):
    """Enabling a disabled exclusion as monthly keeps the assigned day of month."""
    exclusion = api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'], enabled=False)
    resp = api.exclusions.edit(
        exclusion['id'],
        enabled=True,
        start_time=datetime.utcnow(),
        end_time=datetime.utcnow() + timedelta(hours=1),
        frequency='monthly',
        interval=2,
        day_of_month=8,
    )
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    for key, expected in (('id', int), ('last_modification_date', int),
                          ('members', str), ('name', str), ('schedule', dict)):
        check(resp, key, expected)
    schedule = resp['schedule']
    for key, expected in (('enabled', bool), ('endtime', 'datetime'),
                          ('rrules', dict), ('starttime', 'datetime'),
                          ('timezone', str)):
        check(schedule, key, expected)
    rrules = schedule['rrules']
    check(rrules, 'freq', str)
    check(rrules, 'interval', int)
    assert rrules['freq'] == 'MONTHLY'
    assert rrules['interval'] == 2
    assert rrules['bymonthday'] == 8
    api.exclusions.delete(resp['id'])
@pytest.mark.vcr()
def test_exclusions_edit_freq_onetime_to_yearly(api, exclusion):
    """Switching a one-time exclusion to a yearly recurrence is reflected in the rrules."""
    resp = api.exclusions.edit(
        exclusion['id'],
        name=str(uuid.uuid4()),
        frequency='yearly',
        interval=2,
    )
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    for key, expected in (('id', int), ('last_modification_date', int),
                          ('members', str), ('name', str), ('schedule', dict)):
        check(resp, key, expected)
    schedule = resp['schedule']
    for key, expected in (('enabled', bool), ('endtime', 'datetime'),
                          ('rrules', dict), ('starttime', 'datetime'),
                          ('timezone', str)):
        check(schedule, key, expected)
    rrules = schedule['rrules']
    check(rrules, 'freq', str)
    check(rrules, 'interval', int)
    assert rrules['freq'] == 'YEARLY'
    assert rrules['interval'] == 2
@pytest.mark.vcr()
def test_exclusions_edit_enable_true_exclusion(api):
    """Enabling a disabled exclusion returns a fully populated schedule."""
    exclusion = api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'], enabled=False)
    resp = api.exclusions.edit(
        exclusion['id'],
        enabled=True,
        start_time=datetime.utcnow(),
        end_time=datetime.utcnow() + timedelta(hours=1),
    )
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    for key, expected in (('id', int), ('last_modification_date', int),
                          ('members', str), ('name', str), ('schedule', dict)):
        check(resp, key, expected)
    schedule = resp['schedule']
    for key, expected in (('enabled', bool), ('endtime', 'datetime'),
                          ('rrules', dict), ('starttime', 'datetime'),
                          ('timezone', str)):
        check(schedule, key, expected)
    check(schedule['rrules'], 'freq', str)
    check(schedule['rrules'], 'interval', int)
    api.exclusions.delete(resp['id'])
@pytest.mark.vcr()
def test_exclusions_edit_interval_exclusion_valdefault(api):
    """When no interval is supplied, the edited exclusion defaults to interval 1."""
    exclusion = api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'], enabled=False)
    resp = api.exclusions.edit(
        exclusion['id'],
        enabled=True,
        start_time=datetime.utcnow(),
        end_time=datetime.utcnow() + timedelta(hours=1),
    )
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    for key, expected in (('id', int), ('last_modification_date', int),
                          ('members', str), ('name', str), ('schedule', dict)):
        check(resp, key, expected)
    schedule = resp['schedule']
    for key, expected in (('enabled', bool), ('endtime', 'datetime'),
                          ('rrules', dict), ('starttime', 'datetime'),
                          ('timezone', str)):
        check(schedule, key, expected)
    rrules = schedule['rrules']
    check(rrules, 'freq', str)
    check(rrules, 'interval', int)
    assert rrules['interval'] == 1
    api.exclusions.delete(resp['id'])
@pytest.mark.vcr()
def test_exclusions_edit_interval_exclusion_valassigned(api):
    """An explicitly assigned interval is stored on the edited exclusion."""
    exclusion = api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'], enabled=False)
    resp = api.exclusions.edit(
        exclusion['id'],
        enabled=True,
        interval=3,
        start_time=datetime.utcnow(),
        end_time=datetime.utcnow() + timedelta(hours=1),
    )
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    for key, expected in (('id', int), ('last_modification_date', int),
                          ('members', str), ('name', str), ('schedule', dict)):
        check(resp, key, expected)
    schedule = resp['schedule']
    for key, expected in (('enabled', bool), ('endtime', 'datetime'),
                          ('rrules', dict), ('starttime', 'datetime'),
                          ('timezone', str)):
        check(schedule, key, expected)
    rrules = schedule['rrules']
    check(rrules, 'freq', str)
    check(rrules, 'interval', int)
    assert rrules['interval'] == 3
    api.exclusions.delete(resp['id'])
@pytest.mark.vcr()
def test_exclusions_edit_interval_exclusion_valavailable(api):
    """Editing with no arguments preserves the interval already on the exclusion."""
    exclusion = api.exclusions.create(
        str(uuid.uuid4()),
        ['127.0.0.1'],
        enabled=True,
        frequency='Weekly',
        interval=2,
        start_time=datetime.utcnow(),
        end_time=datetime.utcnow() + timedelta(hours=1),
    )
    resp = api.exclusions.edit(exclusion['id'])
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    for key, expected in (('id', int), ('last_modification_date', int),
                          ('members', str), ('name', str), ('schedule', dict)):
        check(resp, key, expected)
    schedule = resp['schedule']
    for key, expected in (('enabled', bool), ('endtime', 'datetime'),
                          ('rrules', dict), ('starttime', 'datetime'),
                          ('timezone', str)):
        check(schedule, key, expected)
    rrules = schedule['rrules']
    check(rrules, 'freq', str)
    check(rrules, 'interval', int)
    assert rrules['interval'] == 2
    api.exclusions.delete(resp['id'])
@pytest.mark.vcr()
def test_exclusions_edit_enabled_false_exclusion(api):
    """Editing the members of a disabled exclusion leaves the schedule disabled."""
    exclusion = api.exclusions.create(str(uuid.uuid4()), ['127.0.0.1'], enabled=False)
    resp = api.exclusions.edit(exclusion['id'], members=['127.0.0.2'])
    assert isinstance(resp, dict)
    check(resp, 'description', str, allow_none=True)
    for key, expected in (('id', int), ('last_modification_date', int),
                          ('members', str), ('name', str), ('schedule', dict)):
        check(resp, key, expected)
    check(resp['schedule'], 'enabled', bool)
    api.exclusions.delete(resp['id'])
@pytest.mark.vcr()
def test_exclusions_list(api):
    """Listing exclusions returns typed records; schedule details only when enabled."""
    items = api.exclusions.list()
    assert isinstance(items, list)
    for exclusion in items:
        check(exclusion, 'description', str, allow_none=True)
        for key, expected in (('id', int), ('last_modification_date', int),
                              ('members', str), ('name', str), ('schedule', dict)):
            check(exclusion, key, expected)
        schedule = exclusion['schedule']
        check(schedule, 'enabled', bool)
        # Disabled schedules omit the recurrence details entirely.
        if schedule['enabled']:
            for key, expected in (('endtime', 'datetime'), ('rrules', dict),
                                  ('starttime', 'datetime'), ('timezone', str)):
                check(schedule, key, expected)
            check(schedule['rrules'], 'freq', str)
            check(schedule['rrules'], 'interval', int)
| 42.360902
| 99
| 0.633146
| 4,594
| 39,438
| 5.321071
| 0.030257
| 0.126652
| 0.14326
| 0.072448
| 0.94641
| 0.940642
| 0.933115
| 0.920515
| 0.893721
| 0.873267
| 0
| 0.015217
| 0.193494
| 39,438
| 931
| 100
| 42.360902
| 0.753325
| 0
| 0
| 0.803488
| 0
| 0
| 0.175562
| 0.018357
| 0
| 0
| 0
| 0
| 0.077907
| 1
| 0.082558
| false
| 0.001163
| 0.005814
| 0
| 0.089535
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cb15c01a83ea971849ed448025b2bd28df337f06
| 12,708
|
py
|
Python
|
21-fs-ias-lec/14-BAC News/app/gui/style.py
|
Laellekoenig/BACnet
|
fbc8e23a626a60655c9f6e8fb6025eb7aac3cc07
|
[
"MIT"
] | 8
|
2020-03-17T21:12:18.000Z
|
2021-12-12T15:55:54.000Z
|
21-fs-ias-lec/14-BAC News/app/gui/style.py
|
Laellekoenig/BACnet
|
fbc8e23a626a60655c9f6e8fb6025eb7aac3cc07
|
[
"MIT"
] | 2
|
2021-07-19T06:18:43.000Z
|
2022-02-10T12:17:58.000Z
|
21-fs-ias-lec/14-BAC News/app/gui/style.py
|
Laellekoenig/BACnet
|
fbc8e23a626a60655c9f6e8fb6025eb7aac3cc07
|
[
"MIT"
] | 25
|
2020-03-20T09:32:45.000Z
|
2021-07-18T18:12:59.000Z
|
from PyQt5 import QtWidgets as qtw
from PyQt5 import QtGui as qtg
from PyQt5 import QtCore as qtc
def setArticleStyle(article):
    """Install the reader style sheet on the article widget's text document.

    Only the rich-text CSS subset that QTextDocument understands is used.
    """
    # The sheet is assembled from adjacent string literals, one per selector.
    rules = (
        "body {font-family: Merriweather;} "
        "p {font-size: 18px; line-height: 1.5; font-weight: 300;} "
        "h1 {font-weight: bold; font-style: italic;} "
        "h3 {color: lightgrey;}"
        "h2 {color: grey;}"
        "ul {text-indent: 0px; margin-left: 0px; padding-left: 0px;}"
        "li {list-style-type: disc; font-size: 16px; line-height: 1.5; font-style: italic; margin-left: 0px; padding-left: 0px;}"
    )
    article.document().setDefaultStyleSheet(rules)
def getLightStyleSheet():
    """Return the application-wide Qt style sheet for light mode.

    Covers the generic widget classes (QWidget, QTextBrowser, QListWidget,
    QPushButton, QComboBox, QLineEdit) plus objectName-keyed widgets
    (#srfButton, #loginBtn, ...).

    Fix: the ':pressed' rules for #manualButton and #manualButton2 previously
    read "border-style none;" (missing colon), which Qt's style-sheet parser
    silently ignores; corrected to "border-style: none;".
    NOTE(review): QPushButton is styled twice; the later block wins for
    font-weight/font-size — presumably intentional, confirm before merging.
    """
    stylesheet = """
    QWidget {
        background-color: #f7f7f7;
        color: black;
        padding: 0px;
        margin: 0px;
        font-family: Merriweather;
    }
    QTextBrowser {
        background-color: #f7f7f7;
        border-style: none;
        border-left: 5px;
        padding-right: 100px;
        padding-left: 50px;
        padding-top: 10px;
        margin-right: 0px;
    }
    QTextBrowser QScrollBar {
        height: 0px;
        width: 0px;
    }
    QListWidget QScrollBar {
        height: 0px;
        width: 0px;
    }
    QPushButton {
        font-weight: light;
        font-size: 15px;
    }
    QListWidget {
        font-family: Assistant;
        font-weight: 400;
        font-size: 18px;
        line-height: 2;
        border-style: none;
        spacing: 10;
    }
    QListWidget::Item {
        padding-top: 5px;
        padding-bottom: 5px;
        border-bottom: 1px solid lightgrey;
        border-radius: 3px;
    }
    QListWidget::Item:selected {
        color: #f7f7f7;
        background-color: black;
        margin: 0px;
        padding: 0px;
        border-radius: 3px;
    }
    #logo {
        font-size: 40px;
        font-weight: bold;
        font-family: Walbaum Fraktur;
    }
    QPushButton {
        height: 50%;
        border-style: none;
        font-family: Assistant;
        font-weight: 500;
        font-size: 16px;
    }
    #main {
    }
    #container {
        border-bottom: 1px solid lightgrey;
    }
    #selected {
        color: red;
    }
    #srfButton {
        background-color: #AF011E;
        color: #f7f7f7;
        border-radius: 3px;
    }
    #srfButton:pressed {
        background-color: #f7f7f7;
        color: #AF011E;
        border-style: solid;
        border-width: 1px;
        border-color: #AF011E;
    }
    #blueButton {
        background-color: #0C3C91;
        color: #f7f7f7;
        border-radius: 3px;
    }
    #blueButton:pressed {
        background-color: #f7f7f7;
        color: #0C3C91;
        border-style: solid;
        border-width: 1px;
        border-color: #0C3C91;
    }
    #downloadTitle {
        font-size: 20px;
    }
    #toggleTrue {
        color: grey;
    }
    #toggleFalse {
        color: black;
    }
    #bacButton {
        color: #f7f7f7;
        background-color: black;
        border-radius: 3px;
        border-style: none;
    }
    #bacButton:pressed {
        color: black;
        background-color: #f7f7f7;
        border-style: solid;
        border-width: 1px;
        border-color: black;
    }
    #manualButton {
        color: black;
        background-color: #f7f7f7;
        border-radius: 3px;
        border-style: none;
        border: 1px solid black
    }
    #manualButton:pressed {
        color: #f7f7f7;
        background-color: black;
        border-style: none;
    }
    #manualButton2 {
        margin-top: 50px;
        color: black;
        background-color: #f7f7f7;
        border-radius: 3px;
        border-style: none;
        border: 1px solid black
    }
    #manualButton2:pressed {
        color: #f7f7f7;
        background-color: black;
        border-style: none;
    }
    #gif {
        margin-bottom: 100px;
    }
    #connectButton {
        background-color: black;
        color: #f7f7f7;
        border-radius: 5px;
        padding: 5px;
    }
    #connectButton:pressed {
        background-color: #f7f7f7;
        border-style: solid;
        border-radius: 1px;
        border-color: black;
        color: black;
    }
    #lan-title {
        font-family: Assistant;
        font-size: 20px;
        margin-bottom: 10px;
        margin-top: 5px;
    }
    #server-text {
        font-family: Assistant;
        font-size: 50px;
        margin-bottom: 40px;
        text-align: center;
    }
    #filter-layout {
        margin: 0px;
        padding: 0px;
    }
    #filter-btn {
        padding: 0px;
        margin: 0px;
        height: 20%;
    }
    #filter-btn-selected {
        padding: 0px;
        margin: 0px;
        height: 20%;
        color: grey;
    }
    #bookmark {
        padding: 0px;
        margin: 0px;
        font-size: 100px;
        height: 40px;
    }
    #bookmark-btn {
    }
    #combo {
        margin-bottom: 5px;
    }
    QComboBox {
        color: black;
        background-color: #f7f7f7;
        border: 1px solid black;
        border-radius: 3px;
        padding: 5px;
        font-family: Assistant;
        font-size: 16px;
    }
    QComboBox::Item {
        background-color: black;
        color: #f7f7f7;
    }
    QComboBox::Item:selected {
        background-color: #f7f7f7;
        color: black;
    }
    QComboBox::drop-down {
        border-radius: 3px;
    }
    #client-text {
        font-family: Assistant;
        font-size: 35px;
        text-align: center;
        margin-bottom: 10px;
    }
    QLineEdit {
        font-family: Assistant;
        font-size: 25px;
        border: 1px solid #f7f7f7;
        background-color: black;
        padding: 5px;
        border-radius: 3px;
    }
    QLineEdit:focus {
        background: #f7f7f7;
        color: black;
        border: 1px solid black;
        outline: none;
        show-decoration-selected: 0;
    }
    #bt-client-btn {
        color: #f7f7f7;
        background-color: black;
        border-radius: 3px;
        border-style: none;
        margin-top: 10px;
        margin-bottom: 100px;
    }
    #bt-client-btn:pressed {
        color: black;
        background-color: #f7f7f7;
        border: 1px solid black;
        border-radius: 3px;
        margin-top: 10px;
    }
    #loginBtn {
        color: #f7f7f7;
        background-color: black;
        border-radius: 3px;
        border-style: none;
        margin-bottom: 50px;
    }
    #loginBtn:pressed {
        color: black;
        background-color: #f7f7f7;
        border-style: solid;
        border-width: 1px;
        border-color: black;
    }
    #bac-text {
        font-family: Assistant;
        font-size: 35px;
        text-align: center;
        margin-bottom: 10px;
    }"""
    return stylesheet
def getDarkStyleSheet():
    """Return the application-wide Qt style sheet for dark mode.

    Mirrors getLightStyleSheet() with #282828 backgrounds and #f7f7f7 text.

    Fix: the second QPushButton block previously read "font-size 16px;"
    (missing colon), which Qt's style-sheet parser silently ignores;
    corrected to "font-size: 16px;".
    """
    stylesheet = """
    QWidget {
        background-color: #282828;
        color: #f7f7f7;
        padding: 0px;
        margin: 0px;
        font-family: Merriweather;
    }
    QTextBrowser {
        background-color: #282828;
        color: #f7f7f7;
        border-style: none;
        border-left: 5px;
        padding-right: 100px;
        padding-left: 50px;
        padding-top: 10px;
    }
    QTextBrowser QScrollBar {
        height: 0px;
        width: 0px;
    }
    QListWidget QScrollBar {
        height: 0px;
        width: 0px;
    }
    QPushButton {
        font-weight: light;
        font-size: 15px;
    }
    QListWidget {
        font-family: Assistant;
        font-weight: 400;
        font-size: 18px;
        line-height: 2;
        border-style: none;
        spacing: 10;
    }
    QListWidget::Item {
        padding-top: 5px;
        padding-bottom: 5px;
        border-bottom: 1px solid lightgrey;
    }
    QListWidget::Item:selected {
        color: #282828;
        background-color: #f7f7f7;
        margin: 0px;
        padding: 0px;
        border-radius: 3px;
    }
    #logo {
        font-size: 40px;
        font-weight: bold;
        font-family: Walbaum Fraktur;
    }
    QPushButton {
        height: 50%;
        border-style: none;
        font-family: Assistant;
        font-weight: 500;
        font-size: 16px;
    }
    #main {
    }
    #container {
        border-bottom: 1px solid lightgrey;
    }
    #selected {
        color: red;
    }
    #srfButton {
        background-color: #f7f7f7;
        color: #AF011E;
        border-radius: 3px;
        border-style: none;
    }
    #srfButton:pressed {
        background-color: #AF011E;
        color: #f7f7f7;
        border-style: solid;
        border-width: 1px;
        border-color: #f7f7f7;
    }
    #blueButton {
        background-color: #f7f7f7;
        color: #0C3C91;
        border-radius: 3px;
    }
    #blueButton:pressed {
        background-color: #0C3C91;
        color: #f7f7f7;
        border-style: solid;
        border-width: 1px;
        border-color: #f7f7f7;
    }
    #downloadTitle {
        font-size: 20px;
    }
    #toggleTrue {
        color: grey;
    }
    #toggleFalse {
        color: #f7f7f7;
    }
    #bacButton {
        color: #282828;
        background-color: #f7f7f7;
        border-radius: 3px;
        border-style: none;
    }
    #bacButton:pressed {
        color: #f7f7f7;
        background-color: #282828;
        border-style: solid;
        border-width: 1px;
        border-color: #f7f7f7;
    }
    #manualButton {
        color: #f7f7f7;
        background-color: #282828;
        border-radius: 3px;
        border-style: none;
        border: 1px solid #f7f7f7;
    }
    #manualButton:pressed {
        color: #282828;
        background-color: #f7f7f7;
    }
    #manualButton2 {
        margin-top: 50px;
        color: #f7f7f7;
        background-color: #282828;
        border-radius: 3px;
        border-style: none;
        border: 1px solid #f7f7f7;
    }
    #manualButton2:pressed {
        color: #282828;
        background-color: #f7f7f7;
        border-style: none;
    }
    #gif {
        margin-bottom: 100px;
    }
    #lan-title {
        font-family: Assistant;
        font-size: 20px;
        margin-bottom: 10px;
        margin-top: 5px;
    }
    #server-text {
        font-family: Assistant;
        font-size: 50px;
        margin-bottom: 40px;
        text-align: center;
    }
    #filter-layout {
        margin: 0px;
        padding: 0px;
    }
    #filter-btn {
        padding: 0px;
        margin: 0px;
        height: 20%;
        color: #f7f7f7;
    }
    #filter-btn-selected {
        padding: 0px;
        margin: 0px;
        height: 20%;
        color: grey;
    }
    #bookmark {
        padding: 0px;
        margin: 0px;
        font-size: 100px;
        height: 40px;
    }
    #bookmark-btn {
    }
    #combo {
        margin-bottom: 5px;
    }
    QComboBox {
        color: #f7f7f7;
        background-color: #282828;
        border: 1px solid #f7f7f7;
        border-radius: 3px;
        padding: 5px;
        font-family: Assistant;
        font-size: 16px;
    }
    QComboBox::Item {
        background-color: #f7f7f7;
        color: #282828;
    }
    QComboBox::Item:selected {
        background-color: #282828;
        color: #f7f7f7;
    }
    QComboBox::drop-down {
        border-radius: 3px;
    }
    #client-text {
        font-family: Assistant;
        font-size: 35px;
        text-align: center;
        margin-bottom: 10px;
    }
    QLineEdit {
        font-family: Assistant;
        font-size: 25px;
        border: 1px solid #282828;
        background-color: #f7f7f7;
        padding: 5px;
        border-radius: 3px;
    }
    QLineEdit:focus {
        background: #282828;
        color: #f7f7f7;
        border: 1px solid #f7f7f7;
        outline: none;
        show-decoration-selected: 0;
    }
    #bt-client-btn {
        color: #282828;
        background-color: #f7f7f7;
        border-radius: 3px;
        border-style: none;
        margin-top: 10px;
        margin-bottom: 100px;
    }
    #bt-client-btn:pressed {
        color: #f7f7f7;
        background-color: #282828;
        border: 1px solid #f7f7f7;
        border-radius: 3px;
        margin-top: 10px;
    }
    #loginBtn {
        color: #282828;
        background-color: #f7f7f7;
        border-radius: 3px;
        border-style: none;
        margin-bottom: 50px;
    }
    #loginBtn:pressed {
        color: #f7f7f7;
        background-color: #282828;
        border-style: solid;
        border-width: 1px;
        border-color: #f7f7f7;
    }
    #bac-text {
        font-family: Assistant;
        font-size: 35px;
        text-align: center;
        margin-bottom: 10px;
    }"""
    return stylesheet
| 22.856115
| 129
| 0.522269
| 1,192
| 12,708
| 5.567953
| 0.116611
| 0.082869
| 0.056501
| 0.055447
| 0.875396
| 0.816483
| 0.764954
| 0.72141
| 0.687961
| 0.651198
| 0
| 0.072386
| 0.366226
| 12,708
| 555
| 130
| 22.897297
| 0.751676
| 0.006138
| 0
| 0.778793
| 0
| 0.003656
| 0.966102
| 0.023919
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005484
| false
| 0
| 0.005484
| 0
| 0.016453
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cb490eb12bf4b3c5f7f763cfca8eb3caf56bd15b
| 3,981
|
py
|
Python
|
QIIDderivative/term1.py
|
avanteijlingen/lipid-md
|
825c7bc982bc920a24e64e272354a7317eac9cbd
|
[
"MIT"
] | 2
|
2020-11-02T14:55:31.000Z
|
2021-05-04T05:12:14.000Z
|
QIIDderivative/term1.py
|
avanteijlingen/lipid-md
|
825c7bc982bc920a24e64e272354a7317eac9cbd
|
[
"MIT"
] | null | null | null |
QIIDderivative/term1.py
|
avanteijlingen/lipid-md
|
825c7bc982bc920a24e64e272354a7317eac9cbd
|
[
"MIT"
] | 1
|
2020-11-02T16:35:21.000Z
|
2020-11-02T16:35:21.000Z
|
# -*- coding: utf-8 -*-
"""
author: Chris Brasnett, University of Bristol, christopher.brasnett@bristol.ac.uk
"""
import numpy as np
import math
pi=math.pi
def term1(x, y, z, m):
    """Return the gradient and second-derivative entries of the nodal term.

    The nine entries appear to be the first and second partial derivatives of
    F = 3*(cos*cos*cos + cos*sin*sin + sin*cos*sin + sin*sin*cos) with every
    trig argument equal to pi*m*coordinate — NOTE(review): matches the QIID
    (diamond) nodal approximation by inspection; confirm against the project's
    surface definition.

    Parameters
    ----------
    x, y, z : float
        Evaluation point.
    m : float
        Frequency multiplier; every trig factor takes the argument pi*m*coord.

    Returns
    -------
    numpy.ndarray
        Nine values in the original order:
        [t1_x, t1_y, t1_z, t1_xx, t1_xy, t1_yy, t1_yz, t1_zz, t1_xz].
    """
    # Evaluate each trig factor once; the original recomputed them dozens of
    # times.  Operation order below matches the original expressions exactly,
    # so results are bit-identical.
    w = math.pi * m
    sx, cx = math.sin(w * x), math.cos(w * x)
    sy, cy = math.sin(w * y), math.cos(w * y)
    sz, cz = math.sin(w * z), math.cos(w * z)

    # First-derivative building blocks (each reused in three components).
    a = 3 * math.pi * m * sx * cy * cz
    b = 3 * math.pi * m * sx * sy * sz
    c = 3 * cy * sz * math.pi * m * cx
    d = 3 * cz * math.pi * m * cx * sy

    t1_x = -a - b + c + d
    t1_y = -d + c - b + a
    t1_z = -c + d + a - b

    # Second-derivative building blocks.
    e = 3 * math.pi ** 2 * m ** 2 * cx * cy * cz
    f = 3 * math.pi ** 2 * m ** 2 * cx * sy * sz
    g = 3 * cy * sz * math.pi ** 2 * m ** 2 * sx
    h = 3 * cz * math.pi ** 2 * m ** 2 * sx * sy

    t1_xx = -e - f - g - h
    t1_xy = h - g - f + e
    # The xx, yy and zz expressions were textually identical in the original
    # source; compute once and reuse.
    t1_yy = t1_xx
    t1_yz = f + e - h - g
    t1_zz = t1_xx
    t1_xz = g - h + e - f

    return np.array([t1_x, t1_y, t1_z, t1_xx, t1_xy, t1_yy, t1_yz, t1_zz, t1_xz])
| 60.318182
| 104
| 0.404421
| 764
| 3,981
| 2.09555
| 0.060209
| 0.224859
| 0.30356
| 0.337289
| 0.869457
| 0.869457
| 0.869457
| 0.869457
| 0.869457
| 0.868207
| 0
| 0.043735
| 0.362472
| 3,981
| 66
| 105
| 60.318182
| 0.587076
| 0.036674
| 0
| 0.341463
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0.04878
| 0
| 0.097561
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
cb84815978770b557b158e856e0bea84fde18836
| 1,453
|
py
|
Python
|
apimaster/migrations/0004_auto_20211129_1749.py
|
romsha28/hospital_python
|
1bb86266223df5084321917169156aaec1c5e318
|
[
"Apache-2.0"
] | null | null | null |
apimaster/migrations/0004_auto_20211129_1749.py
|
romsha28/hospital_python
|
1bb86266223df5084321917169156aaec1c5e318
|
[
"Apache-2.0"
] | 1
|
2021-10-18T08:56:11.000Z
|
2021-10-18T08:56:11.000Z
|
apimaster/migrations/0004_auto_20211129_1749.py
|
romsha28/hospital_python
|
1bb86266223df5084321917169156aaec1c5e318
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.2.8 on 2021-11-29 12:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: relax seven UserProfiles columns so that
    blank and NULL values are allowed.  Do not hand-edit the field
    definitions; regenerate with `makemigrations` if the model changes.
    """

    dependencies = [
        # Must be applied on top of the immediately preceding migration.
        ('apimaster', '0003_auto_20211129_1747'),
    ]

    operations = [
        # Audit/bookkeeping integer columns become nullable.
        migrations.AlterField(
            model_name='userprofiles',
            name='created_by',
            field=models.IntegerField(blank=True, null=True),
        ),
        # Soft-delete timestamp becomes nullable.
        migrations.AlterField(
            model_name='userprofiles',
            name='deleted_at',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='userprofiles',
            name='deleted_by',
            field=models.IntegerField(blank=True, null=True),
        ),
        # Foreign-key-like integer references also become optional.
        migrations.AlterField(
            model_name='userprofiles',
            name='profile_id',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='userprofiles',
            name='updated_at',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='userprofiles',
            name='updated_by',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='userprofiles',
            name='users_id',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
| 29.653061
| 62
| 0.569167
| 134
| 1,453
| 6.044776
| 0.320896
| 0.17284
| 0.216049
| 0.250617
| 0.788889
| 0.788889
| 0.733333
| 0.733333
| 0.679012
| 0.679012
| 0
| 0.031219
| 0.316586
| 1,453
| 48
| 63
| 30.270833
| 0.784491
| 0.03097
| 0
| 0.666667
| 1
| 0
| 0.130868
| 0.016358
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.02381
| 0
| 0.095238
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cb9a0e3b4c3e3c19aa939ea3512ea45eb18d3dce
| 4,538
|
py
|
Python
|
tests/test_legacy.py
|
rot26/pipenv
|
8378a1b104f2d817790a05da370bef0a1b00f452
|
[
"MIT"
] | null | null | null |
tests/test_legacy.py
|
rot26/pipenv
|
8378a1b104f2d817790a05da370bef0a1b00f452
|
[
"MIT"
] | null | null | null |
tests/test_legacy.py
|
rot26/pipenv
|
8378a1b104f2d817790a05da370bef0a1b00f452
|
[
"MIT"
] | null | null | null |
import os
from mock import patch, Mock, PropertyMock
from pipenv.core import (
pip_install, pip_download
)
# Tell pipenv to ignore activated virtualenvs.
# Set at import time so every test in this module exercises pipenv's own
# project/source resolution instead of the developer's currently active venv.
os.environ['PIPENV_IGNORE_VIRTUALENVS'] = 'True'
class TestPipenv():
    """Legacy behavior tests: pip_install/pip_download must try the configured
    sources in order and return the result of the last command executed.

    The six scenarios were copy-pasted with only the source list, return
    codes and command differing; the shared mock setup now lives in
    _run_against_sources().
    """

    # Source dicts reused by the scenarios below (the 'dontexistis' spelling
    # is intentional and preserved from the original tests).
    _GOOD = {'url': 'http://existis.in.pypi/simple'}
    _BAD = {'url': 'http://dontexistis.in.pypi/simple'}

    @staticmethod
    def _run_against_sources(command, mocked_delegator, mocked_sources,
                             sources, return_codes):
        """Wire the mocks so `command` sees `sources` and each delegator call
        exits with the next code in `return_codes`.

        Returns (result, attempts) where attempts[i] is the Mock returned by
        the i-th delegator invocation.
        """
        mocked_sources.return_value = sources
        attempts = []
        for code in return_codes:
            attempt = Mock()
            attempt.return_code = code
            attempts.append(attempt)
        mocked_delegator.side_effect = attempts
        return command('package'), attempts

    @patch('pipenv.project.Project.sources', new_callable=PropertyMock)
    @patch('delegator.run')
    def test_pip_install_should_try_every_possible_source(self, mocked_delegator, mocked_sources):
        # First source fails, second succeeds: the fallback must be used.
        c, _ = self._run_against_sources(
            pip_install, mocked_delegator, mocked_sources,
            [self._BAD, self._GOOD], [1, 0])
        assert c.return_code == 0

    @patch('pipenv.project.Project.sources', new_callable=PropertyMock)
    @patch('delegator.run')
    def test_pip_install_should_return_the_last_error_if_no_cmd_worked(self, mocked_delegator, mocked_sources):
        # Every source fails: the final failing command is returned.
        c, attempts = self._run_against_sources(
            pip_install, mocked_delegator, mocked_sources,
            [self._BAD, self._BAD], [1, 1])
        assert c.return_code == 1
        assert c == attempts[1]

    @patch('pipenv.project.Project.sources', new_callable=PropertyMock)
    @patch('delegator.run')
    def test_pip_install_should_return_the_first_cmd_that_worked(self, mocked_delegator, mocked_sources):
        # First source already succeeds: no further attempt is consulted.
        c, attempts = self._run_against_sources(
            pip_install, mocked_delegator, mocked_sources,
            [self._GOOD, self._GOOD], [0, 0])
        assert c.return_code == 0
        assert c == attempts[0]

    @patch('pipenv.project.Project.sources', new_callable=PropertyMock)
    @patch('delegator.run')
    def test_pip_download_should_try_every_possible_source(self, mocked_delegator, mocked_sources):
        c, _ = self._run_against_sources(
            pip_download, mocked_delegator, mocked_sources,
            [self._BAD, self._GOOD], [1, 0])
        assert c.return_code == 0

    @patch('pipenv.project.Project.sources', new_callable=PropertyMock)
    @patch('delegator.run')
    def test_pip_download_should_return_the_last_error_if_no_cmd_worked(self, mocked_delegator, mocked_sources):
        c, attempts = self._run_against_sources(
            pip_download, mocked_delegator, mocked_sources,
            [self._BAD, self._BAD], [1, 1])
        assert c.return_code == 1
        assert c == attempts[1]

    @patch('pipenv.project.Project.sources', new_callable=PropertyMock)
    @patch('delegator.run')
    def test_pip_download_should_return_the_first_cmd_that_worked(self, mocked_delegator, mocked_sources):
        c, attempts = self._run_against_sources(
            pip_download, mocked_delegator, mocked_sources,
            [self._GOOD, self._GOOD], [0, 0])
        assert c.return_code == 0
        assert c == attempts[0]
| 39.807018
| 112
| 0.666152
| 554
| 4,538
| 5.088448
| 0.111913
| 0.127705
| 0.099326
| 0.08088
| 0.938631
| 0.938631
| 0.938631
| 0.938631
| 0.938631
| 0.938631
| 0
| 0.005134
| 0.227413
| 4,538
| 113
| 113
| 40.159292
| 0.798916
| 0.009696
| 0
| 0.811881
| 0
| 0
| 0.164069
| 0.045637
| 0
| 0
| 0
| 0
| 0.09901
| 1
| 0.059406
| false
| 0
| 0.029703
| 0
| 0.09901
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cbc2bdfb9b6602c561cba454bb9da8b2c7944a2e
| 4,892
|
py
|
Python
|
tests/test_range.py
|
flzara/shaystack
|
6bf815f25f3a5d64494ec1c4a34a7b23ea0ad4ce
|
[
"BSD-2-Clause"
] | 9
|
2021-04-30T13:04:31.000Z
|
2022-01-11T14:11:53.000Z
|
tests/test_range.py
|
flzara/shaystack
|
6bf815f25f3a5d64494ec1c4a34a7b23ea0ad4ce
|
[
"BSD-2-Clause"
] | 7
|
2021-03-19T07:31:22.000Z
|
2021-03-26T12:31:45.000Z
|
tests/test_range.py
|
flzara/shaystack
|
6bf815f25f3a5d64494ec1c4a34a7b23ea0ad4ce
|
[
"BSD-2-Clause"
] | 5
|
2021-04-29T11:51:04.000Z
|
2022-02-22T21:10:19.000Z
|
from datetime import datetime, date, timedelta
import pytz
from shaystack.providers.haystack_interface import parse_date_range, _DATETIME_MAX_TZ, _DATETIME_MIN_TZ
_TZ_PARIS = pytz.timezone("Europe/Paris")


def test_date_range_empty():
    """An empty range string expands to the full datetime span, in UTC."""
    start, end = parse_date_range("", _TZ_PARIS)
    assert start == datetime.min.replace(tzinfo=pytz.UTC)
    assert end == datetime.max.replace(tzinfo=pytz.UTC)
def test_date_range_today():
    """'today' covers [today 00:00, tomorrow 00:00) in the target tz."""
    start, end = parse_date_range("today", _TZ_PARIS)
    today_midnight = datetime.combine(date.today(), datetime.min.time()).replace(tzinfo=_TZ_PARIS)
    assert start == today_midnight
    assert end == today_midnight + timedelta(days=1)
def test_date_range_today_date():
    """'today,<date>' starts at today's midnight and ends at the date's midnight."""
    start, end = parse_date_range("today,2100-01-01", _TZ_PARIS)
    today_midnight = datetime.combine(date.today(), datetime.min.time()).replace(tzinfo=_TZ_PARIS)
    assert start == today_midnight
    assert end == datetime.combine(datetime(2100, 1, 1), datetime.min.time()).replace(tzinfo=_TZ_PARIS)
def test_date_range_date_today():
    """'<date>,today' ends at tomorrow's midnight, i.e. today is inclusive."""
    start, end = parse_date_range("2021-01-01,today", _TZ_PARIS)
    assert start == datetime.combine(datetime(2021, 1, 1), datetime.min.time()).replace(tzinfo=_TZ_PARIS)
    today_midnight = datetime.combine(date.today(), datetime.min.time()).replace(tzinfo=_TZ_PARIS)
    assert end == today_midnight + timedelta(days=1)
def test_date_range_yesterday():
    """'yesterday' covers [yesterday 00:00, today 00:00)."""
    start, end = parse_date_range("yesterday", _TZ_PARIS)
    yesterday_midnight = datetime.combine(date.today() - timedelta(days=1), datetime.min.time()).replace(tzinfo=_TZ_PARIS)
    assert start == yesterday_midnight
    assert end == yesterday_midnight + timedelta(days=1)
def test_date_range_yesterday_date():
    """'yesterday,<date>' starts at yesterday's midnight, ends at the date's midnight."""
    start, end = parse_date_range("yesterday,2100-01-01", _TZ_PARIS)
    yesterday_midnight = datetime.combine(date.today() - timedelta(days=1), datetime.min.time()).replace(tzinfo=_TZ_PARIS)
    assert start == yesterday_midnight
    assert end == datetime.combine(date(2100, 1, 1), datetime.min.time()).replace(tzinfo=_TZ_PARIS)
def test_date_range_date_yesterday():
    """'<date>,yesterday' ends at today's midnight (yesterday is inclusive)."""
    start, end = parse_date_range("2021-01-01,yesterday", _TZ_PARIS)
    assert start == datetime.combine(date(2021, 1, 1), datetime.min.time()).replace(tzinfo=_TZ_PARIS)
    assert end == datetime.combine(date.today(), datetime.min.time()).replace(tzinfo=_TZ_PARIS)
def test_date_range_yesterday_today():
    """'yesterday,today' spans from yesterday's midnight to tomorrow's midnight."""
    start, end = parse_date_range("yesterday,today", _TZ_PARIS)
    yesterday_midnight = datetime.combine(date.today() - timedelta(days=1), datetime.min.time()).replace(tzinfo=_TZ_PARIS)
    today_midnight = datetime.combine(date.today(), datetime.min.time()).replace(tzinfo=_TZ_PARIS)
    assert start == yesterday_midnight
    assert end == today_midnight + timedelta(days=1)
def test_date_range_date():
    """A single date covers that whole day: [day 00:00, next day 00:00)."""
    start, end = parse_date_range("2020-12-24", _TZ_PARIS)
    assert start == datetime(2020, 12, 24, tzinfo=_TZ_PARIS)
    assert end == datetime(2020, 12, 25, tzinfo=_TZ_PARIS)
def test_date_range_date_comma():
    """'<date>,' is open-ended: upper bound is the maximum tz-aware datetime."""
    start, end = parse_date_range("2020-12-24,", _TZ_PARIS)
    assert start == datetime(2020, 12, 24, tzinfo=_TZ_PARIS)
    assert end == _DATETIME_MAX_TZ
def test_date_range_comma_date():
    """',<date>' is open at the start and ends at the last instant of the date."""
    start, end = parse_date_range(",2100-12-24", _TZ_PARIS)
    assert start == _DATETIME_MIN_TZ
    end_of_day = datetime.combine(datetime(2100, 12, 24), datetime.max.time()).replace(tzinfo=_TZ_PARIS)
    assert end == end_of_day
def test_date_range_date_date():
    """'<date>,<date>' spans from the first day's start to the last instant of the second."""
    start, end = parse_date_range("2020-12-24,2020-12-25", _TZ_PARIS)
    assert start == datetime(2020, 12, 24, tzinfo=_TZ_PARIS)
    end_of_day = datetime.combine(datetime(2020, 12, 25), datetime.max.time()).replace(tzinfo=_TZ_PARIS)
    assert end == end_of_day
def test_date_range_datetime():
    """A single ISO datetime is the lower bound; upper bound stays open."""
    start, end = parse_date_range("2020-12-24T00:00:00+00:00", _TZ_PARIS)
    assert start == datetime(2020, 12, 24, tzinfo=pytz.UTC)
    assert end == _DATETIME_MAX_TZ
def test_date_range_datetime_comma():
    """'<datetime>,' behaves like the single-datetime form: open upper bound."""
    start, end = parse_date_range("2020-12-24T00:00:00+00:00,", _TZ_PARIS)
    assert start == datetime(2020, 12, 24, tzinfo=pytz.UTC)
    assert end == _DATETIME_MAX_TZ
def test_date_range_comma_datetime():
    """',<datetime>' is open at the start and ends exactly at the datetime."""
    start, end = parse_date_range(",2100-12-24T00:00:00+00:00", _TZ_PARIS)
    assert start == _DATETIME_MIN_TZ
    assert end == datetime(2100, 12, 24, tzinfo=pytz.UTC)
def test_date_range_datetime_datetime():
    """Two ISO datetimes are used verbatim as the range bounds."""
    start, end = parse_date_range("2020-12-24T00:00:00+00:00,2020-12-25T00:00:00+00:00", _TZ_PARIS)
    assert start == datetime(2020, 12, 24, tzinfo=pytz.UTC)
    assert end == datetime(2020, 12, 25, tzinfo=pytz.UTC)
def test_date_range_date_limit():
    """The extreme representable dates clamp to the tz-aware min/max sentinels."""
    start, end = parse_date_range("0001-01-01,9999-12-31", _TZ_PARIS)
    assert start == _DATETIME_MIN_TZ
    assert end == _DATETIME_MAX_TZ
| 41.109244
| 108
| 0.720564
| 744
| 4,892
| 4.376344
| 0.061828
| 0.077396
| 0.107801
| 0.140971
| 0.925676
| 0.909091
| 0.90602
| 0.848587
| 0.746315
| 0.693489
| 0
| 0.07136
| 0.149223
| 4,892
| 118
| 109
| 41.457627
| 0.710956
| 0
| 0
| 0.382716
| 0
| 0.012346
| 0.064391
| 0.034751
| 0
| 0
| 0
| 0
| 0.419753
| 1
| 0.209877
| false
| 0
| 0.037037
| 0
| 0.246914
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1dc452ee251b20c2e3801fccfaf2067a4b2e400d
| 117
|
py
|
Python
|
src/analytics/lambdas/test/test_quicksight_users_importer.py
|
shehir12/covid19-app-system-public
|
63184012d85335f7c499fe41ab534a0ef935a4b8
|
[
"MIT"
] | null | null | null |
src/analytics/lambdas/test/test_quicksight_users_importer.py
|
shehir12/covid19-app-system-public
|
63184012d85335f7c499fe41ab534a0ef935a4b8
|
[
"MIT"
] | null | null | null |
src/analytics/lambdas/test/test_quicksight_users_importer.py
|
shehir12/covid19-app-system-public
|
63184012d85335f7c499fe41ab534a0ef935a4b8
|
[
"MIT"
] | null | null | null |
from . import check_code
def test_code_is_valid():
    """The importer module passes the shared code-validity check."""
    result = check_code('importers/quicksight_users_importer.py')
    assert result
| 19.5
| 63
| 0.794872
| 17
| 117
| 5.058824
| 0.823529
| 0.209302
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119658
| 117
| 5
| 64
| 23.4
| 0.834951
| 0
| 0
| 0
| 0
| 0
| 0.324786
| 0.324786
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.666667
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3821d7c032f8f0ad0a170aed4f91d9f7d4d7e160
| 238
|
py
|
Python
|
pyrate/algorithms/__init__.py
|
fscutti/pyrate
|
0e974cdaa43e25cfa1a93de4449e5a39f67b1097
|
[
"BSD-3-Clause"
] | null | null | null |
pyrate/algorithms/__init__.py
|
fscutti/pyrate
|
0e974cdaa43e25cfa1a93de4449e5a39f67b1097
|
[
"BSD-3-Clause"
] | null | null | null |
pyrate/algorithms/__init__.py
|
fscutti/pyrate
|
0e974cdaa43e25cfa1a93de4449e5a39f67b1097
|
[
"BSD-3-Clause"
] | null | null | null |
"""Import algorithm modules here."""
import pyrate.algorithms.variables
import pyrate.algorithms.trees
import pyrate.algorithms.plots
import pyrate.algorithms.histograms
import pyrate.algorithms.regions
import pyrate.algorithms.muondet
| 23.8
| 36
| 0.844538
| 28
| 238
| 7.178571
| 0.428571
| 0.358209
| 0.656716
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07563
| 238
| 9
| 37
| 26.444444
| 0.913636
| 0.12605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3821fd2251adb0a71f7cbd567d9a4b9102d5b2d5
| 134
|
py
|
Python
|
algorithms/classification_albert/src/classification_albert_test.py
|
algorithmia-algorithms/qa-algorithm-scripts
|
ac2ae204e473bb788fad989d41f56adf7a326a32
|
[
"MIT"
] | null | null | null |
algorithms/classification_albert/src/classification_albert_test.py
|
algorithmia-algorithms/qa-algorithm-scripts
|
ac2ae204e473bb788fad989d41f56adf7a326a32
|
[
"MIT"
] | null | null | null |
algorithms/classification_albert/src/classification_albert_test.py
|
algorithmia-algorithms/qa-algorithm-scripts
|
ac2ae204e473bb788fad989d41f56adf7a326a32
|
[
"MIT"
] | null | null | null |
from . import classification_albert
def test_classification_albert():
assert classification_albert.apply("Jane") == "hello Jane"
| 26.8
| 62
| 0.783582
| 15
| 134
| 6.733333
| 0.666667
| 0.594059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119403
| 134
| 4
| 63
| 33.5
| 0.855932
| 0
| 0
| 0
| 0
| 0
| 0.104478
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
698171c45a291379b7a962f005b1076cc1d280ff
| 159
|
py
|
Python
|
Lib/Scripts/glyphs/transform/outline.py
|
gferreira/hTools2
|
a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c
|
[
"BSD-3-Clause"
] | 11
|
2015-01-06T15:43:56.000Z
|
2019-07-27T00:35:20.000Z
|
hTools2.roboFontExt/lib/Scripts/selected glyphs/transform/outline.py
|
gferreira/hTools2_extension
|
9e5150082a0a39847c1078aac3dc38d914a44f83
|
[
"BSD-3-Clause"
] | 2
|
2017-08-08T21:02:17.000Z
|
2019-12-18T15:55:48.000Z
|
Lib/Scripts/glyphs/transform/outline.py
|
gferreira/hTools2
|
a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c
|
[
"BSD-3-Clause"
] | 4
|
2015-01-10T13:58:50.000Z
|
2019-12-18T15:40:14.000Z
|
# [h] outline glyphs dialog
# Opens the hTools2 "outline glyphs" dialog for the selected glyphs.
import hTools2.dialogs.glyphs.outline
# NOTE(review): bare reload() is Python 2 only; it forces a re-import so that
# edits to the dialog module are picked up when re-running this script
# (presumably inside RoboFont -- confirm the target environment).
reload(hTools2.dialogs.glyphs.outline)
hTools2.dialogs.glyphs.outline.outlineGlyphsDialog()
| 26.5
| 52
| 0.830189
| 19
| 159
| 6.947368
| 0.473684
| 0.318182
| 0.454545
| 0.613636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020134
| 0.062893
| 159
| 6
| 52
| 26.5
| 0.865772
| 0.157233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
0e13d8a91c459adaeebb4482810ca9d6fb8c2f84
| 2,355
|
py
|
Python
|
sign_in.py
|
Manoah-3401/Gazda
|
b8c1271fa38b2e771f19bd1253407fba4c978a68
|
[
"MIT"
] | 1
|
2021-05-23T15:27:32.000Z
|
2021-05-23T15:27:32.000Z
|
sign_in.py
|
kd100100/gadza
|
b260cb7ece2f945c0d5e9a207ea8c23ebe3bc630
|
[
"MIT"
] | null | null | null |
sign_in.py
|
kd100100/gadza
|
b260cb7ece2f945c0d5e9a207ea8c23ebe3bc630
|
[
"MIT"
] | null | null | null |
def sign_in_database_create_farmer(request, cursor, con):
    """Register a new farmer account.

    Inserts a row into `farmers` and creates the farmer's personal goods
    table, unless the submitted phone number is already registered.

    :param request: Flask-style request; reads form fields 'fname',
        'phone' and 'pwd'.
    :param cursor: open DB-API cursor; %s paramstyle assumed (MySQL-style
        drivers such as pymysql/MySQLdb -- TODO confirm the driver in use).
    :param con: DB connection used to commit.
    :return: 1 on success, 2 when the phone number is already registered.
    """
    cursor.execute(""" SELECT `PhoneNumber` FROM `farmers` """)
    phone_numbers = cursor.fetchall()
    # Rows are 1-tuples, e.g. ('8610342197',).
    if any(row[0] == request.form['phone'] for row in phone_numbers):
        return 2
    # Parameterized query: user-supplied values are bound, never
    # interpolated into the SQL text (fixes SQL injection).
    cursor.execute(
        "INSERT INTO `farmers`(`Name`, `PhoneNumber`, `Password`) "
        "VALUES (%s, %s, %s)",
        (request.form['fname'], request.form['phone'], request.form['pwd']))
    con.commit()
    # SECURITY: identifiers (table names) cannot be bound as parameters, so
    # the user-controlled 'fname' is still interpolated here and remains
    # injectable. TODO: whitelist-validate the name or key the per-farmer
    # table on a server-generated id instead.
    cursor.execute(""" CREATE TABLE `{}` ( `GoodsName` VARCHAR(100) NOT NULL ,
    `IsOrganic` VARCHAR(10) NOT NULL , `DatePosted` VARCHAR(10) NOT NULL , `Quantity` VARCHAR(10) NOT NULL ,
    `Region` VARCHAR(100) NOT NULL , `OfferedPrice` VARCHAR(100) NOT NULL ,
    `Shipping` VARCHAR(100) NOT NULL , `IsAvaliable` VARCHAR(10) NOT NULL )""".format(request.form['fname']))
    con.commit()
    return 1
# Retailer variant of the farmer sign-up above.
def sign_in_database_create_retailer(request, cursor, con):
    """Register a new retailer account.

    Inserts a row into `retailers` and creates the retailer's personal
    goods table, unless the submitted phone number is already registered.

    :param request: Flask-style request; reads form fields 'fname',
        'phone_' and 'pwd'.
    :param cursor: open DB-API cursor; %s paramstyle assumed (MySQL-style
        drivers such as pymysql/MySQLdb -- TODO confirm the driver in use).
    :param con: DB connection used to commit.
    :return: 1 on success, 2 when the phone number is already registered.
    """
    cursor.execute(""" SELECT `PhoneNumber` FROM `retailers` """)
    phone_numbers = cursor.fetchall()
    # Rows are 1-tuples, e.g. ('8610342197',).
    if any(row[0] == request.form['phone_'] for row in phone_numbers):
        return 2
    # Parameterized query: user-supplied values are bound, never
    # interpolated into the SQL text (fixes SQL injection).
    cursor.execute(
        "INSERT INTO `retailers`(`Name`, `PhoneNumber`, `Password`) "
        "VALUES (%s, %s, %s)",
        (request.form['fname'], request.form['phone_'], request.form['pwd']))
    con.commit()
    # SECURITY: identifiers (table names) cannot be bound as parameters, so
    # the user-controlled 'fname' is still interpolated here and remains
    # injectable. TODO: whitelist-validate the name or key the per-retailer
    # table on a server-generated id instead.
    cursor.execute(""" CREATE TABLE `{}` ( `GoodsName` VARCHAR(100) NOT NULL ,
    `IsOrganic` VARCHAR(10) NOT NULL , `DatePosted` VARCHAR(10) NOT NULL , `Quantity` VARCHAR(10) NOT NULL ,
    `Region` VARCHAR(100) NOT NULL , `OfferedPrice` VARCHAR(100) NOT NULL ,
    `Shipping` VARCHAR(100) NOT NULL , `IsAvaliable` VARCHAR(10) NOT NULL )""".format(request.form['fname']))
    con.commit()
    return 1
| 50.106383
| 121
| 0.569851
| 249
| 2,355
| 5.325301
| 0.240964
| 0.084465
| 0.078431
| 0.102564
| 0.959276
| 0.924585
| 0.924585
| 0.924585
| 0.84917
| 0.84917
| 0
| 0.054874
| 0.272611
| 2,355
| 47
| 122
| 50.106383
| 0.719206
| 0.096391
| 0
| 0.705882
| 0
| 0.058824
| 0.474217
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0.058824
| 0
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
38ace47fb6244475a3ec4dc56d35fce18719a2a3
| 110
|
py
|
Python
|
src/celery_longterm_scheduler/__init__.py
|
ZeitOnline/celery_longterm_scheduler
|
5dca063cefdf66b3065472f9f4672041640f132a
|
[
"BSD-3-Clause"
] | 22
|
2018-05-21T21:00:25.000Z
|
2022-01-19T01:30:29.000Z
|
src/celery_longterm_scheduler/__init__.py
|
ZeitOnline/celery_longterm_scheduler
|
5dca063cefdf66b3065472f9f4672041640f132a
|
[
"BSD-3-Clause"
] | 5
|
2020-03-19T04:41:03.000Z
|
2022-01-07T07:26:50.000Z
|
src/celery_longterm_scheduler/__init__.py
|
ZeitOnline/celery_longterm_scheduler
|
5dca063cefdf66b3065472f9f4672041640f132a
|
[
"BSD-3-Clause"
] | 7
|
2018-05-03T06:30:40.000Z
|
2022-03-29T14:20:51.000Z
|
from celery_longterm_scheduler.task import Task
from celery_longterm_scheduler.scheduler import get_scheduler
| 36.666667
| 61
| 0.909091
| 15
| 110
| 6.333333
| 0.466667
| 0.210526
| 0.378947
| 0.568421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 110
| 2
| 62
| 55
| 0.931373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
38b51810039ac91b10a0b92786e4d45c67e07913
| 40
|
py
|
Python
|
BOJ/17000~17999/17300~17399/17362.py
|
shinkeonkim/today-ps
|
f3e5e38c5215f19579bb0422f303a9c18c626afa
|
[
"Apache-2.0"
] | 2
|
2020-01-29T06:54:41.000Z
|
2021-11-07T13:23:27.000Z
|
BOJ/17000~17999/17300~17399/17362.py
|
shinkeonkim/Today_PS
|
bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44
|
[
"Apache-2.0"
] | null | null | null |
BOJ/17000~17999/17300~17399/17362.py
|
shinkeonkim/Today_PS
|
bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44
|
[
"Apache-2.0"
] | null | null | null |
# BOJ 17362: the answer repeats with period 8, so look it up from n mod 8
# in a precomputed table.
CYCLE = [2, 1, 2, 3, 4, 5, 4, 3]
print(CYCLE[int(input()) % 8])
| 40
| 40
| 0.55
| 12
| 40
| 1.833333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.225
| 0
| 40
| 1
| 40
| 40
| 0.325
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
38c30571b1aaf2574cd142298d6845d7d1ca68c1
| 343
|
py
|
Python
|
guipyg/__init__.py
|
neccarus/Gui-Pyg
|
e69eb9e7f75b67b0cb70f211e5b3e602a920502b
|
[
"BSD-3-Clause"
] | null | null | null |
guipyg/__init__.py
|
neccarus/Gui-Pyg
|
e69eb9e7f75b67b0cb70f211e5b3e602a920502b
|
[
"BSD-3-Clause"
] | null | null | null |
guipyg/__init__.py
|
neccarus/Gui-Pyg
|
e69eb9e7f75b67b0cb70f211e5b3e602a920502b
|
[
"BSD-3-Clause"
] | null | null | null |
from .gui_element.element import Element
from .gui_element.element import encode_element
from .gui_element.toggleable_element import ToggleableElement
from .gui_element.element_group import ElementGroup
from .gui_element.button import Button
from .gui_element.popup import Popup
from .gui_element.menu import Menu
from .gui import GUI
| 38.111111
| 62
| 0.83965
| 49
| 343
| 5.673469
| 0.244898
| 0.201439
| 0.352518
| 0.226619
| 0.194245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116618
| 343
| 8
| 63
| 42.875
| 0.917492
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
38c83e43ade0cad17026c94bfe7621f025fbf8af
| 4,543
|
py
|
Python
|
credit_china/config.py
|
pythonyhd/reverse_spider
|
5922e39bee47bf4114ab06670f49e32eb1bc4b1d
|
[
"Apache-2.0"
] | 8
|
2020-03-30T06:54:09.000Z
|
2022-03-23T09:56:24.000Z
|
credit_china/config.py
|
pythonyhd/reverse_spider
|
5922e39bee47bf4114ab06670f49e32eb1bc4b1d
|
[
"Apache-2.0"
] | 1
|
2022-03-02T15:02:21.000Z
|
2022-03-02T15:02:21.000Z
|
credit_china/config.py
|
pythonyhd/reverse_spider
|
5922e39bee47bf4114ab06670f49e32eb1bc4b1d
|
[
"Apache-2.0"
] | 3
|
2020-05-03T05:07:00.000Z
|
2022-03-23T09:56:24.000Z
|
# -*- coding: utf-8 -*-
# Per-spider Scrapy setting overrides for the credit_china project.
# NOTE(review): some blocks spell the proxy middleware
# "RandomProxyMiddlerware" and others "RandomProxyMiddleware" -- confirm
# which class name actually exists in credit_china/middlewares.py.

# Credit Shaanxi - administrative penalties
xinyong_shanxi_settings = {
    "RETRY_ENABLED": True,
    "RETRY_TIMES": '9',
    "DOWNLOAD_TIMEOUT": '20',
    "ITEM_PIPELINES": {
        "credit_china.pipelines.CreditChinaPipeline": 300,
        "credit_china.pipelines.MongodbIndexPipeline": 340,
        # "credit_china.pipelines.MysqlTwistedPipeline": 360,
    },
    "DOWNLOADER_MIDDLEWARES": {
        "credit_china.middlewares.RandomUserAgentMiddleware": 400,
        "credit_china.middlewares.RandomProxyMiddlerware": 410,
        "credit_china.middlewares.RewriteRetryMiddleware": 420,
    }
}

# China market-regulation administrative penalty documents website
cfws_settings = {
    "RETRY_ENABLED": True,
    "RETRY_TIMES": '9',
    "DOWNLOAD_TIMEOUT": '20',
    "ITEM_PIPELINES": {
        "credit_china.pipelines.CreditChinaPipeline": 300,
        "credit_china.pipelines.MongodbIndexPipeline": 340,
        # "credit_china.pipelines.MysqlTwistedPipeline": 360,
    },
    "DOWNLOADER_MIDDLEWARES": {
        "credit_china.middlewares.RandomUserAgentMiddleware": 400,
        "credit_china.middlewares.RandomProxyMiddlerware": 410,
        "credit_china.middlewares.RewriteRetryMiddleware": 420,
    }
}

# Alibaba Literature (uses scrapy-redis for distributed scheduling/dedup)
aliwx_settings = {
    "REDIRECT_ENABLED": False,
    "RETRY_ENABLED": True,
    "RETRY_TIMES": '9',
    "DOWNLOAD_TIMEOUT": '20',
    "SCHEDULER": "scrapy_redis.scheduler.Scheduler",
    "DUPEFILTER_CLASS": "scrapy_redis.dupefilter.RFPDupeFilter",
    "SCHEDULER_QUEUE_CLASS": "scrapy_redis.queue.SpiderPriorityQueue",
    "SCHEDULER_PERSIST": True,
    "ITEM_PIPELINES": {
        "credit_china.pipelines.CreditChinaPipeline": 300,
        "credit_china.pipelines.MongodbIndexPipeline": 340,
        # "credit_china.pipelines.MysqlTwistedPipeline": 360,
    },
    "DOWNLOADER_MIDDLEWARES": {
        "credit_china.middlewares.RandomUserAgentMiddleware": 400,
        "credit_china.middlewares.RandomProxyMiddlerware": 410,
        "credit_china.middlewares.RewriteRetryMiddleware": 420,
    }
}

# "Boss Cui" movies
movie_settings = {
    "RETRY_ENABLED": True,
    "RETRY_TIMES": '9',
    "DOWNLOAD_TIMEOUT": '20',
    # "ITEM_PIPELINES": {
    #     "credit_china.pipelines.CreditChinaPipeline": 300,
    #     "credit_china.pipelines.MongodbIndexPipeline": 340,
    #     # "credit_china.pipelines.MysqlTwistedPipeline": 360,
    # },
    "DOWNLOADER_MIDDLEWARES": {
        "credit_china.middlewares.RandomUserAgentMiddleware": 400,
        # "credit_china.middlewares.RandomProxyMiddlerware": 410,
        # "credit_china.middlewares.RewriteRetryMiddleware": 420,
    },
}

# Erlangcha (company-lookup site) configuration
erlang_settings = {
    "RETRY_ENABLED": True,
    "RETRY_TIMES": '9',
    "DOWNLOAD_TIMEOUT": '20',
    "ITEM_PIPELINES": {
        "credit_china.pipelines.CreditChinaPipeline": 300,
        "credit_china.pipelines.MongodbIndexPipeline": 340,
        # "credit_china.pipelines.MysqlTwistedPipeline": 360,
    },
    "DOWNLOADER_MIDDLEWARES": {
        "credit_china.middlewares.RandomUserAgentMiddleware": 400,
        "credit_china.middlewares.RandomProxyMiddlerware": 410,
        "credit_china.middlewares.RewriteRetryMiddleware": 420,
    },
}

# NightTeam forum, integrates Selenium
# "80 movies" site: simple distributed (scrapy-redis) example
movies80_settings = {
    "RETRY_ENABLED": True,
    "RETRY_TIMES": '9',
    "DOWNLOAD_TIMEOUT": '20',
    "SCHEDULER": "scrapy_redis.scheduler.Scheduler",
    "DUPEFILTER_CLASS": "scrapy_redis.dupefilter.RFPDupeFilter",
    "SCHEDULER_QUEUE_CLASS": "scrapy_redis.queue.SpiderPriorityQueue",
    "SCHEDULER_PERSIST": True,
    # "ITEM_PIPELINES": {
    #     # "credit_china.pipelines.CreditChinaPipeline": 300,
    #     "credit_china.pipelines.MongodbIndexPipeline": 340,
    #     # "credit_china.pipelines.MysqlTwistedPipeline": 360,
    # },
    "DOWNLOADER_MIDDLEWARES": {
        "credit_china.middlewares.RandomUserAgentMiddleware": 400,
        # "credit_china.middlewares.RandomProxyMiddleware": 410,
        # "credit_china.middlewares.RewriteRetryMiddleware": 420,
    },
}

# Maomaozu (rental site)
maomao_settings = {
    "RETRY_ENABLED": True,
    "RETRY_TIMES": '9',
    "DOWNLOAD_TIMEOUT": '20',
    # "ITEM_PIPELINES": {
    #     # "credit_china.pipelines.CreditChinaPipeline": 300,
    #     "credit_china.pipelines.MongodbIndexPipeline": 340,
    #     # "credit_china.pipelines.MysqlTwistedPipeline": 360,
    # },
    "DOWNLOADER_MIDDLEWARES": {
        "credit_china.middlewares.RandomUserAgentMiddleware": 400,
        # "credit_china.middlewares.RandomProxyMiddleware": 410,
        # "credit_china.middlewares.RewriteRetryMiddleware": 420,
    },
}
| 29.309677
| 70
| 0.675985
| 375
| 4,543
| 7.922667
| 0.162667
| 0.155503
| 0.141367
| 0.049478
| 0.948502
| 0.948502
| 0.948502
| 0.948502
| 0.948502
| 0.948502
| 0
| 0.041678
| 0.197227
| 4,543
| 155
| 71
| 29.309677
| 0.772964
| 0.274928
| 0
| 0.707865
| 0
| 0
| 0.593856
| 0.453456
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c7fbe4103823518fb97e673b5d6d0ab64b00bb31
| 102
|
py
|
Python
|
tools/string_table_parser/__init__.py
|
fingerco/sims-4-mac-modding-tools
|
47d4fb8b6065b455f27cc1487cb217f2fc201641
|
[
"Apache-2.0"
] | 3
|
2020-06-13T16:16:43.000Z
|
2021-06-19T22:23:13.000Z
|
tools/string_table_parser/__init__.py
|
fingerco/sims-4-mac-modding-tools
|
47d4fb8b6065b455f27cc1487cb217f2fc201641
|
[
"Apache-2.0"
] | 2
|
2021-03-31T19:27:34.000Z
|
2021-12-13T20:21:58.000Z
|
tools/string_table_parser/__init__.py
|
fingerco/sims-4-mac-modding-tools
|
47d4fb8b6065b455f27cc1487cb217f2fc201641
|
[
"Apache-2.0"
] | 1
|
2020-05-13T19:59:40.000Z
|
2020-05-13T19:59:40.000Z
|
from .string_table_reader import StringTableReader
from .string_table_writer import StringTableWriter
| 34
| 50
| 0.901961
| 12
| 102
| 7.333333
| 0.666667
| 0.227273
| 0.340909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 102
| 2
| 51
| 51
| 0.93617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2a33e8399eec92644cba1c35fb5dde23700c8416
| 175
|
py
|
Python
|
Leetcode/357. Count Numbers with Unique Digits/solution2.py
|
asanoviskhak/Outtalent
|
c500e8ad498f76d57eb87a9776a04af7bdda913d
|
[
"MIT"
] | 51
|
2020-07-12T21:27:47.000Z
|
2022-02-11T19:25:36.000Z
|
Leetcode/357. Count Numbers with Unique Digits/solution2.py
|
CrazySquirrel/Outtalent
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
[
"MIT"
] | null | null | null |
Leetcode/357. Count Numbers with Unique Digits/solution2.py
|
CrazySquirrel/Outtalent
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
[
"MIT"
] | 32
|
2020-07-27T13:54:24.000Z
|
2021-12-25T18:12:50.000Z
|
class Solution:
    # Precomputed counts of unique-digit numbers, by digit length: index 0
    # accounts for 0 itself, index k (k >= 1) is the count of k-digit
    # numbers with all-distinct digits. From 11 digits on the count is 0
    # (pigeonhole), so the table never needs more than 11 entries.
    _UNIQUE_COUNTS = (1, 9, 81, 648, 4536, 27216, 136080, 544320,
                      1632960, 3265920, 3265920)

    def countNumbersWithUniqueDigits(self, n: int) -> int:
        """Count x with 0 <= x < 10**n whose decimal digits are all distinct."""
        return sum(self._UNIQUE_COUNTS[:n + 1])
| 43.75
| 99
| 0.657143
| 23
| 175
| 5
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.35461
| 0.194286
| 175
| 3
| 100
| 58.333333
| 0.460993
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
aa7fe8fdf3a6836cb49b136f5bbaf5426b33a73c
| 8,840
|
py
|
Python
|
timelight_ai_python_api_client/api/day_api.py
|
timelight-ai/python-api-client
|
7e14341a89e8b7e1b4b0730416f6ddd3ef66ef39
|
[
"MIT"
] | null | null | null |
timelight_ai_python_api_client/api/day_api.py
|
timelight-ai/python-api-client
|
7e14341a89e8b7e1b4b0730416f6ddd3ef66ef39
|
[
"MIT"
] | null | null | null |
timelight_ai_python_api_client/api/day_api.py
|
timelight-ai/python-api-client
|
7e14341a89e8b7e1b4b0730416f6ddd3ef66ef39
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
timelight
This is the timelight api. # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from timelight_ai_python_api_client.api_client import ApiClient
class DayApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Wrap the given ApiClient, creating a default one when none is passed."""
    self.api_client = ApiClient() if api_client is None else api_client
def v1_day_bulk_patch(self, days_patch_dto, **kwargs):  # noqa: E501
    """Update day entities  # noqa: E501

    Synchronous by default; pass async_req=True to get the request thread
    back instead of the result.

    :param async_req bool
    :param DaysPatchDto days_patch_dto: (required)
    :return: DayListDto, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate identically; in the async case
    # the delegate returns the request thread instead of the DayListDto.
    return self.v1_day_bulk_patch_with_http_info(days_patch_dto, **kwargs)  # noqa: E501
def v1_day_bulk_patch_with_http_info(self, days_patch_dto, **kwargs):  # noqa: E501
    """Update day entities  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.v1_day_bulk_patch_with_http_info(days_patch_dto, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param DaysPatchDto days_patch_dto: (required)
    :return: DayListDto
        If the method is called asynchronously,
        returns the request thread.
    """
    # Swagger-generated code: validate kwargs against the known parameter
    # names, then build the PATCH /v1/day/bulk request.
    all_params = ['days_patch_dto']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the arguments; kwargs entries are folded in so that
    # every accepted parameter can be looked up uniformly via params[...].
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method v1_day_bulk_patch" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'days_patch_dto' is set
    if ('days_patch_dto' not in params or
            params['days_patch_dto'] is None):
        raise ValueError("Missing the required parameter `days_patch_dto` when calling `v1_day_bulk_patch`")  # noqa: E501

    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    # The DTO travels as the JSON request body.
    body_params = None
    if 'days_patch_dto' in params:
        body_params = params['days_patch_dto']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['bearer']  # noqa: E501
    return self.api_client.call_api(
        '/v1/day/bulk', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='DayListDto',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def v1_day_list_source_id_year_get(self, year, source_id, **kwargs):  # noqa: E501
    """List day data of the reference year  # noqa: E501

    Synchronous by default; pass async_req=True to get the request thread
    back instead of the result.

    :param async_req bool
    :param float year: (required)
    :param float source_id: (required)
    :return: DayListDto, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate identically; in the async case
    # the delegate returns the request thread instead of the DayListDto.
    return self.v1_day_list_source_id_year_get_with_http_info(year, source_id, **kwargs)  # noqa: E501
def v1_day_list_source_id_year_get_with_http_info(self, year, source_id, **kwargs):  # noqa: E501
    """List day data of the reference year  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.v1_day_list_source_id_year_get_with_http_info(year, source_id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param float year: (required)
    :param float source_id: (required)
    :return: DayListDto
    If the method is called asynchronously,
    returns the request thread.
    """
    all_params = ['year', 'source_id', 'async_req',
                  '_return_http_data_only', '_preload_content',
                  '_request_timeout']  # noqa: E501
    # Collect positional args plus validated keyword args in one dict.
    params = {'year': year, 'source_id': source_id}
    for key, val in six.iteritems(kwargs):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method v1_day_list_source_id_year_get" % key
            )
        params[key] = val
    # verify the required parameter 'year' is set
    if params.get('year') is None:
        raise ValueError("Missing the required parameter `year` when calling `v1_day_list_source_id_year_get`")  # noqa: E501
    # verify the required parameter 'source_id' is set
    if params.get('source_id') is None:
        raise ValueError("Missing the required parameter `source_id` when calling `v1_day_list_source_id_year_get`")  # noqa: E501

    path_params = {'year': params['year'],
                   'sourceId': params['source_id']}
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }
    # Authentication setting: bearer token
    return self.api_client.call_api(
        '/v1/day/list/{sourceId}/{year}', 'GET',
        path_params,
        [],                 # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='DayListDto',  # noqa: E501
        auth_settings=['bearer'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
| 37.299578
| 134
| 0.617873
| 1,075
| 8,840
| 4.780465
| 0.146977
| 0.046702
| 0.035026
| 0.024518
| 0.858338
| 0.835182
| 0.818447
| 0.795875
| 0.753454
| 0.753454
| 0
| 0.018687
| 0.291742
| 8,840
| 236
| 135
| 37.457627
| 0.802108
| 0.313009
| 0
| 0.672131
| 1
| 0
| 0.189132
| 0.045463
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040984
| false
| 0
| 0.032787
| 0
| 0.131148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
aa869d5043f37c45fc97940a6128e8ec3061a797
| 23,834
|
py
|
Python
|
pyfilter/__init__.py
|
JuantonioMS/pyngs
|
5c929e68c975aae94669d0a0ff29ceb462de9b5e
|
[
"MIT"
] | null | null | null |
pyfilter/__init__.py
|
JuantonioMS/pyngs
|
5c929e68c975aae94669d0a0ff29ceb462de9b5e
|
[
"MIT"
] | null | null | null |
pyfilter/__init__.py
|
JuantonioMS/pyngs
|
5c929e68c975aae94669d0a0ff29ceb462de9b5e
|
[
"MIT"
] | null | null | null |
import argparse
import sys
import time
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
import tempfile
import collections
"""CLASE FASTQ"""
class FastQ(object):
    """A single FASTQ record (header, sequence, extra/strand line, quality).

    Filter/transform methods follow one convention: they return a tuple
    ``(keep, sequence, quality)`` where ``keep`` is False when the read
    should be discarded by the caller.
    """
    def __init__(self, name, sequence, extra, quality, phred_value=None):
        self.name = name
        self.sequence = sequence
        self.extra = extra
        self.quality = quality
        # Pool of padding characters used by nucnumber()/polyxfilter().
        self.N_chain = "NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN"
        self.phred = phred_value
        if self.phred is not None:
            # Pre-compute numeric quality scores when the offset is known.
            self.asciiconversion()
    def __str__(self):
        return "\n".join([self.name, self.sequence, self.extra, self.quality]) + "\n"
    def __len__(self, qualen=False):
        # len() always passes qualen=False; the quality length is reachable
        # only via an explicit self.__len__(qualen=True) call.
        if qualen:
            return len(self.quality)
        else:
            return len(self.sequence)
    def qualitysec(self):
        """Return the mean numeric quality score of the read."""
        qualities = [(ord(character) - self.phred) for character in self.quality]
        return sum(qualities) / (len(qualities))
    def gc_proportion(self):
        """Return the GC content as an integer percentage."""
        g_number = self.sequence.count("G")
        c_number = self.sequence.count("C")
        proportion = float(g_number + c_number) / len(self.sequence)
        return int(proportion * 100)
    def nucproportion(self, nuc):
        """Return the percentage (int) of the sequence made of nucleotide *nuc*."""
        proportion = self.sequence.count(nuc) / float(len(self.sequence)) * 100
        return int(proportion)
    def asciidetection(self, phredconverter=None):
        """Guess the Phred offset (33 or 64) from the quality string.

        Returns None when no character falls in either diagnostic range.
        (Removed an unreachable ``break`` that followed the ``return``.)
        """
        low_list = [chr(character) for character in range(33, 59)]
        high_list = [chr(character) for character in range(74, 127)]
        for ascii_letter in self.quality:
            if ascii_letter in low_list:
                return 33
            elif ascii_letter in high_list:
                return 64
        return None
    def asciiconversion(self):
        """Cache and return the numeric quality scores (ASCII minus offset)."""
        self.converted = [(ord(character) - self.phred) for character in self.quality]
        return self.converted
    def minquality(self, min_quality):
        """Keep only the bases left of the located low-quality position.

        NOTE(review): the lookup uses index(min_quality - 1) and bumps the
        threshold upward until a score is found -- presumably intentional,
        but worth confirming against the original filter spec.
        """
        position = None
        while position is None:
            try:
                position = self.converted.index(min_quality - 1)
            except ValueError:
                min_quality = min_quality + 1
                if min_quality > 41:
                    sys.stdout.write("Min set over 41")
                    break
        transformed_seq = self.sequence[:position]
        transformed_qual = self.quality[:position]
        if len(transformed_seq) == 0:
            return False, transformed_seq, transformed_qual
        else:
            return True, transformed_seq, transformed_qual
    def maxquality(self, max_quality):
        """Keep only the bases from the located high-quality position onward.

        (Removed a leftover Python 2 ``print max_quality`` debug statement.)
        """
        position = None
        while position is None:
            try:
                position = self.converted.index(max_quality)
            except ValueError:
                max_quality = max_quality - 1
                if max_quality < 0:
                    sys.stdout.write("Max set to 0")
                    break
        transformed_seq = self.sequence[position:]
        transformed_qual = self.quality[position:]
        if len(transformed_seq) == 0:
            return False, transformed_seq, transformed_qual
        else:
            return True, transformed_seq, transformed_qual
    def nucnumber(self, nuc_number):
        """Force the read to exactly *nuc_number* bases: truncate or N-pad."""
        if nuc_number <= len(self.sequence):
            return True, self.sequence[:nuc_number], self.quality[:nuc_number]
        diference = nuc_number - len(self.sequence)
        transformed_seq = self.sequence + self.N_chain[:diference]
        transformed_qual = self.quality
        # Bug fix: extend the working copy.  The original re-assigned from
        # self.quality on every pass, so the loop never terminated whenever
        # more than one padding character was required.
        while len(transformed_seq) != len(transformed_qual):
            transformed_qual = transformed_qual + self.quality[-1]
        return True, transformed_seq, transformed_qual
    def gcpercentage(self, gc_percentage):
        """Discard reads whose GC content is <= gc_percentage."""
        if self.gc_proportion() <= gc_percentage:
            return False, self.sequence, self.quality
        return True, self.sequence, self.quality
    def GCpercentage(self, gc_percentage):
        """Discard reads whose GC content is >= gc_percentage."""
        if self.gc_proportion() >= gc_percentage:
            return False, self.sequence, self.quality
        return True, self.sequence, self.quality
    def meanquality(self, mean_quality):
        """Discard reads whose mean quality is <= mean_quality."""
        if self.qualitysec() <= mean_quality:
            return False, self.sequence, self.quality
        return True, self.sequence, self.quality
    def MEANquality(self, mean_quality):
        """Discard reads whose mean quality is >= mean_quality."""
        if self.qualitysec() >= mean_quality:
            return False, self.sequence, self.quality
        return True, self.sequence, self.quality
    def nucpercentage(self, nuc, nuc_percentage):
        """Discard reads where *nuc* makes up at most nuc_percentage percent."""
        if int(nuc_percentage) >= self.nucproportion(nuc):
            return False, self.sequence, self.quality
        return True, self.sequence, self.quality
    def NUCpercentage(self, nuc, nuc_percentage):
        """Discard reads where *nuc* makes up at least nuc_percentage percent."""
        # Bug fix: the original called nucproportion() without ``self.``,
        # which raised NameError on every invocation.
        if self.nucproportion(nuc) >= int(nuc_percentage):
            return False, self.sequence, self.quality
        return True, self.sequence, self.quality
    def removeduplicants(self, data):
        """Discard reads whose sequence occurs 2+ times in *data* (seq -> count)."""
        if data[self.sequence] >= 2:
            return False, self.sequence, self.quality
        return True, self.sequence, self.quality
    def POLYXfilter(self, size):
        """Discard reads containing a poly-A or poly-T run of length *size*."""
        counter_a = 0
        counter_t = 0
        for nuc in self.sequence:
            if nuc == "T":
                counter_a = 0
                counter_t += 1
            elif nuc == "A":
                counter_a += 1
                counter_t = 0
            else:
                counter_a = 0
                counter_t = 0
            if counter_a == size or counter_t == size:
                return False, self.sequence, self.quality
        return True, self.sequence, self.quality
    def polyxfilter(self, size):
        """Mask poly-A / poly-T runs of length *size* with Ns (keeps the read)."""
        counter_a = 0
        counter_t = 0
        for nuc in self.sequence:
            if nuc == "T":
                counter_a = 0
                counter_t += 1
            elif nuc == "A":
                counter_a += 1
                counter_t = 0
            else:
                counter_a = 0
                counter_t = 0
            if counter_a == size:
                # str.replace masks every non-overlapping occurrence of the run.
                return True, self.sequence.replace("A" * size, self.N_chain[:size]), self.quality
            if counter_t == size:
                return True, self.sequence.replace("T" * size, self.N_chain[:size]), self.quality
        return True, self.sequence, self.quality
    def gcregion(self, size):
        """Discard reads containing a G/C run of length *size*."""
        counter = 0
        for nuc in self.sequence:
            if nuc == "G" or nuc == "C":
                counter += 1
            else:
                counter = 0
            if counter == size:
                return False, self.sequence, self.quality
        return True, self.sequence, self.quality
    def qualitynchanger(self, min_quality):
        """Replace bases scoring <= min_quality with N."""
        transformed_seq = ""
        for position, (nuc, qual) in enumerate(zip(self.sequence, self.converted)):
            if qual <= min_quality:
                transformed_seq = transformed_seq + "N"
            else:
                transformed_seq = transformed_seq + self.sequence[position]
        return True, transformed_seq, self.quality
    def Qualitynchanger(self, max_quality):
        """Replace bases scoring >= max_quality with N."""
        transformed_seq = ""
        for position, (nuc, qual) in enumerate(zip(self.sequence, self.converted)):
            if qual >= max_quality:
                transformed_seq = transformed_seq + "N"
            else:
                transformed_seq = transformed_seq + self.sequence[position]
        return True, transformed_seq, self.quality
    def complement(self):
        """Return the DNA complement (via Biopython)."""
        seq = Seq(self.sequence, IUPAC.unambiguous_dna)
        complement = str(seq.complement())
        return True, complement, self.quality
    def reversecomplement(self):
        """Return the reverse complement; the quality string is reversed too."""
        seq = Seq(self.sequence, IUPAC.unambiguous_dna)
        reverse_complement = str(seq.reverse_complement())
        return True, reverse_complement, reverse(self.quality)
    def transcription(self):
        """DNA -> RNA transcription."""
        seq = Seq(self.sequence, IUPAC.unambiguous_dna)
        transcript = str(seq.transcribe())
        return True, transcript, self.quality
    def retrotranscription(self):
        """RNA -> DNA back-transcription."""
        seq = Seq(self.sequence, IUPAC.unambiguous_rna)
        retro_transcript = str(seq.back_transcribe())
        return True, retro_transcript, self.quality
    def righttrimmer(self, size):
        """Drop *size* bases from the right (3') end."""
        return True, self.sequence[:-size], self.quality[:-size]
    def lefttrimmer(self, size):
        """Drop *size* bases from the left (5') end."""
        return True, self.sequence[size:], self.quality[size:]
class FastqReader:
    """Iterate a FASTQ file handle, yielding FastQ records (4 lines each)."""
    def __init__(self, filename):
        # Despite the name, `filename` is an open file handle / line iterator.
        self.filename = filename
    def __iter__(self):
        return self
    def __next__(self):
        # Bug fix: the original defined ``___next__`` (three underscores),
        # so the class never worked with Python 3's iterator protocol.
        return self.next()
    def next(self):
        """Read the next 4-line record; propagate StopIteration at EOF."""
        try:
            name = next(self.filename).strip()
            sequence = next(self.filename).strip()
            extra = next(self.filename).strip()
            quality = next(self.filename).strip()
            return FastQ(name, sequence, extra, quality)
        except StopIteration:
            raise StopIteration
    def sampling(self, phred_value):
        """Generator over records; keeps only the first token of the header."""
        n = 4
        for i, line in enumerate(self.filename):
            if i % n == 0:
                name = line.strip().split()[0]
            elif i % n == 1:
                seq = line.strip()
            elif i % n == 2:
                strand = line.strip()
            elif i % n == 3:
                qual = line.strip()
                yield FastQ(name=name, sequence=seq, extra=strand, quality=qual, phred_value=phred_value)
    def __enter__(self):
        return self
    def __exit__(self, *args):
        self.filename.close()
"""CLASE FASTA"""
class Fasta(object):
    """A single FASTA record (header line and sequence).

    Filter/transform methods return ``(keep, sequence)`` where ``keep`` is
    False when the record should be discarded by the caller.
    """
    def __init__(self, name, sequence):
        self.name = name
        self.sequence = sequence
        # (Removed two leftover Python 2 debug prints of the raw sequence.)
    def __len__(self):
        return len(self.sequence)
    def __str__(self):
        return "\n".join([self.name, self.sequence]) + "\n"
    def nucproportion(self, nuc):
        """Return the percentage (int) of the sequence made of nucleotide *nuc*."""
        proportion = self.sequence.count(nuc) / float(len(self.sequence)) * 100
        return int(proportion)
    def righttrimmer(self, size):
        """Drop *size* bases from the right (3') end."""
        return True, self.sequence[:-size]
    def lefttrimmer(self, size):
        """Drop *size* bases from the left (5') end."""
        return True, self.sequence[size:]
    def removenucpercentage(self, nuc, nuc_percentage):
        """Discard records where *nuc* makes up at most nuc_percentage percent."""
        if self.nucproportion(nuc) <= int(nuc_percentage):
            return False, self.sequence
        return True, self.sequence
    def removeNUCpercentage(self, nuc, nuc_percentage):
        """Discard records where *nuc* makes up at least nuc_percentage percent."""
        if self.nucproportion(nuc) >= int(nuc_percentage):
            return False, self.sequence
        return True, self.sequence
    def removeambiguous(self):
        """Discard records containing any IUPAC-ambiguous character or gap.

        Bug fix: the original referenced an undefined name (``caracter``) and
        returned after inspecting only the first character, so every record
        with a clean first base passed regardless of the rest.
        """
        for character in self.sequence:
            if character in "RYKMSWBDHVNX-":
                return False, self.sequence
        return True, self.sequence
    def removeduplicants(self, data):
        """Discard records whose sequence occurs 2+ times in *data* (seq -> count).

        Bug fix: the original returned ``self.quality``, an attribute Fasta
        records do not have (copy-paste from FastQ) -- it always crashed.
        """
        if data[self.sequence] >= 2:
            return False, self.sequence
        return True, self.sequence
    def complement(self):
        """Return the DNA complement (via Biopython).

        Bug fix: dropped the nonexistent ``self.quality`` from the return.
        """
        seq = Seq(self.sequence, IUPAC.unambiguous_dna)
        complement = str(seq.complement())
        return True, complement
    def reversecomplement(self):
        """Return the reverse complement."""
        seq = Seq(self.sequence, IUPAC.unambiguous_dna)
        reverse_complement = str(seq.reverse_complement())
        return True, reverse_complement
    def transcription(self):
        """DNA -> RNA transcription."""
        seq = Seq(self.sequence, IUPAC.unambiguous_dna)
        transcript = str(seq.transcribe())
        return True, transcript
    def retrotranscription(self):
        """RNA -> DNA back-transcription."""
        seq = Seq(self.sequence, IUPAC.unambiguous_rna)
        retro_transcript = str(seq.back_transcribe())
        return True, retro_transcript
    def nucnumber(self, nuc_number):
        """Force the record to exactly *nuc_number* bases: truncate or N-pad.

        Bug fix: the original implicitly returned None for requests longer
        than the sequence; pad with N like the FastQ counterpart instead.
        """
        if nuc_number <= len(self.sequence):
            return True, self.sequence[:nuc_number]
        return True, self.sequence + "N" * (nuc_number - len(self.sequence))
"""CLASE SAM"""
class Sam(object):
    """A single SAM alignment record built from a split tab-separated line.

    Shares the FastQ filter convention: methods return ``(keep, sequence,
    quality)`` where ``keep`` is False when the record should be discarded.
    """
    def __init__(self, line, phred_value=None):
        # Bug fix: the original read an undefined name ``phred_value`` (it
        # was never a parameter), raising NameError on every construction.
        # Accepting it with a None default is backward compatible.
        self.qname = str(line[0])
        self.flag = int(line[1])
        self.rname = str(line[2])
        self.position = int(line[3])
        self.mapq = int(line[4])
        self.cigar = str(line[5])
        self.rnext = str(line[6])
        self.pnext = int(line[7])
        self.length = int(line[8])
        self.sequence = str(line[9])
        self.quality = str(line[10])
        # Pool of padding characters used by nucnumber()/polyxfilter().
        self.N_chain = "NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN"
        self.phred = phred_value
        if self.phred is not None:
            self.asciiconversion()
    def __str__(self):
        return "\t".join([self.qname, str(self.flag), self.rname, str(self.position), str(self.mapq), str(self.cigar), self.rnext, str(self.pnext), str(self.length), self.sequence, self.quality]) + "\n"
    def __len__(self):
        return len(self.sequence)
    def qualitysec(self):
        """Return the mean numeric quality score of the record."""
        qualities = [(ord(character) - self.phred) for character in self.quality]
        return sum(qualities) / (len(qualities))
    def gc_proportion(self):
        """Return the GC content as an integer percentage."""
        g_number = self.sequence.count("G")
        c_number = self.sequence.count("C")
        proportion = float(g_number + c_number) / len(self.sequence)
        return int(proportion * 100)
    def nucproportion(self, nuc):
        """Return the percentage (int) of the sequence made of nucleotide *nuc*."""
        proportion = self.sequence.count(nuc) / float(len(self.sequence)) * 100
        return int(proportion)
    def asciidetection(self, phredconverter=None):
        """Guess the Phred offset (33 or 64); None when indeterminate."""
        low_list = [chr(character) for character in range(33, 59)]
        high_list = [chr(character) for character in range(74, 127)]
        for ascii_letter in self.quality:
            if ascii_letter in low_list:
                return 33
            elif ascii_letter in high_list:
                return 64
        return None
    def asciiconversion(self):
        """Cache and return the numeric quality scores (ASCII minus offset)."""
        self.converted = [(ord(character) - self.phred) for character in self.quality]
        return self.converted
    def minquality(self, min_quality):
        """Keep only the bases left of the located low-quality position.

        NOTE(review): same index(min_quality - 1) oddity as FastQ.minquality.
        """
        position = None
        while position is None:
            try:
                position = self.converted.index(min_quality - 1)
            except ValueError:
                min_quality = min_quality + 1
                if min_quality > 41:
                    sys.stdout.write("Min set over 41")
                    break
        transformed_seq = self.sequence[:position]
        transformed_qual = self.quality[:position]
        if len(transformed_seq) == 0:
            return False, transformed_seq, transformed_qual
        else:
            return True, transformed_seq, transformed_qual
    def maxquality(self, max_quality):
        """Keep only the bases from the located high-quality position onward.

        (Removed a leftover Python 2 ``print max_quality`` debug statement.)
        """
        position = None
        while position is None:
            try:
                position = self.converted.index(max_quality)
            except ValueError:
                max_quality = max_quality - 1
                if max_quality < 0:
                    sys.stdout.write("Max set to 0")
                    break
        transformed_seq = self.sequence[position:]
        transformed_qual = self.quality[position:]
        if len(transformed_seq) == 0:
            return False, transformed_seq, transformed_qual
        else:
            return True, transformed_seq, transformed_qual
    def nucnumber(self, nuc_number):
        """Force the record to exactly *nuc_number* bases: truncate or N-pad."""
        if nuc_number <= len(self.sequence):
            return True, self.sequence[:nuc_number], self.quality[:nuc_number]
        diference = nuc_number - len(self.sequence)
        transformed_seq = self.sequence + self.N_chain[:diference]
        transformed_qual = self.quality
        # Bug fix: extend the working copy; the original re-assigned from
        # self.quality each pass and looped forever for >1 padding char.
        while len(transformed_seq) != len(transformed_qual):
            transformed_qual = transformed_qual + self.quality[-1]
        return True, transformed_seq, transformed_qual
    def gcpercentage(self, gc_percentage):
        """Discard records whose GC content is <= gc_percentage."""
        if self.gc_proportion() <= gc_percentage:
            return False, self.sequence, self.quality
        return True, self.sequence, self.quality
    def GCpercentage(self, gc_percentage):
        """Discard records whose GC content is >= gc_percentage."""
        if self.gc_proportion() >= gc_percentage:
            return False, self.sequence, self.quality
        return True, self.sequence, self.quality
    def meanquality(self, mean_quality):
        """Discard records whose mean quality is <= mean_quality."""
        if self.qualitysec() <= mean_quality:
            return False, self.sequence, self.quality
        return True, self.sequence, self.quality
    def MEANquality(self, mean_quality):
        """Discard records whose mean quality is >= mean_quality."""
        if self.qualitysec() >= mean_quality:
            return False, self.sequence, self.quality
        return True, self.sequence, self.quality
    def nucpercentage(self, nuc, nuc_percentage):
        """Discard records where *nuc* makes up at most nuc_percentage percent."""
        if int(nuc_percentage) >= self.nucproportion(nuc):
            return False, self.sequence, self.quality
        return True, self.sequence, self.quality
    def NUCpercentage(self, nuc, nuc_percentage):
        """Discard records where *nuc* makes up at least nuc_percentage percent."""
        # Bug fix: the original called nucproportion() without ``self.``.
        if self.nucproportion(nuc) >= int(nuc_percentage):
            return False, self.sequence, self.quality
        return True, self.sequence, self.quality
    def removeduplicants(self, data):
        """Discard records whose sequence occurs 2+ times in *data* (seq -> count)."""
        if data[self.sequence] >= 2:
            return False, self.sequence, self.quality
        return True, self.sequence, self.quality
    def POLYXfilter(self, size):
        """Discard records containing a poly-A or poly-T run of length *size*."""
        counter_a = 0
        counter_t = 0
        for nuc in self.sequence:
            if nuc == "T":
                counter_a = 0
                counter_t += 1
            elif nuc == "A":
                counter_a += 1
                counter_t = 0
            else:
                counter_a = 0
                counter_t = 0
            if counter_a == size or counter_t == size:
                return False, self.sequence, self.quality
        return True, self.sequence, self.quality
    def polyxfilter(self, size):
        """Mask poly-A / poly-T runs of length *size* with Ns (keeps the record)."""
        counter_a = 0
        counter_t = 0
        for nuc in self.sequence:
            if nuc == "T":
                counter_a = 0
                counter_t += 1
            elif nuc == "A":
                counter_a += 1
                counter_t = 0
            else:
                counter_a = 0
                counter_t = 0
            if counter_a == size:
                # str.replace masks every non-overlapping occurrence of the run.
                return True, self.sequence.replace("A" * size, self.N_chain[:size]), self.quality
            if counter_t == size:
                return True, self.sequence.replace("T" * size, self.N_chain[:size]), self.quality
        return True, self.sequence, self.quality
    def gcregion(self, size):
        """Discard records containing a G/C run of length *size*."""
        counter = 0
        for nuc in self.sequence:
            if nuc == "G" or nuc == "C":
                counter += 1
            else:
                counter = 0
            if counter == size:
                return False, self.sequence, self.quality
        return True, self.sequence, self.quality
    def qualitynchanger(self, min_quality):
        """Replace bases scoring <= min_quality with N."""
        transformed_seq = ""
        for position, (nuc, qual) in enumerate(zip(self.sequence, self.converted)):
            if qual <= min_quality:
                transformed_seq = transformed_seq + "N"
            else:
                transformed_seq = transformed_seq + self.sequence[position]
        return True, transformed_seq, self.quality
    def Qualitynchanger(self, max_quality):
        """Replace bases scoring >= max_quality with N."""
        transformed_seq = ""
        for position, (nuc, qual) in enumerate(zip(self.sequence, self.converted)):
            if qual >= max_quality:
                transformed_seq = transformed_seq + "N"
            else:
                transformed_seq = transformed_seq + self.sequence[position]
        return True, transformed_seq, self.quality
    def complement(self):
        """Return the DNA complement (via Biopython)."""
        seq = Seq(self.sequence, IUPAC.unambiguous_dna)
        complement = str(seq.complement())
        return True, complement, self.quality
    def reversecomplement(self):
        """Return the reverse complement; the quality string is reversed too."""
        seq = Seq(self.sequence, IUPAC.unambiguous_dna)
        reverse_complement = str(seq.reverse_complement())
        return True, reverse_complement, reverse(self.quality)
    def transcription(self):
        """DNA -> RNA transcription."""
        seq = Seq(self.sequence, IUPAC.unambiguous_dna)
        transcript = str(seq.transcribe())
        return True, transcript, self.quality
    def retrotranscription(self):
        """RNA -> DNA back-transcription."""
        seq = Seq(self.sequence, IUPAC.unambiguous_rna)
        retro_transcript = str(seq.back_transcribe())
        return True, retro_transcript, self.quality
    def righttrimmer(self, size):
        """Drop *size* bases from the right end."""
        return True, self.sequence[:-size], self.quality[:-size]
    def lefttrimmer(self, size):
        """Drop *size* bases from the left end."""
        return True, self.sequence[size:], self.quality[size:]
class FastaReader:
    """Iterate a FASTA file handle, yielding Fasta records."""
    def __init__(self, filename):
        # Despite the name, `filename` is an open file handle / line iterator.
        self.filename = filename
    def __iter__(self):
        return self
    def __next__(self):
        # Bug fix: the original defined ``___next__`` (three underscores),
        # so the class never worked with Python 3's iterator protocol.
        return self.next()
    def next(self):
        """Read a 2-line record (header + sequence); propagate StopIteration."""
        try:
            name = next(self.filename).strip()
            sequence = next(self.filename).strip()
            return Fasta(name, sequence)
        except StopIteration:
            raise StopIteration
    def sampling(self):
        """Generator over multi-line FASTA records.

        Sequence lines keep their trailing newlines, matching the original.
        """
        name = None
        sequence = ""
        for line in self.filename:
            if line[0] == ">":
                # Flush the record accumulated so far before starting a new one.
                if name is not None and sequence != "":
                    yield Fasta(name, sequence)
                name = line.strip()
                sequence = ""
            else:
                sequence = sequence + line
        # Bug fix: the original never yielded the final record in the file.
        if name is not None and sequence != "":
            yield Fasta(name, sequence)
    def __enter__(self):
        return self
    def __exit__(self, *args):
        self.filename.close()
def asciidetection(quality):
    """Guess the Phred offset of a quality string.

    Returns 33 when the first diagnostic character falls in chr(33)-chr(58),
    64 when it falls in chr(74)-chr(126), and None when no character lands
    in either range.  (Removed an unreachable ``break`` after ``return``.)
    """
    low_list = [chr(character) for character in range(33, 59)]
    high_list = [chr(character) for character in range(74, 127)]
    for ascii_letter in quality:
        if ascii_letter in low_list:
            return 33
        elif ascii_letter in high_list:
            return 64
    return None
def reverse(list):
    """Return *list* (string or sequence) in reverse order.

    The parameter keeps its historical name (it shadows the builtin) so
    existing keyword calls stay valid.

    Bug fix / simplification: slicing replaces the original O(n^2)
    recursion, which also exhausted the recursion limit on long inputs and
    crashed with IndexError on empty ones.
    """
    return list[::-1]
def switch(firstflag, secondflag):
    """Exchange the states of two mutually exclusive boolean flags.

    Aborts the program when the pair is not in a (True, False) or
    (False, True) configuration.
    """
    if firstflag == True and secondflag == False:
        return False, True
    if firstflag == False and secondflag == True:
        return True, False
    sys.exit("Warning! Switch failure!")
def debug(VERBOSE_LEVEL, *args, **kwargs):
    # Print *args (joined, prefixed with the level) when the requested
    # verbosity matches the module's DEBUG_LEVEL.
    # NOTE(review): DEBUG_LEVEL is not defined anywhere in this chunk --
    # presumably a module-level constant set elsewhere; confirm it exists.
    # (Python 2 print statement; kwargs is accepted but unused.)
    if VERBOSE_LEVEL == DEBUG_LEVEL:
        print DEBUG_LEVEL + "! " + "".join(str(x) for x in args)
def run(arguments):
    # CLI entry point; prints a Spanish startup banner ("Starting the program").
    # NOTE(review): only this one statement is visible here -- the rest of the
    # implementation appears to be truncated in this chunk.
    print "Iniciando el programa"
| 30.055485
| 202
| 0.578459
| 2,634
| 23,834
| 5.088838
| 0.071374
| 0.110116
| 0.057296
| 0.070352
| 0.887645
| 0.882945
| 0.869517
| 0.861758
| 0.858699
| 0.855566
| 0
| 0.008789
| 0.326928
| 23,834
| 792
| 203
| 30.093434
| 0.826767
| 0
| 0
| 0.841837
| 0
| 0
| 0.011856
| 0.004625
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.011905
| null | null | 0.010204
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2a9ea182c2d89c599b9b9954f7dcf862be383add
| 4,124
|
py
|
Python
|
tests/test_conformers.py
|
hengwei-chan/fragmentation_and_assemble
|
35b56dd1c97cfa6731bcbfd68c3eb01bfcd7668f
|
[
"Apache-2.0"
] | 130
|
2021-04-08T11:15:27.000Z
|
2022-03-25T01:51:47.000Z
|
tests/test_conformers.py
|
hengwei-chan/fragmentation_and_assemble
|
35b56dd1c97cfa6731bcbfd68c3eb01bfcd7668f
|
[
"Apache-2.0"
] | 72
|
2021-04-08T11:46:51.000Z
|
2022-03-29T01:27:41.000Z
|
tests/test_conformers.py
|
hengwei-chan/fragmentation_and_assemble
|
35b56dd1c97cfa6731bcbfd68c3eb01bfcd7668f
|
[
"Apache-2.0"
] | 11
|
2021-04-20T10:27:38.000Z
|
2022-03-07T07:29:30.000Z
|
# type: ignore
import pytest
import numpy as np
import datamol as dm
def test_generate():
    """Conformer generation: invalid method plus rms_cutoff/minimize combos."""
    # An unknown embedding method must be rejected.
    with pytest.raises(ValueError):
        bad_mol = dm.to_mol("CCCC")
        bad_mol = dm.conformers.generate(bad_mol, method="custom_method")
    # No RMS pruning, no minimization: every embedding survives.
    butane = dm.to_mol("CCCC")
    butane = dm.conformers.generate(butane, rms_cutoff=None, minimize_energy=False)
    assert butane.GetNumConformers() == 50
    assert butane.GetConformer(0).GetPositions().shape == (4, 3)
    # Minimization stores a UFF energy on each conformer.
    butane = dm.to_mol("CCCC")
    butane = dm.conformers.generate(butane, rms_cutoff=None, minimize_energy=True)
    assert butane.GetNumConformers() == 50
    assert "rdkit_uff_energy" in butane.GetConformer(0).GetPropsAsDict()
    # RMS pruning collapses near-duplicate conformers.
    butane = dm.to_mol("CCCC")
    butane = dm.conformers.generate(butane, rms_cutoff=1, minimize_energy=False)
    assert butane.GetNumConformers() == 23
    assert butane.GetConformer(0).GetPositions().shape == (4, 3)
    # Pruning combined with minimization.
    butane = dm.to_mol("CCCC")
    butane = dm.conformers.generate(butane, rms_cutoff=1, minimize_energy=True)
    assert butane.GetNumConformers() == 25
    assert "rdkit_uff_energy" in butane.GetConformer(0).GetPropsAsDict()
@pytest.mark.skip_platform("win")
def test_sasa():
    """SASA fails without conformers and yields one value per conformer."""
    # A conformer-less molecule must raise.
    with pytest.raises(ValueError):
        aspirin = dm.to_mol("O=C(C)Oc1ccccc1C(=O)O")
        aspirin = dm.conformers.sasa(aspirin)
    # One SASA value per generated conformer.
    butanal = dm.to_mol("CCCC=O")
    butanal = dm.conformers.generate(butanal, minimize_energy=True)
    assert dm.conformers.sasa(butanal).shape == (50,)
def test_rmsd():
    """Pairwise conformer RMSD matrix is n_confs x n_confs."""
    # A conformer-less molecule must raise.
    with pytest.raises(ValueError):
        aspirin = dm.to_mol("O=C(C)Oc1ccccc1C(=O)O")
        aspirin = dm.conformers.rmsd(aspirin)
    butanal = dm.to_mol("CCCC=O")
    butanal = dm.conformers.generate(butanal, rms_cutoff=None, minimize_energy=True)
    assert dm.conformers.rmsd(butanal).shape == (50, 50)
def test_cluster():
    """Conformer clustering with and without centroid reduction/minimization."""
    # no centroids
    aspirin = dm.to_mol("O=C(C)Oc1ccccc1C(=O)O")
    aspirin = dm.conformers.generate(aspirin, rms_cutoff=None)
    clusters = dm.conformers.cluster(aspirin, centroids=False)
    assert len(clusters) == 2
    assert clusters[0].GetNumConformers() > 30
    assert clusters[1].GetNumConformers() > 5
    # centroids
    aspirin = dm.to_mol("O=C(C)Oc1ccccc1C(=O)O")
    aspirin = dm.conformers.generate(aspirin, rms_cutoff=None)
    centroid_mol = dm.conformers.cluster(aspirin, centroids=True)
    assert centroid_mol.GetNumConformers() == 2
    # no centroids - minimize
    aspirin = dm.to_mol("O=C(C)Oc1ccccc1C(=O)O")
    aspirin = dm.conformers.generate(aspirin, rms_cutoff=None, minimize_energy=True)
    clusters = dm.conformers.cluster(aspirin, centroids=False)
    assert len(clusters) == 2
    assert clusters[0].GetNumConformers() > 30
    assert clusters[1].GetNumConformers() > 5
    # centroids - minimize
    aspirin = dm.to_mol("O=C(C)Oc1ccccc1C(=O)O")
    aspirin = dm.conformers.generate(aspirin, rms_cutoff=None, minimize_energy=True)
    centroid_mol = dm.conformers.cluster(aspirin, centroids=True)
    assert centroid_mol.GetNumConformers() == 2
def test_get_coords():
    """Coordinates come back as an (n_atoms, 3) array."""
    ethane = dm.conformers.generate(dm.to_mol("CC"), n_confs=1)
    assert dm.conformers.get_coords(ethane).shape == (2, 3)
def test_center_of_mass():
    """Geometric center matches the coordinate mean; mass center is a 3-vector."""
    ethane = dm.conformers.generate(dm.to_mol("CC"), n_confs=1)
    # geometric center (unweighted)
    geo_center = dm.conformers.center_of_mass(ethane, use_atoms=False)
    xyz = dm.conformers.get_coords(ethane)
    np.testing.assert_array_almost_equal(xyz.mean(axis=0), geo_center)
    # mass-weighted center
    mass_center = dm.conformers.center_of_mass(ethane, use_atoms=True)
    assert mass_center.shape == (3,)
def test_translate():
    """Translating by (10, 10, 10) shifts every coordinate by 10."""
    ethane = dm.conformers.generate(dm.to_mol("CC"), n_confs=1)
    before = dm.conformers.get_coords(ethane)
    print(before)
    dm.conformers.translate(ethane, [10, 10, 10])
    after = dm.conformers.get_coords(ethane)
    print(after - 10)
    np.testing.assert_array_almost_equal(before, after - 10, decimal=1)
| 30.323529
| 77
| 0.678468
| 579
| 4,124
| 4.689119
| 0.145078
| 0.066298
| 0.110497
| 0.058932
| 0.833886
| 0.813996
| 0.760958
| 0.707919
| 0.707919
| 0.669613
| 0
| 0.020317
| 0.188409
| 4,124
| 135
| 78
| 30.548148
| 0.790857
| 0.026188
| 0
| 0.642105
| 1
| 0
| 0.052894
| 0.031437
| 0
| 0
| 0
| 0
| 0.231579
| 1
| 0.073684
| false
| 0
| 0.031579
| 0
| 0.105263
| 0.021053
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2aa859b1598e9c1144997641bd4fa44973fdb560
| 167
|
py
|
Python
|
froide/foirequest/tests/__init__.py
|
OpendataCH/froide
|
8136bac0d8caa56f9cfc7ba15480be987280e55d
|
[
"MIT"
] | null | null | null |
froide/foirequest/tests/__init__.py
|
OpendataCH/froide
|
8136bac0d8caa56f9cfc7ba15480be987280e55d
|
[
"MIT"
] | null | null | null |
froide/foirequest/tests/__init__.py
|
OpendataCH/froide
|
8136bac0d8caa56f9cfc7ba15480be987280e55d
|
[
"MIT"
] | null | null | null |
from .test_admin import * # noqa
from .test_api import * # noqa
from .test_mail import * # noqa
from .test_request import * # noqa
from .test_web import * # noqa
| 27.833333
| 35
| 0.700599
| 25
| 167
| 4.48
| 0.36
| 0.357143
| 0.5
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.209581
| 167
| 5
| 36
| 33.4
| 0.848485
| 0.143713
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
2ac7aed153982a79d7a697c7db9f0c9f55485700
| 8,845
|
py
|
Python
|
crowdgezwitscher/contact/tests/test_mail.py
|
Strassengezwitscher/Crowdgezwitscher
|
afdd433acb35c1a554ba79464b744975de065151
|
[
"MIT"
] | 4
|
2016-07-22T07:20:31.000Z
|
2016-11-13T18:13:34.000Z
|
crowdgezwitscher/contact/tests/test_mail.py
|
Strassengezwitscher/Strassengezwitscher
|
afdd433acb35c1a554ba79464b744975de065151
|
[
"MIT"
] | 402
|
2016-04-26T08:38:17.000Z
|
2022-03-11T23:26:49.000Z
|
crowdgezwitscher/contact/tests/test_mail.py
|
Strassengezwitscher/Crowdgezwitscher
|
afdd433acb35c1a554ba79464b744975de065151
|
[
"MIT"
] | 1
|
2018-01-14T16:58:57.000Z
|
2018-01-14T16:58:57.000Z
|
from django.test import TestCase
from contact.mail import GPGEmailMessage
from contact.utils import GPGException
class MailTests(TestCase):
fixtures = ['gpg_key.json']
def test_basic_encrypt(self):
    """Test if PGP/MIME format requirements look fulfilled and if headers are correct."""
    subject = "Dear friend, special offer for you"
    body = "Buy cheap viagra"
    from_address = "cheap@viagra.biz"
    to_addresses = ["john.smith@example.org"]
    message = GPGEmailMessage(subject, body, from_address, to_addresses)
    # The constructor must keep every envelope field intact.
    self.assertEqual(message.subject, subject)
    self.assertEqual(message.body, body)
    self.assertEqual(message.from_email, from_address)
    self.assertEqual(message.to, to_addresses)
    self.assertEqual(message.recipients(), to_addresses)
    raw = str(message.message())
    # PGP/MIME structure markers and headers that must survive encryption.
    for fragment in (
        'Content-Type: multipart/encrypted;',
        'protocol="application/pgp-encrypted";',
        'Content-Type: application/pgp-encrypted',
        'Content-Type: application/octet-stream; name="encrypted.asc"',
        'Content-Disposition: inline; filename="encrypted.asc"',
        'Content-Description: OpenPGP encrypted message',
        'Content-Description: PGP/MIME Versions Identification',
        'Subject: %s' % subject,
        'BEGIN PGP MESSAGE',
        'From: %s' % from_address,
        'To: %s' % to_addresses[0],
    ):
        self.assertTrue(fragment in raw)
    self.assertEqual(raw.count('Content-Transfer-Encoding: 7bit'), 3)
def test_encrypt_with_attachment(self):
    """
    Test if PGP/MIME format requirements look fulfilled and if headers are correct when an attachment is present.
    As the message is encrypted we cannot directly check for the file being attached.
    """
    subject = "Dear friend, special offer for you"
    body = "Buy cheap viagra"
    from_address = "cheap@viagra.biz"
    to_addresses = ["john.smith@example.org"]
    message = GPGEmailMessage(subject, body, from_address, to_addresses)
    self.assertEqual(len(message.attachments), 0)
    # Attach one file and verify it is registered before encryption.
    attachment_name = "dolphin_diary.txt"
    attachment_content = "Thanks for all the fish."
    message.attach(attachment_name, attachment_content)
    self.assertEqual(len(message.attachments), 1)
    self.assertEqual(message.attachments[0][0], attachment_name)
    self.assertEqual(message.attachments[0][1], attachment_content)
    raw = str(message.message())
    # PGP/MIME structure markers and headers that must survive encryption.
    for fragment in (
        'Content-Type: multipart/encrypted;',
        'protocol="application/pgp-encrypted";',
        'Content-Type: application/pgp-encrypted',
        'Content-Type: application/octet-stream; name="encrypted.asc"',
        'Content-Disposition: inline; filename="encrypted.asc"',
        'Content-Description: OpenPGP encrypted message',
        'Content-Description: PGP/MIME Versions Identification',
        'Subject: %s' % subject,
        'BEGIN PGP MESSAGE',
        'From: %s' % from_address,
        'To: %s' % to_addresses[0],
    ):
        self.assertTrue(fragment in raw)
    self.assertEqual(raw.count('Content-Transfer-Encoding: 7bit'), 3)
def test_encrypt_with_two_attachments(self):
    """
    Test if PGP/MIME format requirements look fulfilled and if headers are correct when two attachments are present.
    As the message is encrypted we cannot directly check for the files being attached.
    """
    subject = "Dear friend, special offer for you"
    body = "Buy cheap viagra"
    from_address = "cheap@viagra.biz"
    to_addresses = ["john.smith@example.org"]
    email = GPGEmailMessage(subject, body, from_address, to_addresses)
    self.assertEqual(len(email.attachments), 0)
    attachment_name1 = "dolphin_diary.txt"
    attachment_name2 = "ancient_dolphin_diary.txt"
    attachment_content = "Thanks for all the fish."
    email.attach(attachment_name1, attachment_content)
    email.attach(attachment_name2, attachment_content)
    # Both attachments must be recorded in attach() call order.
    self.assertEqual(len(email.attachments), 2)
    self.assertEqual(email.attachments[0][0], attachment_name1)
    self.assertEqual(email.attachments[0][1], attachment_content)
    self.assertEqual(email.attachments[1][0], attachment_name2)
    self.assertEqual(email.attachments[1][1], attachment_content)
    email = str(email.message())
    # assertIn produces a far more helpful failure message than assertTrue('x' in y).
    self.assertIn('Content-Type: multipart/encrypted;', email)
    self.assertIn('protocol="application/pgp-encrypted";', email)
    self.assertIn('Content-Type: application/pgp-encrypted', email)
    self.assertIn('Content-Type: application/octet-stream; name="encrypted.asc"', email)
    self.assertIn('Content-Disposition: inline; filename="encrypted.asc"', email)
    self.assertIn('Content-Description: OpenPGP encrypted message', email)
    self.assertIn('Content-Description: PGP/MIME Versions Identification', email)
    # RFC 3156 layout: version part, encrypted part and the container itself are all 7bit.
    self.assertEqual(email.count('Content-Transfer-Encoding: 7bit'), 3)
    self.assertIn('Subject: %s' % subject, email)
    self.assertIn('BEGIN PGP MESSAGE', email)
    self.assertIn('From: %s' % from_address, email)
    self.assertIn('To: %s' % to_addresses[0], email)
def test_encrypt_missing_public_key(self):
    """
    Test if a missing GPG public key for at least one receiver leads to an error and the email not being sent.
    """
    recipients = ["john.smith@example.org", "unknown@person.com"]
    sender = "cheap@viagra.biz"
    mail_subject = "Dear friend, special offer for you"
    mail_body = "Buy cheap viagra"
    message = GPGEmailMessage(mail_subject, mail_body, sender, recipients)
    # Sanity-check the message state before attempting to send.
    self.assertEqual(len(message.attachments), 0)
    self.assertEqual(message.body, mail_body)
    self.assertEqual(message.to, recipients)
    self.assertEqual(message.recipients(), recipients)
    # One recipient has no public key on the keyring, so sending must raise.
    with self.assertRaises(GPGException):
        message.send()
    # The failed send attempt must leave the message untouched.
    self.assertEqual(len(message.attachments), 0)
def test_additional_mail_headers(self):
    """
    Test if some mail headers we do not use still work.
    We have overridden the method that is responsible for them and do not want to break existing functionality.
    """
    subject = "Dear friend, special offer for you"
    body = "Buy cheap viagra"
    from_address = "change@me.com"
    to_addresses = ["john.smith@example.org"]
    cc_addresses = ["john.smith@example.org"]
    bcc_addresses = ["john.smith@example.org"]
    reply_to_addresses = ["another@address.org"]
    second_from_address = 'cheap@viagra.biz'
    email = GPGEmailMessage(subject, body, from_address, to_addresses,
                            cc=cc_addresses, bcc=bcc_addresses, reply_to=reply_to_addresses)
    email.extra_headers['Foo'] = 'Bar'
    # An explicit From header must override the constructor's from_email.
    email.extra_headers['From'] = second_from_address
    self.assertEqual(email.subject, subject)
    self.assertEqual(email.body, body)
    self.assertEqual(email.from_email, from_address)
    # BUG FIX: the original compared `list.sort()` results, which are always None,
    # so the assertion could never fail. sorted() returns the lists to compare.
    self.assertEqual(sorted(email.recipients()), sorted(to_addresses + cc_addresses + bcc_addresses))
    email = str(email.message())
    # assertIn produces a far more helpful failure message than assertTrue('x' in y).
    self.assertIn('Content-Type: multipart/encrypted;', email)
    self.assertIn('protocol="application/pgp-encrypted";', email)
    self.assertIn('Content-Type: application/pgp-encrypted', email)
    self.assertIn('Content-Type: application/octet-stream; name="encrypted.asc"', email)
    self.assertIn('Content-Disposition: inline; filename="encrypted.asc"', email)
    self.assertIn('Content-Description: OpenPGP encrypted message', email)
    self.assertIn('Content-Description: PGP/MIME Versions Identification', email)
    self.assertEqual(email.count('Content-Transfer-Encoding: 7bit'), 3)
    self.assertIn('Subject: %s' % subject, email)
    self.assertIn('BEGIN PGP MESSAGE', email)
    # The extra 'From' header must win, and appear exactly once.
    self.assertIn('From: %s' % second_from_address, email)
    self.assertIn('To: %s' % to_addresses[0], email)
    self.assertIn('Cc: %s' % cc_addresses[0], email)
    self.assertIn('Reply-To: %s' % reply_to_addresses[0], email)
    self.assertIn('Foo: Bar', email)
    self.assertEqual(email.count('From:'), 1)
| 53.606061
| 120
| 0.682985
| 1,067
| 8,845
| 5.571696
| 0.146204
| 0.110681
| 0.081413
| 0.137763
| 0.84693
| 0.82069
| 0.799159
| 0.75963
| 0.732212
| 0.732212
| 0
| 0.005559
| 0.206783
| 8,845
| 164
| 121
| 53.932927
| 0.84179
| 0.082872
| 0
| 0.69697
| 0
| 0
| 0.284606
| 0.090989
| 0
| 0
| 0
| 0
| 0.583333
| 1
| 0.037879
| false
| 0
| 0.022727
| 0
| 0.075758
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
631c0f98398f0ff201cbca38361db6d5f3022347
| 52,742
|
py
|
Python
|
intranet/org/migrations/0001_initial.py
|
kiberpipa/Intranet
|
2bc2ffd4e9e9f36c78d0444b60575fa1de562f73
|
[
"BSD-2-Clause-FreeBSD"
] | 3
|
2015-01-20T17:25:37.000Z
|
2021-09-16T17:28:02.000Z
|
intranet/org/migrations/0001_initial.py
|
avian2/kiberpipa-intranet
|
e36f09e3fe74c95d73ea61e4efed8f42b97d08ea
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
intranet/org/migrations/0001_initial.py
|
avian2/kiberpipa-intranet
|
e36f09e3fe74c95d73ea61e4efed8f42b97d08ea
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2017-09-11T22:13:51.000Z
|
2022-01-14T18:19:35.000Z
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """
    Create the initial schema for the 'org' app.

    Auto-generated South migration: creates every model table, the audit
    shadow table for Project, and the M2M join tables, then fires South's
    create signals. Statement order matters (FK targets are referenced via
    the frozen ``orm`` dict), so the generated sequence is kept as-is.
    """
    # Adding model 'Tag'
    db.create_table('org_tag', (
        # NOTE(review): primary_key='True' is the *string* 'True', not the
        # boolean — it is truthy so it works, but looks like generator noise.
        ('name', self.gf('django.db.models.fields.CharField')(max_length=200, primary_key='True')),
        ('total_ref', self.gf('django.db.models.fields.IntegerField')(default=0, blank=True)),
        ('font_size', self.gf('django.db.models.fields.IntegerField')(default=0, blank=True)),
        # Self-referential FK: tags form a tree.
        ('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['org.Tag'], null=True, blank=True)),
    ))
    db.send_create_signal('org', ['Tag'])
    # Adding model 'ProjectAudit'
    # Shadow table mirroring 'Project' plus _audit_* bookkeeping columns;
    # 'id' here is a plain indexed IntegerField referencing the audited row.
    db.create_table('org_project_audit', (
        ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ('responsible', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
        ('note', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
        ('salary_rate', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
        ('verbose_name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['org.Project'], null=True, blank=True)),
        ('salary_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['mercenaries.SalaryType'], null=True, blank=True)),
        ('cost_center', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['mercenaries.CostCenter'], null=True, blank=True)),
        ('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ('chg_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ('email_members', self.gf('django.db.models.fields.NullBooleanField')(default=True, null=True, blank=True)),
        ('_audit_id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('_audit_timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
        ('_audit_change_type', self.gf('django.db.models.fields.CharField')(max_length=1)),
        ('id', self.gf('django.db.models.fields.IntegerField')(db_index=True)),
    ))
    db.send_create_signal('org', ['ProjectAudit'])
    # Adding model 'Project'
    db.create_table('org_project', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ('responsible', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
        ('note', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
        ('salary_rate', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
        ('verbose_name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['org.Project'], null=True, blank=True)),
        ('salary_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['mercenaries.SalaryType'], null=True, blank=True)),
        ('cost_center', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['mercenaries.CostCenter'], null=True, blank=True)),
        ('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ('chg_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ('email_members', self.gf('django.db.models.fields.NullBooleanField')(default=True, null=True, blank=True)),
    ))
    db.send_create_signal('org', ['Project'])
    # Adding model 'Category'
    db.create_table('org_category', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ('note', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ('chg_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
    ))
    db.send_create_signal('org', ['Category'])
    # Adding model 'Place'
    db.create_table('org_place', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ('note', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
    ))
    db.send_create_signal('org', ['Place'])
    # Adding model 'EmailBlacklist'
    db.create_table('org_emailblacklist', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('blacklisted', self.gf('django.db.models.fields.EmailField')(unique=True, max_length=75, db_index=True)),
    ))
    db.send_create_signal('org', ['EmailBlacklist'])
    # Adding model 'Email'
    db.create_table('org_email', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
    ))
    db.send_create_signal('org', ['Email'])
    # Adding model 'Phone'
    db.create_table('org_phone', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('phone', self.gf('django.db.models.fields.CharField')(max_length=100)),
    ))
    db.send_create_signal('org', ['Phone'])
    # Adding model 'Organization'
    db.create_table('org_organization', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('organization', self.gf('django.db.models.fields.CharField')(max_length=100)),
    ))
    db.send_create_signal('org', ['Organization'])
    # Adding model 'Role'
    db.create_table('org_role', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('role', self.gf('django.db.models.fields.CharField')(max_length=100)),
    ))
    db.send_create_signal('org', ['Role'])
    # Adding model 'IntranetImage'
    db.create_table('org_intranetimage', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
        # md5 of the image content, unique to deduplicate uploads.
        ('md5', self.gf('django.db.models.fields.CharField')(db_index=True, unique=True, max_length=32, blank=True)),
    ))
    db.send_create_signal('org', ['IntranetImage'])
    # Adding model 'Event'
    db.create_table('org_event', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('responsible', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
        ('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=150, null=True, blank=True)),
        ('start_date', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
        ('end_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
        ('length', self.gf('django.db.models.fields.TimeField')()),
        ('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['org.Project'])),
        ('require_technician', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('require_video', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('visitors', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
        ('public', self.gf('django.db.models.fields.BooleanField')(default=True)),
        # 'sl' = Slovenian, the site's default event language.
        ('language', self.gf('django.db.models.fields.CharField')(default='sl', max_length=2, null=True, blank=True)),
        ('sequence', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
        ('slides', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True, blank=True)),
        ('announce', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ('short_announce', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ('note', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ('chg_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ('place', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['org.Place'])),
        ('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['org.Category'])),
        ('event_image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['org.IntranetImage'], null=True, blank=True)),
    ))
    db.send_create_signal('org', ['Event'])
    # Adding M2M table for field technician on 'Event'
    db.create_table('org_event_technician', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('event', models.ForeignKey(orm['org.event'], null=False)),
        ('user', models.ForeignKey(orm['auth.user'], null=False))
    ))
    db.create_unique('org_event_technician', ['event_id', 'user_id'])
    # Adding M2M table for field tags on 'Event'
    db.create_table('org_event_tags', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('event', models.ForeignKey(orm['org.event'], null=False)),
        ('tag', models.ForeignKey(orm['org.tag'], null=False))
    ))
    db.create_unique('org_event_tags', ['event_id', 'tag_id'])
    # Adding M2M table for field emails on 'Event'
    db.create_table('org_event_emails', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('event', models.ForeignKey(orm['org.event'], null=False)),
        ('email', models.ForeignKey(orm['org.email'], null=False))
    ))
    db.create_unique('org_event_emails', ['event_id', 'email_id'])
    # Adding model 'TipSodelovanja'
    db.create_table('org_tipsodelovanja', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=40)),
    ))
    db.send_create_signal('org', ['TipSodelovanja'])
    # Adding model 'Person'
    db.create_table('org_person', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ('note', self.gf('django.db.models.fields.CharField')(max_length=230, null=True, blank=True)),
        ('title', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
    ))
    db.send_create_signal('org', ['Person'])
    # Adding M2M table for field email on 'Person'
    db.create_table('org_person_email', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('person', models.ForeignKey(orm['org.person'], null=False)),
        ('email', models.ForeignKey(orm['org.email'], null=False))
    ))
    db.create_unique('org_person_email', ['person_id', 'email_id'])
    # Adding M2M table for field phone on 'Person'
    db.create_table('org_person_phone', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('person', models.ForeignKey(orm['org.person'], null=False)),
        ('phone', models.ForeignKey(orm['org.phone'], null=False))
    ))
    db.create_unique('org_person_phone', ['person_id', 'phone_id'])
    # Adding M2M table for field organization on 'Person'
    db.create_table('org_person_organization', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('person', models.ForeignKey(orm['org.person'], null=False)),
        ('organization', models.ForeignKey(orm['org.organization'], null=False))
    ))
    db.create_unique('org_person_organization', ['person_id', 'organization_id'])
    # Adding M2M table for field role on 'Person'
    db.create_table('org_person_role', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('person', models.ForeignKey(orm['org.person'], null=False)),
        ('role', models.ForeignKey(orm['org.role'], null=False))
    ))
    db.create_unique('org_person_role', ['person_id', 'role_id'])
    # Adding model 'Sodelovanje'
    db.create_table('org_sodelovanje', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('event', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['org.Event'], null=True, blank=True)),
        ('tip', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['org.TipSodelovanja'], null=True, blank=True)),
        ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['org.Person'])),
        ('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['org.Project'], null=True, blank=True)),
        ('note', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
    ))
    db.send_create_signal('org', ['Sodelovanje'])
    # Adding model 'Task'
    db.create_table('org_task', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ('responsible', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
        ('note', self.gf('django.db.models.fields.TextField')(blank=True)),
        ('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ('chg_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
    ))
    db.send_create_signal('org', ['Task'])
    # Adding model 'Diary'
    db.create_table('org_diary', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('author', self.gf('django.db.models.fields.related.ForeignKey')(related_name='diary_author', to=orm['auth.User'])),
        # NOTE(review): field is named 'task' but points at org.Project, not org.Task — confirm intended.
        ('task', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['org.Project'])),
        # The literal dates below were frozen at migration-generation time (2010-11-08)
        # by South; they are static column defaults, not "today".
        ('date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.date(2010, 11, 8), db_index=True)),
        ('length', self.gf('django.db.models.fields.TimeField')(default=datetime.time(4, 0))),
        ('event', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['org.Event'], null=True, blank=True)),
        ('log_formal', self.gf('django.db.models.fields.TextField')()),
        ('log_informal', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ('chg_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
    ))
    db.send_create_signal('org', ['Diary'])
    # Adding M2M table for field tags on 'Diary'
    db.create_table('org_diary_tags', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('diary', models.ForeignKey(orm['org.diary'], null=False)),
        ('tag', models.ForeignKey(orm['org.tag'], null=False))
    ))
    db.create_unique('org_diary_tags', ['diary_id', 'tag_id'])
    # Adding model 'StickyNote'
    db.create_table('org_stickynote', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('author', self.gf('django.db.models.fields.related.ForeignKey')(related_name='message_author', to=orm['auth.User'])),
        ('post_date', self.gf('django.db.models.fields.DateField')(default=datetime.date(2010, 11, 8))),
        ('due_date', self.gf('django.db.models.fields.DateField')(default=datetime.date(2010, 11, 13))),
        ('note', self.gf('django.db.models.fields.TextField')()),
        ('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ('chg_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
    ))
    db.send_create_signal('org', ['StickyNote'])
    # Adding M2M table for field tags on 'StickyNote'
    db.create_table('org_stickynote_tags', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('stickynote', models.ForeignKey(orm['org.stickynote'], null=False)),
        ('tag', models.ForeignKey(orm['org.tag'], null=False))
    ))
    db.create_unique('org_stickynote_tags', ['stickynote_id', 'tag_id'])
    # Adding model 'Lend'
    db.create_table('org_lend', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('what', self.gf('django.db.models.fields.CharField')(max_length=200)),
        ('to_who', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
        ('from_who', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
        ('from_date', self.gf('django.db.models.fields.DateField')(default=datetime.date(2010, 11, 8))),
        ('due_date', self.gf('django.db.models.fields.DateField')(default=datetime.date(2010, 11, 9))),
        ('contact_info', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
        ('why', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
        ('returned', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('note', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ('chg_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
    ))
    db.send_create_signal('org', ['Lend'])
    # Adding model 'KbCategory'
    db.create_table('org_kbcategory', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('title', self.gf('django.db.models.fields.CharField')(max_length=150)),
        ('slug', self.gf('django.db.models.fields.SlugField')(max_length=75, db_index=True)),
        ('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ('chg_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
    ))
    db.send_create_signal('org', ['KbCategory'])
    # Adding model 'KB'
    db.create_table('org_kb', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('title', self.gf('django.db.models.fields.CharField')(max_length=150)),
        ('slug', self.gf('django.db.models.fields.SlugField')(max_length=75, db_index=True)),
        ('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['org.KbCategory'])),
        ('content', self.gf('django.db.models.fields.TextField')()),
        ('editor', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
        ('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ('chg_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
    ))
    db.send_create_signal('org', ['KB'])
    # Adding M2M table for field project on 'KB'
    db.create_table('org_kb_project', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('kb', models.ForeignKey(orm['org.kb'], null=False)),
        ('project', models.ForeignKey(orm['org.project'], null=False))
    ))
    db.create_unique('org_kb_project', ['kb_id', 'project_id'])
    # Adding M2M table for field task on 'KB'
    db.create_table('org_kb_task', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('kb', models.ForeignKey(orm['org.kb'], null=False)),
        ('task', models.ForeignKey(orm['org.task'], null=False))
    ))
    db.create_unique('org_kb_task', ['kb_id', 'task_id'])
    # Adding model 'Shopping'
    db.create_table('org_shopping', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ('author', self.gf('django.db.models.fields.related.ForeignKey')(related_name='shopping_author', to=orm['auth.User'])),
        ('explanation', self.gf('django.db.models.fields.TextField')()),
        ('cost', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=2, blank=True)),
        ('bought', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('responsible', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='shopping_responsible', null=True, to=orm['auth.User'])),
        ('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ('chg_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
    ))
    db.send_create_signal('org', ['Shopping'])
    # Adding M2M table for field supporters on 'Shopping'
    db.create_table('org_shopping_supporters', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('shopping', models.ForeignKey(orm['org.shopping'], null=False)),
        ('user', models.ForeignKey(orm['auth.user'], null=False))
    ))
    db.create_unique('org_shopping_supporters', ['shopping_id', 'user_id'])
    # Adding M2M table for field project on 'Shopping'
    db.create_table('org_shopping_project', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('shopping', models.ForeignKey(orm['org.shopping'], null=False)),
        ('project', models.ForeignKey(orm['org.project'], null=False))
    ))
    db.create_unique('org_shopping_project', ['shopping_id', 'project_id'])
    # Adding M2M table for field tags on 'Shopping'
    db.create_table('org_shopping_tags', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('shopping', models.ForeignKey(orm['org.shopping'], null=False)),
        ('tag', models.ForeignKey(orm['org.tag'], null=False))
    ))
    db.create_unique('org_shopping_tags', ['shopping_id', 'tag_id'])
    # Adding model 'Scratchpad'
    db.create_table('org_scratchpad', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('content', self.gf('django.db.models.fields.TextField')()),
        ('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
        ('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ('chg_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
    ))
    db.send_create_signal('org', ['Scratchpad'])
def backwards(self, orm):
# Deleting model 'Tag'
db.delete_table('org_tag')
# Deleting model 'ProjectAudit'
db.delete_table('org_project_audit')
# Deleting model 'Project'
db.delete_table('org_project')
# Deleting model 'Category'
db.delete_table('org_category')
# Deleting model 'Place'
db.delete_table('org_place')
# Deleting model 'EmailBlacklist'
db.delete_table('org_emailblacklist')
# Deleting model 'Email'
db.delete_table('org_email')
# Deleting model 'Phone'
db.delete_table('org_phone')
# Deleting model 'Organization'
db.delete_table('org_organization')
# Deleting model 'Role'
db.delete_table('org_role')
# Deleting model 'IntranetImage'
db.delete_table('org_intranetimage')
# Deleting model 'Event'
db.delete_table('org_event')
# Removing M2M table for field technician on 'Event'
db.delete_table('org_event_technician')
# Removing M2M table for field tags on 'Event'
db.delete_table('org_event_tags')
# Removing M2M table for field emails on 'Event'
db.delete_table('org_event_emails')
# Deleting model 'TipSodelovanja'
db.delete_table('org_tipsodelovanja')
# Deleting model 'Person'
db.delete_table('org_person')
# Removing M2M table for field email on 'Person'
db.delete_table('org_person_email')
# Removing M2M table for field phone on 'Person'
db.delete_table('org_person_phone')
# Removing M2M table for field organization on 'Person'
db.delete_table('org_person_organization')
# Removing M2M table for field role on 'Person'
db.delete_table('org_person_role')
# Deleting model 'Sodelovanje'
db.delete_table('org_sodelovanje')
# Deleting model 'Task'
db.delete_table('org_task')
# Deleting model 'Diary'
db.delete_table('org_diary')
# Removing M2M table for field tags on 'Diary'
db.delete_table('org_diary_tags')
# Deleting model 'StickyNote'
db.delete_table('org_stickynote')
# Removing M2M table for field tags on 'StickyNote'
db.delete_table('org_stickynote_tags')
# Deleting model 'Lend'
db.delete_table('org_lend')
# Deleting model 'KbCategory'
db.delete_table('org_kbcategory')
# Deleting model 'KB'
db.delete_table('org_kb')
# Removing M2M table for field project on 'KB'
db.delete_table('org_kb_project')
# Removing M2M table for field task on 'KB'
db.delete_table('org_kb_task')
# Deleting model 'Shopping'
db.delete_table('org_shopping')
# Removing M2M table for field supporters on 'Shopping'
db.delete_table('org_shopping_supporters')
# Removing M2M table for field project on 'Shopping'
db.delete_table('org_shopping_project')
# Removing M2M table for field tags on 'Shopping'
db.delete_table('org_shopping_tags')
# Deleting model 'Scratchpad'
db.delete_table('org_scratchpad')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'mercenaries.costcenter': {
'Meta': {'object_name': 'CostCenter'},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'wage_per_hour': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'})
},
'mercenaries.salarytype': {
'Meta': {'object_name': 'SalaryType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'org.category': {
'Meta': {'object_name': 'Category'},
'chg_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'org.diary': {
'Meta': {'object_name': 'Diary'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'diary_author'", 'to': "orm['auth.User']"}),
'chg_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.date(2010, 11, 8)', 'db_index': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['org.Event']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length': ('django.db.models.fields.TimeField', [], {'default': 'datetime.time(4, 0)'}),
'log_formal': ('django.db.models.fields.TextField', [], {}),
'log_informal': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['org.Tag']", 'null': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['org.Project']"})
},
'org.email': {
'Meta': {'object_name': 'Email'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'org.emailblacklist': {
'Meta': {'object_name': 'EmailBlacklist'},
'blacklisted': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'org.event': {
'Meta': {'ordering': "('title',)", 'object_name': 'Event'},
'announce': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['org.Category']"}),
'chg_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'emails': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['org.Email']", 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'event_image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['org.IntranetImage']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'sl'", 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'length': ('django.db.models.fields.TimeField', [], {}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'place': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['org.Place']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['org.Project']"}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'require_technician': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'require_video': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'responsible': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'sequence': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'short_announce': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slides': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '150', 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['org.Tag']", 'null': 'True', 'blank': 'True'}),
'technician': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'event_technican'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'visitors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'org.intranetimage': {
'Meta': {'ordering': "('-image',)", 'object_name': 'IntranetImage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'md5': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'org.kb': {
'Meta': {'object_name': 'KB'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['org.KbCategory']"}),
'chg_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['org.Project']", 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '75', 'db_index': 'True'}),
'task': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['org.Task']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'org.kbcategory': {
'Meta': {'object_name': 'KbCategory'},
'chg_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '75', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'org.lend': {
'Meta': {'object_name': 'Lend'},
'chg_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'contact_info': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'due_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date(2010, 11, 9)'}),
'from_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date(2010, 11, 8)'}),
'from_who': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'returned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'to_who': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'why': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'org.organization': {
'Meta': {'object_name': 'Organization'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'org.person': {
'Meta': {'ordering': "('name',)", 'object_name': 'Person'},
'email': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['org.Email']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '230', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['org.Organization']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['org.Phone']", 'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['org.Role']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'org.phone': {
'Meta': {'object_name': 'Phone'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'org.place': {
'Meta': {'object_name': 'Place'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'org.project': {
'Meta': {'ordering': "('name',)", 'object_name': 'Project'},
'chg_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'cost_center': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mercenaries.CostCenter']", 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'email_members': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['org.Project']", 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'responsible': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'salary_rate': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'salary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mercenaries.SalaryType']", 'null': 'True', 'blank': 'True'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'org.projectaudit': {
'Meta': {'ordering': "['-_audit_timestamp']", 'object_name': 'ProjectAudit', 'db_table': "'org_project_audit'"},
'_audit_change_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'_audit_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'_audit_timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'chg_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'cost_center': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mercenaries.CostCenter']", 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'email_members': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['org.Project']", 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'responsible': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'salary_rate': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'salary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mercenaries.SalaryType']", 'null': 'True', 'blank': 'True'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'org.role': {
'Meta': {'object_name': 'Role'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'org.scratchpad': {
'Meta': {'object_name': 'Scratchpad'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'chg_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'org.shopping': {
'Meta': {'object_name': 'Shopping'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shopping_author'", 'to': "orm['auth.User']"}),
'bought': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'chg_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'cost': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'explanation': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['org.Project']", 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'responsible': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'shopping_responsible'", 'null': 'True', 'to': "orm['auth.User']"}),
'supporters': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopping_supporters'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['org.Tag']", 'null': 'True', 'blank': 'True'})
},
'org.sodelovanje': {
'Meta': {'ordering': "('-event__start_date',)", 'object_name': 'Sodelovanje'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['org.Event']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['org.Person']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['org.Project']", 'null': 'True', 'blank': 'True'}),
'tip': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['org.TipSodelovanja']", 'null': 'True', 'blank': 'True'})
},
'org.stickynote': {
'Meta': {'object_name': 'StickyNote'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'message_author'", 'to': "orm['auth.User']"}),
'chg_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'due_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date(2010, 11, 13)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'post_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date(2010, 11, 8)'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['org.Tag']", 'null': 'True', 'blank': 'True'})
},
'org.tag': {
'Meta': {'object_name': 'Tag'},
'font_size': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'primary_key': "'True'"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['org.Tag']", 'null': 'True', 'blank': 'True'}),
'total_ref': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'org.task': {
'Meta': {'object_name': 'Task'},
'chg_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'responsible': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'org.tipsodelovanja': {
'Meta': {'object_name': 'TipSodelovanja'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
}
}
complete_apps = ['org']
| 66.509458
| 208
| 0.587634
| 6,019
| 52,742
| 5.020601
| 0.036385
| 0.092657
| 0.161686
| 0.230981
| 0.858334
| 0.84298
| 0.825209
| 0.778881
| 0.72332
| 0.671862
| 0
| 0.008279
| 0.193868
| 52,742
| 792
| 209
| 66.593434
| 0.702472
| 0.046775
| 0
| 0.396226
| 0
| 0
| 0.473874
| 0.267954
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003145
| false
| 0.001572
| 0.006289
| 0
| 0.014151
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2d877d942adb98a74d656ab0fbee2908e9a2ab22
| 5,765
|
py
|
Python
|
app_api/migrations/0012_album_info_setup.py
|
pkucsie/SIEPServer
|
00b0637eb8302135dfc772fccd18cd749a93e5c6
|
[
"Apache-2.0"
] | 2
|
2021-02-12T10:02:42.000Z
|
2021-03-15T13:08:04.000Z
|
app_api/migrations/0012_album_info_setup.py
|
pkucsie/SIEPServer
|
00b0637eb8302135dfc772fccd18cd749a93e5c6
|
[
"Apache-2.0"
] | null | null | null |
app_api/migrations/0012_album_info_setup.py
|
pkucsie/SIEPServer
|
00b0637eb8302135dfc772fccd18cd749a93e5c6
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.2 on 2021-11-08 16:39
from django.db import migrations, models
import tyadmin_api_cli.fields
class Migration(migrations.Migration):
dependencies = [
('app_api', '0011_score'),
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('userid', models.CharField(max_length=255, verbose_name='用户id')),
('title', models.CharField(max_length=255, unique=True, verbose_name='标题')),
('content', models.CharField(max_length=255, verbose_name='内容')),
('desc', models.CharField(blank=True, max_length=255, null=True, verbose_name='描述')),
('status', models.CharField(choices=[('UNUSE', '待审核'), ('COMM', '正常'), ('OVER', '结束'), ('PEDDING', '停用'), ('DEL', '删除')], max_length=255, verbose_name='状态')),
('type', models.CharField(default='其他', max_length=255, verbose_name='类型')),
('order', models.IntegerField(default=9999, verbose_name='排序')),
('viewcnt', models.IntegerField(default=0, verbose_name='访问次数')),
('favcnt', models.IntegerField(default=0, verbose_name='收藏人数')),
('commentcnt', models.IntegerField(default=0, verbose_name='评论数')),
('likecnt', models.IntegerField(default=0, verbose_name='点赞数')),
('pic', tyadmin_api_cli.fields.SImageField(max_length=255, upload_to='Album_img', verbose_name='附加图片')),
('addtime', models.DateTimeField(auto_now=True, verbose_name='添加时间')),
('edittime', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
('addip', models.CharField(blank=True, max_length=255, null=True, verbose_name='添加IP')),
('editip', models.CharField(blank=True, max_length=255, null=True, verbose_name='修改IP')),
],
options={
'verbose_name': '校友录',
'verbose_name_plural': '校友录',
},
),
migrations.CreateModel(
name='Info',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('userid', models.CharField(max_length=255, verbose_name='用户id')),
('title', models.CharField(max_length=255, verbose_name='标题')),
('content', models.CharField(max_length=255, verbose_name='内容')),
('desc', models.CharField(blank=True, max_length=255, null=True, verbose_name='描述')),
('status', models.CharField(choices=[('UNUSE', '待审核'), ('COMM', '正常'), ('OVER', '结束'), ('PEDDING', '停用'), ('DEL', '删除')], max_length=255, verbose_name='状态')),
('type', models.CharField(default='其他', max_length=255, verbose_name='类型')),
('order', models.IntegerField(default=9999, verbose_name='排序')),
('viewcnt', models.IntegerField(default=0, verbose_name='访问次数')),
('favcnt', models.IntegerField(default=0, verbose_name='收藏人数')),
('commentcnt', models.IntegerField(default=0, verbose_name='评论数')),
('likecnt', models.IntegerField(default=0, verbose_name='点赞数')),
('exptime', models.IntegerField(default=0, verbose_name='过期时间')),
('province', models.CharField(blank=True, max_length=255, null=True, verbose_name='区域(省)')),
('city', models.CharField(blank=True, max_length=255, null=True, verbose_name='区域(市)')),
('county', models.CharField(blank=True, max_length=255, null=True, verbose_name='区域(区)')),
('pic', tyadmin_api_cli.fields.SImageField(max_length=255, upload_to='Album_info', verbose_name='附加图片')),
('addtime', models.DateTimeField(auto_now=True, verbose_name='添加时间')),
('edittime', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
('addip', models.CharField(blank=True, max_length=255, null=True, verbose_name='添加IP')),
('editip', models.CharField(blank=True, max_length=255, null=True, verbose_name='修改IP')),
],
options={
'verbose_name': '信息',
'verbose_name_plural': '信息',
},
),
migrations.CreateModel(
name='Setup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=255, null=True, verbose_name='网站名称')),
('content', models.CharField(blank=True, max_length=255, null=True, verbose_name='关于我们')),
('logo', tyadmin_api_cli.fields.SImageField(max_length=255, upload_to='Album_about', verbose_name='网站底图')),
('adpic', tyadmin_api_cli.fields.SImageField(max_length=255, upload_to='Album_about', verbose_name='海报底图')),
('regcheck', models.IntegerField(default=0, verbose_name='注册是否审核')),
('addtime', models.IntegerField(default=0, verbose_name='添加时间')),
('edittime', models.IntegerField(default=0, verbose_name='修改时间')),
('addip', models.CharField(blank=True, max_length=255, null=True, verbose_name='添加IP')),
('editip', models.CharField(blank=True, max_length=255, null=True, verbose_name='修改IP')),
],
options={
'verbose_name': '关于我们',
'verbose_name_plural': '关于我们',
},
),
]
| 64.055556
| 175
| 0.576409
| 616
| 5,765
| 5.209416
| 0.204545
| 0.185104
| 0.100966
| 0.097227
| 0.866625
| 0.862886
| 0.820193
| 0.820193
| 0.820193
| 0.820193
| 0
| 0.027959
| 0.255507
| 5,765
| 89
| 176
| 64.775281
| 0.719711
| 0.007806
| 0
| 0.578313
| 1
| 0
| 0.123645
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.024096
| 0
| 0.060241
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
93377d9708ed937bcd4d00f5a6a5eaa5b5a45588
| 8,090
|
py
|
Python
|
python/github_com/TheThingsNetwork/api/networkserver/networkserver_pb2_grpc.py
|
LukasHabring/api
|
9e3da3462f14dab4c45fa38b03335e85e1970833
|
[
"MIT"
] | 14
|
2017-07-14T16:11:54.000Z
|
2021-11-16T12:35:37.000Z
|
python/github_com/TheThingsNetwork/api/networkserver/networkserver_pb2_grpc.py
|
LukasHabring/api
|
9e3da3462f14dab4c45fa38b03335e85e1970833
|
[
"MIT"
] | 34
|
2017-07-14T15:15:13.000Z
|
2021-08-18T10:08:10.000Z
|
python/github_com/TheThingsNetwork/api/networkserver/networkserver_pb2_grpc.py
|
LukasHabring/api
|
9e3da3462f14dab4c45fa38b03335e85e1970833
|
[
"MIT"
] | 12
|
2017-07-25T16:13:16.000Z
|
2021-05-08T07:21:50.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from github_com.TheThingsNetwork.api.broker import broker_pb2 as github_dot_com_dot_TheThingsNetwork_dot_api_dot_broker_dot_broker__pb2
from github_com.TheThingsNetwork.api.handler import handler_pb2 as github_dot_com_dot_TheThingsNetwork_dot_api_dot_handler_dot_handler__pb2
from github_com.TheThingsNetwork.api.networkserver import networkserver_pb2 as github_dot_com_dot_TheThingsNetwork_dot_api_dot_networkserver_dot_networkserver__pb2
class NetworkServerStub(object):
    """Client-side stub for the ``networkserver.NetworkServer`` RPC service.

    (The .proto file carries no documentation comment for this service.)
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Short local aliases for the generated protobuf modules; purely
        # cosmetic — the callables bound below are identical either way.
        broker_pb2 = github_dot_com_dot_TheThingsNetwork_dot_api_dot_broker_dot_broker__pb2
        handler_pb2 = github_dot_com_dot_TheThingsNetwork_dot_api_dot_handler_dot_handler__pb2
        ns_pb2 = github_dot_com_dot_TheThingsNetwork_dot_api_dot_networkserver_dot_networkserver__pb2

        self.GetDevices = channel.unary_unary(
            '/networkserver.NetworkServer/GetDevices',
            request_serializer=ns_pb2.DevicesRequest.SerializeToString,
            response_deserializer=ns_pb2.DevicesResponse.FromString,
        )
        self.PrepareActivation = channel.unary_unary(
            '/networkserver.NetworkServer/PrepareActivation',
            request_serializer=broker_pb2.DeduplicatedDeviceActivationRequest.SerializeToString,
            response_deserializer=broker_pb2.DeduplicatedDeviceActivationRequest.FromString,
        )
        self.Activate = channel.unary_unary(
            '/networkserver.NetworkServer/Activate',
            request_serializer=handler_pb2.DeviceActivationResponse.SerializeToString,
            response_deserializer=handler_pb2.DeviceActivationResponse.FromString,
        )
        self.Uplink = channel.unary_unary(
            '/networkserver.NetworkServer/Uplink',
            request_serializer=broker_pb2.DeduplicatedUplinkMessage.SerializeToString,
            response_deserializer=broker_pb2.DeduplicatedUplinkMessage.FromString,
        )
        self.Downlink = channel.unary_unary(
            '/networkserver.NetworkServer/Downlink',
            request_serializer=broker_pb2.DownlinkMessage.SerializeToString,
            response_deserializer=broker_pb2.DownlinkMessage.FromString,
        )
class NetworkServerServicer(object):
    """Server-side base class for the ``networkserver.NetworkServer`` service.

    Every handler below is a stub that answers ``UNIMPLEMENTED`` until a
    subclass overrides it with a real implementation.
    """

    @staticmethod
    def _unimplemented(context):
        # Shared "not implemented" response used by every default handler.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetDevices(self, request, context):
        """Broker requests devices with DevAddr and matching FCnt (or disabled FCnt check)
        """
        self._unimplemented(context)

    def PrepareActivation(self, request, context):
        """Broker requests device activation "template" from Network Server
        """
        self._unimplemented(context)

    def Activate(self, request, context):
        """Broker confirms device activation (after response from Handler)
        """
        self._unimplemented(context)

    def Uplink(self, request, context):
        """Broker informs Network Server about Uplink
        """
        self._unimplemented(context)

    def Downlink(self, request, context):
        """Broker informs Network Server about Downlink, NetworkServer may add MAC commands and re-set MIC
        """
        self._unimplemented(context)
def add_NetworkServerServicer_to_server(servicer, server):
    """Register *servicer*'s handlers for ``networkserver.NetworkServer`` on *server*."""
    broker_pb2 = github_dot_com_dot_TheThingsNetwork_dot_api_dot_broker_dot_broker__pb2
    handler_pb2 = github_dot_com_dot_TheThingsNetwork_dot_api_dot_handler_dot_handler__pb2
    ns_pb2 = github_dot_com_dot_TheThingsNetwork_dot_api_dot_networkserver_dot_networkserver__pb2

    # (method name, behaviour, request message class, response message class)
    method_specs = (
        ('GetDevices', servicer.GetDevices,
         ns_pb2.DevicesRequest, ns_pb2.DevicesResponse),
        ('PrepareActivation', servicer.PrepareActivation,
         broker_pb2.DeduplicatedDeviceActivationRequest,
         broker_pb2.DeduplicatedDeviceActivationRequest),
        ('Activate', servicer.Activate,
         handler_pb2.DeviceActivationResponse,
         handler_pb2.DeviceActivationResponse),
        ('Uplink', servicer.Uplink,
         broker_pb2.DeduplicatedUplinkMessage,
         broker_pb2.DeduplicatedUplinkMessage),
        ('Downlink', servicer.Downlink,
         broker_pb2.DownlinkMessage, broker_pb2.DownlinkMessage),
    )
    rpc_method_handlers = {
        name: grpc.unary_unary_rpc_method_handler(
            behaviour,
            request_deserializer=request_cls.FromString,
            response_serializer=response_cls.SerializeToString,
        )
        for name, behaviour, request_cls, response_cls in method_specs
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'networkserver.NetworkServer', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
class NetworkServerManagerStub(object):
    """The NetworkServerManager service provides configuration and monitoring
    functionality
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Alias the long generated-module name for readability only.
        ns_pb2 = github_dot_com_dot_TheThingsNetwork_dot_api_dot_networkserver_dot_networkserver__pb2
        self.GetStatus = channel.unary_unary(
            '/networkserver.NetworkServerManager/GetStatus',
            request_serializer=ns_pb2.StatusRequest.SerializeToString,
            response_deserializer=ns_pb2.Status.FromString,
        )
class NetworkServerManagerServicer(object):
    """The NetworkServerManager service provides configuration and monitoring
    functionality
    """

    def GetStatus(self, request, context):
        """Default handler: reports the RPC as not implemented.

        (The .proto file carries no documentation comment for this method.)
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_NetworkServerManagerServicer_to_server(servicer, server):
    """Register *servicer*'s handlers for ``networkserver.NetworkServerManager`` on *server*."""
    ns_pb2 = github_dot_com_dot_TheThingsNetwork_dot_api_dot_networkserver_dot_networkserver__pb2
    status_handler = grpc.unary_unary_rpc_method_handler(
        servicer.GetStatus,
        request_deserializer=ns_pb2.StatusRequest.FromString,
        response_serializer=ns_pb2.Status.SerializeToString,
    )
    generic_handler = grpc.method_handlers_generic_handler(
        'networkserver.NetworkServerManager', {'GetStatus': status_handler})
    server.add_generic_rpc_handlers((generic_handler,))
| 50.248447
| 163
| 0.810012
| 876
| 8,090
| 6.98516
| 0.126712
| 0.039712
| 0.05295
| 0.066187
| 0.843438
| 0.792613
| 0.782154
| 0.732146
| 0.707632
| 0.68557
| 0
| 0.004257
| 0.128925
| 8,090
| 160
| 164
| 50.5625
| 0.864056
| 0.107911
| 0
| 0.321101
| 1
| 0
| 0.088895
| 0.042064
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091743
| false
| 0.027523
| 0.036697
| 0
| 0.165138
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
933f4b3a83c8da0b76ed27a98639fa7941a3babf
| 68,645
|
py
|
Python
|
benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_locality/cmp_gobmk/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_locality/cmp_gobmk/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_locality/cmp_gobmk/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 1.88938e-06,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.20269,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 3.03604e-05,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.325691,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.56398,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.323458,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.21313,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.321928,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.5053,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 5.73574e-06,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0118066,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0853764,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0873168,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0853821,
'Execution Unit/Register Files/Runtime Dynamic': 0.0991234,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.206305,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.553821,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 2.47414,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00309221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00309221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00274229,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00108837,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00125431,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.010181,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.027898,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0839399,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.3393,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.267363,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.285098,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 7.82091,
'Instruction Fetch Unit/Runtime Dynamic': 0.67448,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0505317,
'L2/Runtime Dynamic': 0.0135384,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.49711,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.10565,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0731162,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0731162,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.84379,
'Load Store Unit/Runtime Dynamic': 1.53935,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.180292,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.360584,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0639863,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0644142,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.331978,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0448107,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.601096,
'Memory Management Unit/Runtime Dynamic': 0.109225,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 22.3833,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 2.00279e-05,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0166543,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.170925,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.1876,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 4.99834,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 9.4469e-07,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.20269,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 2.02403e-05,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.18749,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.302414,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.152648,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.642552,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.21443,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.24251,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 3.82383e-06,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00786416,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0568674,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0581603,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0568712,
'Execution Unit/Register Files/Runtime Dynamic': 0.0660244,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.119804,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.321608,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.63825,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00218588,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00218588,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00197079,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000799513,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000835477,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00717804,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.018568,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.055911,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.55642,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.178246,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.189899,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.94753,
'Instruction Fetch Unit/Runtime Dynamic': 0.449802,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0335436,
'L2/Runtime Dynamic': 0.00898301,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.74267,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.736506,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0487083,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0487083,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.97269,
'Load Store Unit/Runtime Dynamic': 1.02543,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.120106,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.240213,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0426261,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0429091,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.221125,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0298749,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.450458,
'Memory Management Unit/Runtime Dynamic': 0.0727841,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 17.2362,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 9.80041e-06,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00845914,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0963054,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.104774,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.30002,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 9.4469e-07,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.20269,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 2.02403e-05,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.187491,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.302416,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.152649,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.642556,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.214433,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.24252,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 3.82383e-06,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00786421,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.056868,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0581606,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0568718,
'Execution Unit/Register Files/Runtime Dynamic': 0.0660248,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.119805,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.32161,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.63826,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.0021859,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.0021859,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00197081,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000799518,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000835482,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00717808,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0185681,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0559113,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.55644,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.178247,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.1899,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.94755,
'Instruction Fetch Unit/Runtime Dynamic': 0.449805,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0335487,
'L2/Runtime Dynamic': 0.00898351,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.74269,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.736513,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0487088,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0487087,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.9727,
'Load Store Unit/Runtime Dynamic': 1.02544,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.120108,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.240215,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0426266,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0429095,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.221126,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0298751,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.45046,
'Memory Management Unit/Runtime Dynamic': 0.0727846,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 17.2363,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 9.80041e-06,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00845919,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0963059,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.104775,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.30004,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 9.4469e-07,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.20269,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 2.02403e-05,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.18749,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.302414,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.152648,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.642552,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.21443,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.24251,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 3.82383e-06,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00786416,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0568674,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0581603,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0568712,
'Execution Unit/Register Files/Runtime Dynamic': 0.0660245,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.119804,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.321608,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.63825,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00218588,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00218588,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0019708,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000799514,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000835477,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00717804,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.018568,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.055911,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.55642,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.178246,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.189899,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.94753,
'Instruction Fetch Unit/Runtime Dynamic': 0.449802,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0335436,
'L2/Runtime Dynamic': 0.00898292,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.74267,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.736506,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0487083,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0487084,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.97269,
'Load Store Unit/Runtime Dynamic': 1.02543,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.120106,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.240213,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0426261,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0429092,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.221125,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.029875,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.450458,
'Memory Management Unit/Runtime Dynamic': 0.0727841,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 17.2362,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 9.80041e-06,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00845914,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0963054,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.104774,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.30002,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 0.2204280563668612,
'Runtime Dynamic': 0.2204280563668612,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.103759,
'Runtime Dynamic': 0.0654962,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 74.1957,
'Peak Power': 107.308,
'Runtime Dynamic': 14.9639,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 74.092,
'Total Cores/Runtime Dynamic': 14.8984,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.103759,
'Total L3s/Runtime Dynamic': 0.0654962,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
| 75.103939
| 124
| 0.682016
| 8,098
| 68,645
| 5.775377
| 0.061497
| 0.123501
| 0.112895
| 0.093395
| 0.952169
| 0.944878
| 0.935085
| 0.915713
| 0.896427
| 0.8796
| 0
| 0.131934
| 0.224212
| 68,645
| 914
| 125
| 75.103939
| 0.746291
| 0
| 0
| 0.749453
| 0
| 0
| 0.657067
| 0.048073
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
fab94f5bf4a7c41a7d325ce95c3d5f0bf987bef9
| 39
|
py
|
Python
|
app/main/views/__init__.py
|
Jaydonjin/seer
|
adeb14601b07de93603176694115c9673f010989
|
[
"MIT"
] | null | null | null |
app/main/views/__init__.py
|
Jaydonjin/seer
|
adeb14601b07de93603176694115c9673f010989
|
[
"MIT"
] | null | null | null |
app/main/views/__init__.py
|
Jaydonjin/seer
|
adeb14601b07de93603176694115c9673f010989
|
[
"MIT"
] | null | null | null |
from . import index
from . import misc
| 13
| 19
| 0.74359
| 6
| 39
| 4.833333
| 0.666667
| 0.689655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205128
| 39
| 2
| 20
| 19.5
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
facdf6e9df546e020e1e81e0b4e65db6b7acef40
| 130,928
|
py
|
Python
|
src/CLI/actioner/bgp_openconfig_to_restconf_map.py
|
project-arlo/sonic-mgmt-framework
|
562cd84ff3fec9ca705c7df621742f2daa61ce71
|
[
"Apache-2.0"
] | 7
|
2019-10-17T06:12:02.000Z
|
2021-09-08T11:16:19.000Z
|
src/CLI/actioner/bgp_openconfig_to_restconf_map.py
|
noolex/sonic-mgmt-framework
|
5493889adc47fc584b04dca1a0cc0a2007211df4
|
[
"Apache-2.0"
] | 207
|
2019-06-24T04:48:11.000Z
|
2020-05-06T05:51:37.000Z
|
src/CLI/actioner/bgp_openconfig_to_restconf_map.py
|
noolex/sonic-mgmt-framework
|
5493889adc47fc584b04dca1a0cc0a2007211df4
|
[
"Apache-2.0"
] | 20
|
2019-06-27T19:24:45.000Z
|
2021-07-15T21:12:30.000Z
|
restconf_map = {
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/config',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_config_as' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/config/as',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_config_router_id' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/config/router-id',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_config_disable_ebgp_connected_route_check' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/config/openconfig-bgp-ext:disable-ebgp-connected-route-check',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_config_fast_external_failover' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/config/openconfig-bgp-ext:fast-external-failover',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_config_network_import_check' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/config/openconfig-bgp-ext:network-import-check',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_config_graceful_shutdown' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/config/openconfig-bgp-ext:graceful-shutdown',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_config_clnt_to_clnt_reflection' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/config/openconfig-bgp-ext:clnt-to-clnt-reflection',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_config_max_dynamic_neighbors' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/config/openconfig-bgp-ext:max-dynamic-neighbors',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_config_read_quanta' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/config/openconfig-bgp-ext:read-quanta',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_config_write_quanta' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/config/openconfig-bgp-ext:write-quanta',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_config_coalesce_time' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/config/openconfig-bgp-ext:coalesce-time',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_config_route_map_process_delay' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/config/openconfig-bgp-ext:route-map-process-delay',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_config_deterministic_med' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/config/openconfig-bgp-ext:deterministic-med',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_config_hold_time' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/config/openconfig-bgp-ext:hold-time',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_config_keepalive_interval' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/config/openconfig-bgp-ext:keepalive-interval',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_confederation' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/confederation',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_confederation_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/confederation/config',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_confederation_config_identifier' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/confederation/config/identifier',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_confederation_config_member_as' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/confederation/config/member-as',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_graceful_restart' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/graceful-restart',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_graceful_restart_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/graceful-restart/config',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_graceful_restart_config_enabled' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/graceful-restart/config/enabled',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_graceful_restart_config_restart_time' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/graceful-restart/config/restart-time',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_graceful_restart_config_stale_routes_time' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/graceful-restart/config/stale-routes-time',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_graceful_restart_config_preserve_fw_state' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/graceful-restart/config/openconfig-bgp-ext:preserve-fw-state',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_use_multiple_paths' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/use-multiple-paths',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_use_multiple_paths_ebgp' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/use-multiple-paths/ebgp',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_use_multiple_paths_ebgp_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/use-multiple-paths/ebgp/config',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_use_multiple_paths_ebgp_config_allow_multiple_as' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/use-multiple-paths/ebgp/config/allow-multiple-as',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_use_multiple_paths_ebgp_config_as_set' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/use-multiple-paths/ebgp/config/openconfig-bgp-ext:as-set',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_route_selection_options' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/route-selection-options',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_route_selection_options_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/route-selection-options/config',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_route_selection_options_config_always_compare_med' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/route-selection-options/config/always-compare-med',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_route_selection_options_config_ignore_as_path_length' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/route-selection-options/config/ignore-as-path-length',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_route_selection_options_config_external_compare_router_id' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/route-selection-options/config/external-compare-router-id',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_route_selection_options_config_med_confed' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/route-selection-options/config/openconfig-bgp-ext:med-confed',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_route_selection_options_config_med_missing_as_worst' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/route-selection-options/config/openconfig-bgp-ext:med-missing-as-worst',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_route_selection_options_config_compare_confed_as_path' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/route-selection-options/config/openconfig-bgp-ext:compare-confed-as-path',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}',
'list_openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_config_enabled' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/config/enabled',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_config_table_map_name' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/config/openconfig-bgp-ext:table-map-name',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_use_multiple_paths' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/use-multiple-paths',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_use_multiple_paths_ebgp' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/use-multiple-paths/ebgp',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_use_multiple_paths_ebgp_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/use-multiple-paths/ebgp/config',
'openconfig_network_instance1348121867' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/use-multiple-paths/ebgp/config/maximum-paths',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_use_multiple_paths_ibgp' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/use-multiple-paths/ibgp',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_use_multiple_paths_ibgp_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/use-multiple-paths/ibgp/config',
'openconfig_network_instance1543452951' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/use-multiple-paths/ibgp/config/maximum-paths',
'openconfig_bgp_ext3691744053' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/use-multiple-paths/ibgp/config/openconfig-bgp-ext:equal-cluster-length',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_prefix_limit' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/prefix-limit',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_prefix_limit_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/prefix-limit/config',
'openconfig_network_instance2494599511' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/prefix-limit/config/max-prefixes',
'openconfig_network_instance766838271' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/prefix-limit/config/prevent-teardown',
'openconfig_network_instance1949223480' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/prefix-limit/config/warning-threshold-pct',
'openconfig_network_instance4089704618' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/prefix-limit/config/restart-timer',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_advertise_all_vni' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:advertise-all-vni',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_advertise_list' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:advertise-list',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_advertise_default_gw' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:advertise-default-gw',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_route_distinguisher' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:route-distinguisher',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_vpn_target' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vpn-target={route-target}',
'list_openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_vpn_target' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vpn-target',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_vpn_target_route_target_type' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vpn-target={route-target}/route-target-type',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_vnis' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_vnis_vni' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis/vni={vni-number}',
'list_openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_vnis_vni' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis/vni',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_vnis_vni_state' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis/vni={vni-number}/state',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_vnis_vni_state_vni_number' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis/vni={vni-number}/state/vni-number',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_vnis_vni_state_type' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis/vni={vni-number}/state/type',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_vnis_vni_state_is_live' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis/vni={vni-number}/state/is-live',
'openconfig_bgp_evpn_ext3291680974' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis/vni={vni-number}/state/route-distinguisher',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_vnis_vni_state_originator' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis/vni={vni-number}/state/originator',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_vnis_vni_state_mcast_group' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis/vni={vni-number}/state/mcast-group',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_vnis_vni_state_advertise_gw_mac' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis/vni={vni-number}/state/advertise-gw-mac',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_vnis_vni_state_import_rts' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis/vni={vni-number}/state/import-rts',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_vnis_vni_state_export_rts' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis/vni={vni-number}/state/export-rts',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_vnis_vni_advertise_default_gw' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis/vni={vni-number}/advertise-default-gw',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_vnis_vni_route_distinguisher' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis/vni={vni-number}/route-distinguisher',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_vnis_vni_vpn_target' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis/vni={vni-number}/vpn-target={route-target}',
'list_openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_vnis_vni_vpn_target' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis/vni={vni-number}/vpn-target',
'openconfig_bgp_evpn_ext4078896717' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis/vni={vni-number}/vpn-target={route-target}/route-target-type',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_default_originate' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:default-originate',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_default_originate_ipv4' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:default-originate/ipv4',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_default_originate_ipv6' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:default-originate/ipv6',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_autort' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:autort',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_flooding' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:flooding',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_dup_addr_detection' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:dup-addr-detection',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_dup_addr_detection_enabled' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:dup-addr-detection/enabled',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_dup_addr_detection_max_moves' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:dup-addr-detection/max-moves',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_dup_addr_detection_time' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:dup-addr-detection/time',
'openconfig_bgp_evpn_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_l2vpn_evpn_dup_addr_detection_freeze' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/openconfig-bgp-evpn-ext:dup-addr-detection/freeze',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_aggregate_address_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:aggregate-address-config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_aggregate_address_config_aggregate_address' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:aggregate-address-config/aggregate-address={prefix}',
'list_openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_aggregate_address_config_aggregate_address' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:aggregate-address-config/aggregate-address',
'openconfig_bgp_ext2461397931' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:aggregate-address-config/aggregate-address={prefix}/config/policy-name',
'openconfig_bgp_ext2155307832' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:aggregate-address-config/aggregate-address={prefix}/config/as-set',
'openconfig_bgp_ext1133616225' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:aggregate-address-config/aggregate-address={prefix}/config/summary-only',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_network_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:network-config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_network_config_network' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:network-config/network={prefix}',
'list_openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_network_config_network' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:network-config/network',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_network_config_network_config_policy_name' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:network-config/network={prefix}/config/policy-name',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_network_config_network_config_backdoor' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:network-config/network={prefix}/config/backdoor',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_default_route_distance' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:default-route-distance',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_default_route_distance_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:default-route-distance/config',
'openconfig_bgp_ext1219850592' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:default-route-distance/config/external-route-distance',
'openconfig_bgp_ext1240612726' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:default-route-distance/config/internal-route-distance',
'openconfig_bgp_ext4838094' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:default-route-distance/config/local-route-distance',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_route_flap_damping' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:route-flap-damping',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_route_flap_damping_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:route-flap-damping/config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_route_flap_damping_config_enabled' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:route-flap-damping/config/enabled',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_route_flap_damping_config_half_life' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:route-flap-damping/config/half-life',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_route_flap_damping_config_reuse_threshold' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:route-flap-damping/config/reuse-threshold',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_route_flap_damping_config_suppress_threshold' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:route-flap-damping/config/suppress-threshold',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_afi_safis_afi_safi_route_flap_damping_config_max_suppress' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:route-flap-damping/config/max-suppress',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_dynamic_neighbor_prefixes' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/dynamic-neighbor-prefixes',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_dynamic_neighbor_prefixes_dynamic_neighbor_prefix' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/dynamic-neighbor-prefixes/dynamic-neighbor-prefix={prefix}',
'list_openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_global_dynamic_neighbor_prefixes_dynamic_neighbor_prefix' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/dynamic-neighbor-prefixes/dynamic-neighbor-prefix',
'openconfig_network_instance1717438887' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/dynamic-neighbor-prefixes/dynamic-neighbor-prefix={prefix}/config/peer-group',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_logging_options' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:logging-options',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_logging_options_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:logging-options/config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_logging_options_config_log_neighbor_state_changes' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:logging-options/config/log-neighbor-state-changes',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_bgp_ext_route_reflector' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:bgp-ext-route-reflector',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_bgp_ext_route_reflector_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:bgp-ext-route-reflector/config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_bgp_ext_route_reflector_config_route_reflector_cluster_id' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:bgp-ext-route-reflector/config/route-reflector-cluster-id',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_bgp_ext_route_reflector_config_allow_outbound_policy' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:bgp-ext-route-reflector/config/allow-outbound-policy',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_global_defaults' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:global-defaults',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_global_defaults_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:global-defaults/config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_global_defaults_config_ipv4_unicast' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:global-defaults/config/ipv4-unicast',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_global_defaults_config_local_preference' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:global-defaults/config/local-preference',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_global_defaults_config_show_hostname' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:global-defaults/config/show-hostname',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_global_defaults_config_shutdown' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:global-defaults/config/shutdown',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_global_defaults_config_subgroup_pkt_queue_max' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:global-defaults/config/subgroup-pkt-queue-max',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_update_delay' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:update-delay',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_update_delay_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:update-delay/config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_update_delay_config_max_delay' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:update-delay/config/max-delay',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_update_delay_config_establish_wait' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:update-delay/config/establish-wait',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_max_med' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:max-med',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_max_med_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:max-med/config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_max_med_config_time' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:max-med/config/time',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_max_med_config_max_med_val' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:max-med/config/max-med-val',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_max_med_config_administrative' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:max-med/config/administrative',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_global_max_med_config_admin_max_med_val' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/global/openconfig-bgp-ext:max-med/config/admin-max-med-val',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}',
'list_openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_peer_group' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/peer-group',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_enabled' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/enabled',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_peer_as' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/peer-as',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_local_as' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/local-as',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_peer_type' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/peer-type',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_auth_password' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/auth-password',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_description' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/description',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_disable_ebgp_connected_route_check' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/openconfig-bgp-ext:disable-ebgp-connected-route-check',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_enforce_first_as' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/openconfig-bgp-ext:enforce-first-as',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_solo_peer' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/openconfig-bgp-ext:solo-peer',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_ttl_security_hops' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/openconfig-bgp-ext:ttl-security-hops',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_capability_extended_nexthop' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/openconfig-bgp-ext:capability-extended-nexthop',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_capability_dynamic' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/openconfig-bgp-ext:capability-dynamic',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_dont_negotiate_capability' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/openconfig-bgp-ext:dont-negotiate-capability',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_enforce_multihop' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/openconfig-bgp-ext:enforce-multihop',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_override_capability' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/openconfig-bgp-ext:override-capability',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_peer_port' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/openconfig-bgp-ext:peer-port',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_shutdown_message' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/openconfig-bgp-ext:shutdown-message',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_strict_capability_match' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/openconfig-bgp-ext:strict-capability-match',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_local_as_no_prepend' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/openconfig-bgp-ext:local-as-no-prepend',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_config_local_as_replace_as' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/config/openconfig-bgp-ext:local-as-replace-as',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_timers' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/timers',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_timers_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/timers/config',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_timers_config_connect_retry' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/timers/config/connect-retry',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_timers_config_hold_time' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/timers/config/hold-time',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_timers_config_keepalive_interval' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/timers/config/keepalive-interval',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_timers_config_minimum_advertisement_interval' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/timers/config/minimum-advertisement-interval',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_transport' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/transport',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_transport_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/transport/config',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_transport_config_passive_mode' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/transport/config/passive-mode',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_transport_config_local_address' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/transport/config/local-address',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_ebgp_multihop' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/ebgp-multihop',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_ebgp_multihop_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/ebgp-multihop/config',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_ebgp_multihop_config_enabled' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/ebgp-multihop/config/enabled',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_ebgp_multihop_config_multihop_ttl' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/ebgp-multihop/config/multihop-ttl',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}',
'list_openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_config_enabled' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/config/enabled',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_config_soft_reconfiguration_in' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/config/openconfig-bgp-ext:soft-reconfiguration-in',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_config_unsuppress_map_name' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/config/openconfig-bgp-ext:unsuppress-map-name',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_config_weight' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/config/openconfig-bgp-ext:weight',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_config_as_override' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/config/openconfig-bgp-ext:as-override',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_config_send_community' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/config/openconfig-bgp-ext:send-community',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_config_route_reflector_client' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/config/openconfig-bgp-ext:route-reflector-client',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_config_route_server_client' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/config/openconfig-bgp-ext:route-server-client',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_add_paths' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/add-paths',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_add_paths_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/add-paths/config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_add_paths_config_tx_add_paths' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/add-paths/config/openconfig-bgp-ext:tx-add-paths',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_apply_policy' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/apply-policy',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_apply_policy_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/apply-policy/config',
'openconfig_network_instance3764031561' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/apply-policy/config/import-policy',
'openconfig_network_instance3026816683' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/apply-policy/config/default-import-policy',
'openconfig_network_instance1837635724' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/apply-policy/config/export-policy',
'openconfig_network_instance3073473323' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/apply-policy/config/default-export-policy',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_prefix_limit' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast/prefix-limit',
'openconfig_network_instance3828573403' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast/prefix-limit/config',
'openconfig_network_instance71336515' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast/prefix-limit/config/max-prefixes',
'openconfig_network_instance2611735211' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast/prefix-limit/config/prevent-teardown',
'openconfig_network_instance1058079578' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast/prefix-limit/config/warning-threshold-pct',
'openconfig_network_instance1942970348' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast/prefix-limit/config/restart-timer',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast/config',
'openconfig_network_instance1624994673' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast/config/send-default-route',
'openconfig_bgp_ext841615068' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast/config/openconfig-bgp-ext:default-policy-name',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_prefix_limit' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast/prefix-limit',
'openconfig_network_instance1753955874' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast/prefix-limit/config',
'openconfig_network_instance2212248367' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast/prefix-limit/config/max-prefixes',
'openconfig_network_instance2110995234' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast/prefix-limit/config/prevent-teardown',
'openconfig_network_instance446783776' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast/prefix-limit/config/warning-threshold-pct',
'openconfig_network_instance943303393' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast/prefix-limit/config/restart-timer',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast/config',
'openconfig_network_instance4125292543' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast/config/send-default-route',
'openconfig_bgp_ext2059791605' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast/config/openconfig-bgp-ext:default-policy-name',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_prefix_limit' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/prefix-limit',
'openconfig_network_instance985144991' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/prefix-limit/config',
'openconfig_network_instance1553350943' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/prefix-limit/config/max-prefixes',
'openconfig_network_instance2837457500' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/prefix-limit/config/prevent-teardown',
'openconfig_network_instance3371165607' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/prefix-limit/config/warning-threshold-pct',
'openconfig_network_instance2520259662' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/prefix-limit/config/restart-timer',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_allow_own_as' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:allow-own-as',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_allow_own_as_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:allow-own-as/config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_allow_own_as_config_enabled' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:allow-own-as/config/enabled',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_allow_own_as_config_as_count' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:allow-own-as/config/as-count',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_allow_own_as_config_origin' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:allow-own-as/config/origin',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_attribute_unchanged' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:attribute-unchanged',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_attribute_unchanged_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:attribute-unchanged/config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_attribute_unchanged_config_as_path' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:attribute-unchanged/config/as-path',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_attribute_unchanged_config_med' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:attribute-unchanged/config/med',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_attribute_unchanged_config_next_hop' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:attribute-unchanged/config/next-hop',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_filter_list' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:filter-list',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_filter_list_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:filter-list/config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_filter_list_config_import_policy' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:filter-list/config/import-policy',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_filter_list_config_export_policy' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:filter-list/config/export-policy',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_next_hop_self' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:next-hop-self',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_next_hop_self_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:next-hop-self/config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_next_hop_self_config_enabled' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:next-hop-self/config/enabled',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_next_hop_self_config_force' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:next-hop-self/config/force',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_prefix_list' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:prefix-list',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_prefix_list_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:prefix-list/config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_prefix_list_config_import_policy' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:prefix-list/config/import-policy',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_prefix_list_config_export_policy' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:prefix-list/config/export-policy',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_remove_private_as' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:remove-private-as',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_remove_private_as_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:remove-private-as/config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_remove_private_as_config_enabled' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:remove-private-as/config/enabled',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_remove_private_as_config_all' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:remove-private-as/config/all',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_remove_private_as_config_replace_as' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:remove-private-as/config/replace-as',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_capability_orf' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:capability-orf',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_capability_orf_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:capability-orf/config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_neighbors_neighbor_afi_safis_afi_safi_capability_orf_config_orf_type' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/neighbors/neighbor={neighbor-address}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:capability-orf/config/orf-type',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}',
'list_openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_peer_as' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/peer-as',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_local_as' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/local-as',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_peer_type' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/peer-type',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_auth_password' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/auth-password',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_description' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/description',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_enabled' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/openconfig-bgp-ext:enabled',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_disable_ebgp_connected_route_check' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/openconfig-bgp-ext:disable-ebgp-connected-route-check',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_enforce_first_as' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/openconfig-bgp-ext:enforce-first-as',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_solo_peer' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/openconfig-bgp-ext:solo-peer',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_ttl_security_hops' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/openconfig-bgp-ext:ttl-security-hops',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_capability_extended_nexthop' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/openconfig-bgp-ext:capability-extended-nexthop',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_capability_dynamic' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/openconfig-bgp-ext:capability-dynamic',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_dont_negotiate_capability' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/openconfig-bgp-ext:dont-negotiate-capability',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_enforce_multihop' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/openconfig-bgp-ext:enforce-multihop',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_override_capability' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/openconfig-bgp-ext:override-capability',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_peer_port' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/openconfig-bgp-ext:peer-port',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_shutdown_message' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/openconfig-bgp-ext:shutdown-message',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_strict_capability_match' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/openconfig-bgp-ext:strict-capability-match',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_local_as_no_prepend' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/openconfig-bgp-ext:local-as-no-prepend',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_config_local_as_replace_as' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/config/openconfig-bgp-ext:local-as-replace-as',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_timers' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/timers',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_timers_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/timers/config',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_timers_config_connect_retry' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/timers/config/connect-retry',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_timers_config_hold_time' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/timers/config/hold-time',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_timers_config_keepalive_interval' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/timers/config/keepalive-interval',
'openconfig_network_instance1223315985' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/timers/config/minimum-advertisement-interval',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_transport' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/transport',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_transport_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/transport/config',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_transport_config_passive_mode' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/transport/config/passive-mode',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_transport_config_local_address' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/transport/config/local-address',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_ebgp_multihop' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/ebgp-multihop',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_ebgp_multihop_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/ebgp-multihop/config',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_ebgp_multihop_config_enabled' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/ebgp-multihop/config/enabled',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_ebgp_multihop_config_multihop_ttl' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/ebgp-multihop/config/multihop-ttl',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}',
'list_openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_config_enabled' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/config/enabled',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_config_soft_reconfiguration_in' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/config/openconfig-bgp-ext:soft-reconfiguration-in',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_config_unsuppress_map_name' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/config/openconfig-bgp-ext:unsuppress-map-name',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_config_weight' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/config/openconfig-bgp-ext:weight',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_config_as_override' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/config/openconfig-bgp-ext:as-override',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_config_send_community' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/config/openconfig-bgp-ext:send-community',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_config_route_reflector_client' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/config/openconfig-bgp-ext:route-reflector-client',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_config_route_server_client' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/config/openconfig-bgp-ext:route-server-client',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_add_paths' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/add-paths',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_add_paths_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/add-paths/config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_add_paths_config_tx_add_paths' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/add-paths/config/openconfig-bgp-ext:tx-add-paths',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_apply_policy' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/apply-policy',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_apply_policy_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/apply-policy/config',
'openconfig_network_instance1779097864' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/apply-policy/config/import-policy',
'openconfig_network_instance526497570' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/apply-policy/config/default-import-policy',
'openconfig_network_instance251836598' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/apply-policy/config/export-policy',
'openconfig_network_instance1262935499' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/apply-policy/config/default-export-policy',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast',
'openconfig_network_instance3416997316' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast/prefix-limit',
'openconfig_network_instance3096500951' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast/prefix-limit/config',
'openconfig_network_instance936334426' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast/prefix-limit/config/max-prefixes',
'openconfig_network_instance851073941' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast/prefix-limit/config/prevent-teardown',
'openconfig_network_instance1159885450' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast/prefix-limit/config/warning-threshold-pct',
'openconfig_network_instance1472951104' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast/prefix-limit/config/restart-timer',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast/config',
'openconfig_network_instance626341485' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast/config/send-default-route',
'openconfig_bgp_ext2561500065' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv4-unicast/config/openconfig-bgp-ext:default-policy-name',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast',
'openconfig_network_instance3580119146' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast/prefix-limit',
'openconfig_network_instance4250519641' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast/prefix-limit/config',
'openconfig_network_instance622781667' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast/prefix-limit/config/max-prefixes',
'openconfig_network_instance1452468743' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast/prefix-limit/config/prevent-teardown',
'openconfig_network_instance416397439' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast/prefix-limit/config/warning-threshold-pct',
'openconfig_network_instance987346232' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast/prefix-limit/config/restart-timer',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast/config',
'openconfig_network_instance1514043555' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast/config/send-default-route',
'openconfig_bgp_ext777259601' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/ipv6-unicast/config/openconfig-bgp-ext:default-policy-name',
'openconfig_network_instance_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_l2vpn_evpn' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn',
'openconfig_network_instance73029077' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/prefix-limit',
'openconfig_network_instance202630882' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/prefix-limit/config',
'openconfig_network_instance880731655' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/prefix-limit/config/max-prefixes',
'openconfig_network_instance4210765519' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/prefix-limit/config/prevent-teardown',
'openconfig_network_instance3458553894' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/prefix-limit/config/warning-threshold-pct',
'openconfig_network_instance1514201447' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/l2vpn-evpn/prefix-limit/config/restart-timer',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_allow_own_as' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:allow-own-as',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_allow_own_as_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:allow-own-as/config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_allow_own_as_config_enabled' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:allow-own-as/config/enabled',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_allow_own_as_config_as_count' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:allow-own-as/config/as-count',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_allow_own_as_config_origin' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:allow-own-as/config/origin',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_attribute_unchanged' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:attribute-unchanged',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_attribute_unchanged_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:attribute-unchanged/config',
'openconfig_bgp_ext2045507776' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:attribute-unchanged/config/as-path',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_attribute_unchanged_config_med' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:attribute-unchanged/config/med',
'openconfig_bgp_ext1860016533' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:attribute-unchanged/config/next-hop',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_filter_list' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:filter-list',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_filter_list_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:filter-list/config',
'openconfig_bgp_ext284495364' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:filter-list/config/import-policy',
'openconfig_bgp_ext4092261296' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:filter-list/config/export-policy',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_next_hop_self' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:next-hop-self',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_next_hop_self_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:next-hop-self/config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_next_hop_self_config_enabled' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:next-hop-self/config/enabled',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_next_hop_self_config_force' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:next-hop-self/config/force',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_prefix_list' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:prefix-list',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_prefix_list_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:prefix-list/config',
'openconfig_bgp_ext367772702' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:prefix-list/config/import-policy',
'openconfig_bgp_ext1376237526' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:prefix-list/config/export-policy',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_remove_private_as' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:remove-private-as',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_remove_private_as_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:remove-private-as/config',
'openconfig_bgp_ext2741086768' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:remove-private-as/config/enabled',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_remove_private_as_config_all' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:remove-private-as/config/all',
'openconfig_bgp_ext1124459141' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:remove-private-as/config/replace-as',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_capability_orf' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:capability-orf',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_capability_orf_config' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:capability-orf/config',
'openconfig_bgp_ext_network_instances_network_instance_protocols_protocol_bgp_peer_groups_peer_group_afi_safis_afi_safi_capability_orf_config_orf_type' :
'/restconf/data/openconfig-network-instance:network-instances/network-instance={name}/protocols/protocol={identifier},{name1}/bgp/peer-groups/peer-group={peer-group-name}/afi-safis/afi-safi={afi-safi-name}/openconfig-bgp-ext:capability-orf/config/orf-type',
}
| 173.41457
| 282
| 0.831327
| 16,650
| 130,928
| 6.218138
| 0.017297
| 0.168933
| 0.15062
| 0.20301
| 0.980151
| 0.978248
| 0.976819
| 0.975524
| 0.975061
| 0.974375
| 0
| 0.009914
| 0.043138
| 130,928
| 754
| 283
| 173.644562
| 0.816491
| 0
| 0
| 0
| 0
| 0.498674
| 0.939516
| 0.939516
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.01061
| 0.018568
| 0
| 0.018568
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
fad8dbc853539467946e8b3cc4ea46bf2a0e2b79
| 48
|
py
|
Python
|
gpvolve/markov/_generate_tmatrix/__init__.py
|
harmsm/gpvolve
|
94dd71e5fbee29cc50b82d282ef33c850fb33575
|
[
"MIT"
] | 1
|
2021-12-05T23:00:59.000Z
|
2021-12-05T23:00:59.000Z
|
gpvolve/markov/utils/_generate_tmatrix/__init__.py
|
clararehmann/gpvolve
|
4e45b53b72184425c24d57b2e8779d3d51de39d7
|
[
"MIT"
] | null | null | null |
gpvolve/markov/utils/_generate_tmatrix/__init__.py
|
clararehmann/gpvolve
|
4e45b53b72184425c24d57b2e8779d3d51de39d7
|
[
"MIT"
] | 2
|
2021-09-27T17:51:31.000Z
|
2021-11-04T15:35:51.000Z
|
from .generate_tmatrix import generate_tmatrix
| 16
| 46
| 0.875
| 6
| 48
| 6.666667
| 0.666667
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104167
| 48
| 2
| 47
| 24
| 0.930233
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
faee7ccdb5a22fd6b3af663e6ed0b3556fabfa10
| 21,047
|
py
|
Python
|
lib/txt_logs.py
|
diegoscastanho/LiconIA
|
d6d8c4f250c5ade3010bd72d6e662e3393d3226e
|
[
"MIT"
] | 4
|
2021-06-19T17:21:13.000Z
|
2021-06-20T19:50:29.000Z
|
lib/txt_logs.py
|
Donanfer1/LiconIA
|
d6d8c4f250c5ade3010bd72d6e662e3393d3226e
|
[
"MIT"
] | null | null | null |
lib/txt_logs.py
|
Donanfer1/LiconIA
|
d6d8c4f250c5ade3010bd72d6e662e3393d3226e
|
[
"MIT"
] | 1
|
2021-06-19T19:24:47.000Z
|
2021-06-19T19:24:47.000Z
|
"""
@author: Diego Solak Castanho
https://github.com/diegoscastanho
"""
import os
import pickle
import logging
from copy import deepcopy
# General procedures to show the log.
# NOTE(review): this configures the root logger at import time, which
# affects every module in the process -- confirm this is intended.
logging.basicConfig(
format=(
'%(asctime)s - %(levelname)s: ' +
'(%(filename)s:%(funcName)s at %(lineno)d): %(message)s'),
datefmt='%b %d %H:%M:%S', level=logging.INFO)
class PSO_PlotLogResults(object):
    """
    Generate the result log files (one pickle log and one plain-text log)
    for a PSO run.
    """
    def __init__(self, obj, configs, best_particles, best):
        """
        Map constructor arguments to attributes and build the payload
        that create_log() pickles.

        obj            -- simulation object (indices, sample counts, cost flags)
        configs        -- PSO configuration object
        best_particles -- best particle of each execution
        best           -- overall best particle across all executions
        """
        self.obj = obj
        self.configs = configs
        self.best_particles = best_particles #best of all executions
        self.best = best # list with the best of each execution
        # Copy the input file name before the GUI reference is dropped below.
        self.file_name = deepcopy(obj.interface.open_file_field.text())
        # Drop members that cannot be pickled (pipe connection, Qt widget).
        del obj.child_conn
        del obj.interface
        self.data = {}
        self.data['obj'] = obj
        self.data["configs"] = configs
        self.data["best_particles"] = best_particles
        self.data["best"] = best
    def create_log(self):
        """
        Creates the log files (pickle and text) for the PSO.
        """
        # pickle log (kept under json_logs with a .txt extension)
        newpath = "results/pso/json_logs"
        if not os.path.exists(newpath):
            os.makedirs(newpath)
        file_path = "results/pso/json_logs/pso_day_delay_" + str(self.best.delay_day) + ".txt"
        with open(file_path, 'wb') as handle:
            pickle.dump(self.data, handle)
        # txt log
        newpath = "results/pso/txt_logs"
        if not os.path.exists(newpath):
            os.makedirs(newpath)
        file_name = "results/pso/txt_logs/pso_day_delay_" + str(self.best.delay_day) + ".txt"
        self.basic_config(file_name)
        self.executions_log(file_name)
        self.execution_log(file_name)
    def basic_config(self, file_name):
        """
        Write the basic simulation settings for PSO (overwrites file_name).
        """
        # 'with' guarantees the handle is closed even if a write raises
        # (the previous open()/close() pair leaked on exceptions).
        with open(file_name, 'w') as file:
            file.write("----------------GLM-PSO------------------------\n")
            file.write("GLM PSO - Settings: Delay " + str(self.configs.delay_day) + "\n")
            file.write("File Name: " + str(self.file_name) + "\n")
            file.write("Particle number:" + str(self.configs.num_particles) + "\n")
            file.write("Number of iterations:" + str(self.configs.num_iterations) + "\n")
            file.write("Number od executions:" + str(self.configs.num_executions) + "\n")
            file.write("Day index:" + str(self.configs.delay_day) + "\n")
            file.write("Prediction index:" + str(self.obj.real_internation_index) + "\n")
            file.write("Day of the week index:" + str(self.obj.index_day_week) + "\n")
            file.write("Number of test samples:" + str(self.obj.len_test) + "\n")
            file.write("Number of trainning samples:" + str(self.obj.len_train) + "\n")
            if self.configs.classic_vel:
                file.write("Type of velocity: Classic\n")
            if self.configs.canonica_vel:
                file.write("Type of velocity: Canonica\n")
            if self.obj.type_mse_cost:
                file.write("Type of cost: MSE (mean square error)\n")
            if self.obj.type_ae_cost:
                file.write("Type of cost: AE (absolute error)\n")
            if self.configs.inertial_constant:
                file.write("Inertial Type: Constant\n")
                file.write("Inercia Weight (W):" + str(self.configs.inertia_weight) + "\n")
            if self.configs.inertial_fall:
                file.write("Inertial Type: Linear Fall\n")
                file.write("W lower:" + str(self.configs.w_inf) + "\n")
                file.write("w upper:" + str(self.configs.w_sup) + "\n")
            if self.configs.inertial_rise:
                file.write("Inertial Type: Linear Rise\n")
                file.write("W lower:" + str(self.configs.w_inf) + "\n")
                file.write("w upper:" + str(self.configs.w_sup) + "\n")
            if self.configs.constant_normal:
                file.write("Type of Constants: Normal\n")
                file.write("Cognite Constant(C1):" + str(self.configs.cognat_const) + "\n")
                file.write("Social Constant(C2):" + str(self.configs.social_const) + "\n\n")
            if self.configs.constant_dynamic:
                file.write("Type of Constants: Dynamic\n")
                file.write("C1 lower:" + str(self.configs.c1_inf) + "\n")
                file.write("C2 lower:" + str(self.configs.c2_inf) + "\n")
                file.write("k value:" + str(self.configs.k_value) + "\n")
                file.write("fi:" + str(self.configs.val_fi) + "\n\n")
    def executions_log(self, file_name):
        """
        Append the per-execution results for PSO to file_name.
        """
        with open(file_name, 'a') as file:
            file.write("-----------------Results by Execution-----------------------\n")
            for num, exec_ in enumerate(self.best_particles):
                file.write("Execution nº:" + str(num) + "\n")
                file.write("convergence generation:" + str(exec_.convergence_generation) + "\n")
                file.write("position:" + str(exec_.pos) + "\n")
                file.write("velocity:" + str(exec_.vel) + "\n")
                file.write("cost:" + str(exec_.cost) + "\n")
                file.write("fitness:" + str(exec_.fit) + "\n")
                file.write("local best fitness:" + str(exec_.lb_fit) + "\n")
                file.write("local best position:" + str(exec_.lb_pos) + "\n")
                file.write("AE (Absolute Error):" + str(exec_.ae) + "\n")
                file.write("MSE (Mean Square Error):" + str(exec_.mse) + "\n")
                file.write("MAPE (Mean Absolute Percentage Error):" + str(exec_.mape) + "\n")
                file.write("ARV (Average Relative Variance):" + str(exec_.arv) + "\n")
                file.write("IA (Index of Agreement):" + str(exec_.ia) + "\n")
                file.write("MAE (Mean Absolute Error):" + str(exec_.mae) + "\n")
                file.write("RMSE (Root Mean Squared Error):" + str(exec_.rmse) + "\n")
                file.write("Mean:" + str(exec_.mean) + "\n")
                file.write("Ranking:" + str(exec_.ranking) + "\n")
                file.write("training prediction:" + str(exec_.prediction) + "\n")
                file.write("test prediction:" + str(exec_.test_prediction) + "\n\n\n")
    def execution_log(self, file_name):
        """
        Append the overall best execution results for PSO to file_name.
        """
        with open(file_name, 'a') as file:
            file.write("-----------------Best execution results-----------------------\n")
            file.write("convergence generation:" + str(self.best.convergence_generation) + "\n")
            file.write("position:" + str(self.best.pos) + "\n")
            file.write("velocity:" + str(self.best.vel) + "\n")
            file.write("cost:" + str(self.best.cost) + "\n")
            file.write("fitness:" + str(self.best.fit) + "\n")
            file.write("local best fitness:" + str(self.best.lb_fit) + "\n")
            file.write("local best position:" + str(self.best.lb_pos) + "\n")
            file.write("AE (Absolute Error):" + str(self.best.ae) + "\n")
            file.write("MSE (Mean Square Error):" + str(self.best.mse) + "\n")
            file.write("MAPE (Mean Absolute Percentage Error):" + str(self.best.mape) + "\n")
            file.write("ARV (Average Relative Variance):" + str(self.best.arv) + "\n")
            file.write("IA (Index of Agreement):" + str(self.best.ia) + "\n")
            file.write("MAE (Mean Absolute Error):" + str(self.best.mae) + "\n")
            file.write("RMSE (Root Mean Squared Error):" + str(self.best.rmse) + "\n")
            file.write("Mean:" + str(self.best.mean) + "\n")
            file.write("Ranking:" + str(self.best.ranking) + "\n")
            file.write("training prediction:" + str(self.best.prediction) + "\n")
            file.write("test prediction:" + str(self.best.test_prediction) + "\n\n\n")
class DE_PlotLogResults(object):
    """
    Generate the result log files (one pickle log and one plain-text log)
    for a DE (Differential Evolution) run.
    """
    def __init__(self, obj, configs, best_particles, best):
        """
        Map constructor arguments to attributes and build the payload
        that create_log() pickles.

        obj            -- simulation object (indices, sample counts, cost flags)
        configs        -- DE configuration object
        best_particles -- best individual of each execution
        best           -- overall best individual across all executions
        """
        self.obj = obj
        self.configs = configs
        self.best_particles = best_particles #best of all executions
        self.best = best # list with the best of each execution
        # Copy the input file name before the GUI reference is dropped below.
        self.file_name = deepcopy(obj.interface.open_file_field.text())
        # Drop members that cannot be pickled (pipe connection, Qt widget).
        del obj.child_conn
        del obj.interface
        self.data = {}
        self.data['obj'] = obj
        self.data["configs"] = configs
        self.data["best_particles"] = best_particles
        self.data["best"] = best
    def create_log(self):
        """
        Creates the log files (pickle and text) for the DE.
        """
        # pickle log (kept under json_logs with a .txt extension)
        newpath = "results/de/json_logs"
        if not os.path.exists(newpath):
            os.makedirs(newpath)
        file_path = "results/de/json_logs/de_day_delay_" + str(self.best.delay_day) + ".txt"
        with open(file_path, 'wb') as handle:
            pickle.dump(self.data, handle)
        # txt log
        newpath = "results/de/txt_logs"
        if not os.path.exists(newpath):
            os.makedirs(newpath)
        file_name = "results/de/txt_logs/de_day_delay_" + str(self.best.delay_day) + ".txt"
        self.basic_config(file_name)
        self.executions_log(file_name)
        self.execution_log(file_name)
    def basic_config(self, file_name):
        """
        Write the basic simulation settings for DE (overwrites file_name).
        """
        # 'with' guarantees the handle is closed even if a write raises
        # (the previous open()/close() pair leaked on exceptions).
        with open(file_name, 'w') as file:
            file.write("----------------GLM-DE------------------------\n")
            file.write("GLM DE - Settings: Delay " + str(self.configs.delay_day) + "\n")
            file.write("File Name: " + str(self.file_name) + "\n")
            file.write("Population size:" + str(self.configs.population_size) + "\n")
            file.write("Number of generations:" + str(self.configs.num_generations) + "\n")
            file.write("Number od executions:" + str(self.configs.num_executions) + "\n")
            file.write("Day index:" + str(self.configs.delay_day) + "\n")
            file.write("Prediction index:" + str(self.obj.real_internation_index) + "\n")
            file.write("Day of the week index:" + str(self.obj.index_day_week) + "\n")
            file.write("Number of test samples:" + str(self.obj.len_test) + "\n")
            file.write("Number of trainning samples:" + str(self.obj.len_train) + "\n")
            if self.configs.exponential_crossover:
                file.write("Type of Crossover: Exponential\n")
            elif self.configs.binary_crossover:
                file.write("Type of Crossover: Binary\n")
            if self.obj.type_mse_cost:
                file.write("Type of cost: MSE (mean square error)\n")
            elif self.obj.type_ae_cost:
                file.write("Type of cost: AE (absolute error)\n")
            if self.configs.rand_mutation:
                file.write("Type of Mutation: Rand\n")
            elif self.configs.rand2dp_mutation:
                file.write("Type of Mutation: Rand2DB\n")
            elif self.configs.best_mutation:
                file.write("Type of Mutation: Best\n")
            elif self.configs.best2dp_mutation:
                file.write("Type of Mutation: Best2DP\n")
            elif self.configs.target_mutation:
                file.write("Type of Mutation: Target to Best\n")
            file.write("Social of F:" + str(self.configs.f_number) + "\n")
            file.write("Crossover rate:" + str(self.configs.crossover_rate) + "\n")
            file.write("Mutation rate:" + str(self.configs.mutation_rate) + "\n\n")
    def executions_log(self, file_name):
        """
        Append the per-execution results for DE to file_name.
        """
        with open(file_name, 'a') as file:
            file.write("-----------------Results by Execution-----------------------\n")
            for num, exec_ in enumerate(self.best_particles):
                file.write("Execution nº:" + str(num) + "\n")
                file.write("convergence generation:" + str(exec_.convergence_generation) + "\n")
                file.write("cromo:" + str(exec_.cromo) + "\n")
                file.write("cost:" + str(exec_.cost) + "\n")
                file.write("fitness:" + str(exec_.fit) + "\n")
                file.write("AE (Absolute Error):" + str(exec_.ae) + "\n")
                file.write("MSE (Mean Square Error):" + str(exec_.mse) + "\n")
                file.write("MAPE (Mean Absolute Percentage Error):" + str(exec_.mape) + "\n")
                file.write("ARV (Average Relative Variance):" + str(exec_.arv) + "\n")
                file.write("IA (Index of Agreement):" + str(exec_.ia) + "\n")
                file.write("MAE (Mean Absolute Error):" + str(exec_.mae) + "\n")
                file.write("RMSE (Root Mean Squared Error):" + str(exec_.rmse) + "\n")
                file.write("Mean:" + str(exec_.mean) + "\n")
                file.write("Ranking:" + str(exec_.ranking) + "\n")
                file.write("training prediction:" + str(exec_.prediction) + "\n")
                file.write("test prediction:" + str(exec_.test_prediction) + "\n\n\n")
    def execution_log(self, file_name):
        """
        Append the overall best execution results for DE to file_name.
        """
        with open(file_name, 'a') as file:
            file.write("-----------------Best execution results-----------------------\n")
            file.write("convergence generation:" + str(self.best.convergence_generation) + "\n")
            file.write("cromo:" + str(self.best.cromo) + "\n")
            file.write("cost:" + str(self.best.cost) + "\n")
            file.write("fitness:" + str(self.best.fit) + "\n")
            file.write("AE (Absolute Error):" + str(self.best.ae) + "\n")
            file.write("MSE (Mean Square Error):" + str(self.best.mse) + "\n")
            file.write("MAPE (Mean Absolute Percentage Error):" + str(self.best.mape) + "\n")
            file.write("ARV (Average Relative Variance):" + str(self.best.arv) + "\n")
            file.write("IA (Index of Agreement):" + str(self.best.ia) + "\n")
            file.write("MAE (Mean Absolute Error):" + str(self.best.mae) + "\n")
            file.write("RMSE (Root Mean Squared Error):" + str(self.best.rmse) + "\n")
            file.write("Mean:" + str(self.best.mean) + "\n")
            file.write("Ranking:" + str(self.best.ranking) + "\n")
            file.write("training prediction:" + str(self.best.prediction) + "\n")
            file.write("test prediction:" + str(self.best.test_prediction) + "\n\n\n")
class GA_PlotLogResults(object):
    """
    Generate the result log files (one pickle log and one plain-text log)
    for a GA (Genetic Algorithm) run.
    """
    def __init__(self, obj, configs, best_particles, best):
        """
        Map constructor arguments to attributes and build the payload
        that create_log() pickles.

        obj            -- simulation object (indices, sample counts, cost flags)
        configs        -- GA configuration object
        best_particles -- best individual of each execution
        best           -- overall best individual across all executions
        """
        self.obj = obj
        self.configs = configs
        self.best_particles = best_particles #best of all executions
        self.best = best # list with the best of each execution
        # Copy the input file name before the GUI reference is dropped below.
        self.file_name = deepcopy(obj.interface.open_file_field.text())
        # Drop members that cannot be pickled (pipe connection, Qt widget).
        del obj.child_conn
        del obj.interface
        self.data = {}
        self.data['obj'] = obj
        self.data["configs"] = configs
        self.data["best_particles"] = best_particles
        self.data["best"] = best
    def create_log(self):
        """
        Creates the log files (pickle and text) for the GA.
        """
        # pickle log (kept under json_logs with a .txt extension)
        newpath = "results/ga/json_logs"
        if not os.path.exists(newpath):
            os.makedirs(newpath)
        file_path = "results/ga/json_logs/ga_day_delay_" + str(self.best.delay_day) + ".txt"
        with open(file_path, 'wb') as handle:
            pickle.dump(self.data, handle)
        # txt log
        newpath = "results/ga/txt_logs"
        if not os.path.exists(newpath):
            os.makedirs(newpath)
        file_name = "results/ga/txt_logs/ga_day_delay_" + str(self.best.delay_day) + ".txt"
        self.basic_config(file_name)
        self.executions_log(file_name)
        self.execution_log(file_name)
    def basic_config(self, file_name):
        """
        Write the basic simulation settings for GA (overwrites file_name).
        """
        # 'with' guarantees the handle is closed even if a write raises
        # (the previous open()/close() pair leaked on exceptions).
        with open(file_name, 'w') as file:
            file.write("----------------GLM-GA------------------------\n")
            file.write("File Name: " + str(self.file_name) + "\n")
            file.write("GLM GA - Settings: Delay " + str(self.configs.delay_day) + "\n")
            file.write("Population size:" + str(self.configs.population_size) + "\n")
            file.write("Number of generations:" + str(self.configs.num_generations) + "\n")
            file.write("Number od executions:" + str(self.configs.num_executions) + "\n")
            file.write("Day index:" + str(self.configs.delay_day) + "\n")
            file.write("Prediction index:" + str(self.obj.real_internation_index) + "\n")
            file.write("Day of the week index:" + str(self.obj.index_day_week) + "\n")
            file.write("Number of test samples:" + str(self.obj.len_test) + "\n")
            file.write("Number of trainning samples:" + str(self.obj.len_train) + "\n")
            # Type of Selection
            if self.configs.roulette_tournament:
                file.write("Type of Selection: Roulette\n")
            elif self.configs.roulette_sus_tournament:
                file.write("Type of Selection: Roulette SUS - Stochastic Universal Sampling \n")
            elif self.configs.single_tournament:
                file.write("Type of Selection: Single Tournament\n")
            elif self.configs.death_tournament:
                file.write("Type of Selection: Death Tournament\n")
            elif self.configs.niching_tournament:
                file.write("Type of Selection: Niching Tournament\n")
            # Type of Crossover
            if self.configs.point_crossover:
                file.write("Type of Crossover: Point\n")
            elif self.configs.arithmetic_crossover:
                file.write("Type of Crossover: Arithmetic\n")
            elif self.configs.sbx_crossover:
                file.write("Type of Crossover: SBX\n")
            if self.obj.type_mse_cost:
                file.write("Type of cost: MSE (mean square error)\n")
            elif self.obj.type_ae_cost:
                file.write("Type of cost: AE (absolute error)\n")
            # Type of Mutation
            if self.configs.fixed_mutation:
                file.write("Type of Mutation: Fixed\n")
            elif self.configs.dyn_mutation:
                file.write("Type of Mutation: Dynamic\n")
            file.write("Local Search:" + str(self.configs.local_seach) + "\n")
            file.write("Crossover rate:" + str(self.configs.crossover_rate) + "\n")
            file.write("Mutation rate:" + str(self.configs.mutation_rate) + "\n\n")
    def executions_log(self, file_name):
        """
        Append the per-execution results for GA to file_name.
        """
        with open(file_name, 'a') as file:
            file.write("-----------------Results by Execution-----------------------\n")
            for num, exec_ in enumerate(self.best_particles):
                file.write("Execution nº:" + str(num) + "\n")
                file.write("convergence generation:" + str(exec_.convergence_generation) + "\n")
                file.write("cromo:" + str(exec_.cromo) + "\n")
                file.write("cost:" + str(exec_.cost) + "\n")
                file.write("fitness:" + str(exec_.fit) + "\n")
                file.write("AE (Absolute Error):" + str(exec_.ae) + "\n")
                file.write("MSE (Mean Square Error):" + str(exec_.mse) + "\n")
                file.write("MAPE (Mean Absolute Percentage Error):" + str(exec_.mape) + "\n")
                file.write("ARV (Average Relative Variance):" + str(exec_.arv) + "\n")
                file.write("IA (Index of Agreement):" + str(exec_.ia) + "\n")
                file.write("MAE (Mean Absolute Error):" + str(exec_.mae) + "\n")
                file.write("RMSE (Root Mean Squared Error):" + str(exec_.rmse) + "\n")
                file.write("Mean:" + str(exec_.mean) + "\n")
                file.write("Ranking:" + str(exec_.ranking) + "\n")
                file.write("training prediction:" + str(exec_.prediction) + "\n")
                file.write("test prediction:" + str(exec_.test_prediction) + "\n\n\n")
    def execution_log(self, file_name):
        """
        Append the overall best execution results for GA to file_name.
        (Docstring previously said "for DE" -- copy/paste error.)
        """
        with open(file_name, 'a') as file:
            file.write("-----------------Best execution results-----------------------\n")
            file.write("convergence generation:" + str(self.best.convergence_generation) + "\n")
            file.write("cromo:" + str(self.best.cromo) + "\n")
            file.write("cost:" + str(self.best.cost) + "\n")
            file.write("fitness:" + str(self.best.fit) + "\n")
            file.write("AE (Absolute Error):" + str(self.best.ae) + "\n")
            file.write("MSE (Mean Square Error):" + str(self.best.mse) + "\n")
            file.write("MAPE (Mean Absolute Percentage Error):" + str(self.best.mape) + "\n")
            file.write("ARV (Average Relative Variance):" + str(self.best.arv) + "\n")
            file.write("IA (Index of Agreement):" + str(self.best.ia) + "\n")
            file.write("MAE (Mean Absolute Error):" + str(self.best.mae) + "\n")
            file.write("RMSE (Root Mean Squared Error):" + str(self.best.rmse) + "\n")
            file.write("Mean:" + str(self.best.mean) + "\n")
            file.write("Ranking:" + str(self.best.ranking) + "\n")
            file.write("training prediction:" + str(self.best.prediction) + "\n")
            file.write("test prediction:" + str(self.best.test_prediction) + "\n\n\n")
| 46.979911
| 94
| 0.573811
| 2,671
| 21,047
| 4.411082
| 0.080494
| 0.141317
| 0.121372
| 0.034374
| 0.869886
| 0.849346
| 0.798506
| 0.789509
| 0.7789
| 0.771431
| 0
| 0.000633
| 0.249917
| 21,047
| 447
| 95
| 47.085011
| 0.745677
| 0.044187
| 0
| 0.710059
| 0
| 0
| 0.253096
| 0.037048
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044379
| false
| 0
| 0.011834
| 0
| 0.065089
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
faf65e9d47ffc4ac2b4724c512856622f11a26d5
| 7,268
|
py
|
Python
|
curve_plotting.py
|
cgao/tax_reform
|
66ec065d630c7778bb86a548ef9fd69a5fd5647a
|
[
"MIT"
] | null | null | null |
curve_plotting.py
|
cgao/tax_reform
|
66ec065d630c7778bb86a548ef9fd69a5fd5647a
|
[
"MIT"
] | null | null | null |
curve_plotting.py
|
cgao/tax_reform
|
66ec065d630c7778bb86a548ef9fd69a5fd5647a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 5 12:05:30 2017
@author: cgao
"""
import matplotlib
import matplotlib.pyplot as plt
import tax_reform
#tax_reform.tax_comparison(240000, 3, 1, 700000, 0.03, 0.06, 18000, joint = True, detail = True)
def comparison_curve(
        income_low = 10000,
        income_high = 500000,
        income_interval = 10000,
        family_size = 3,
        children = 1,
        UPB = 700000,
        rate = 0.0275,
        efficient_state_rate = 0.05,
        local_tax = 16500,
        joint = True,
        existing_mtg = False
        ):
    """
    Plot the current vs. proposed federal tax over a range of taxable
    incomes, then a second figure showing the tax reduction.

    Incomes are swept from income_low to income_high in steps of
    income_interval; each point is evaluated via
    tax_reform.tax_comparison with the remaining arguments.
    """
    steps = range(int(income_low / income_interval),
                  int(income_high / income_interval) + 1)
    taxable_incomes = [income_interval * k for k in steps]
    taxes = [tax_reform.tax_comparison(inc, family_size, children, UPB, rate,
                                       efficient_state_rate, local_tax,
                                       joint = joint, existing_mtg = existing_mtg,
                                       display = False)
             for inc in taxable_incomes]
    taxes_old = [pair[0] for pair in taxes]
    taxes_new = [pair[1] for pair in taxes]
    tax_reductions = [pair[0] - pair[1] for pair in taxes]

    def _comma_axes(ax):
        # Thousands separators on both axes.
        comma = matplotlib.ticker.FuncFormatter(lambda v, _p: format(int(v), ','))
        ax.get_xaxis().set_major_formatter(comma)
        ax.get_yaxis().set_major_formatter(comma)

    # Figure 1: old vs. new tax curves.
    plt.figure(figsize=(10,5))
    plt.plot(taxable_incomes, taxes_old, 'r', label = 'Current Federal Tax ($)')
    plt.plot(taxable_incomes, taxes_new, 'b', label = 'New Federal Tax ($)')
    plt.xlabel('taxable income ($)', fontsize = 10)
    _comma_axes(plt.gca())
    plt.legend(fontsize = 10)
    plt.grid()
    plt.show()

    # Figure 2: reduction under the new proposal, with parameter captions.
    plt.figure(figsize=(10,5))
    plt.plot(taxable_incomes, tax_reductions, 'g', label = 'Tax Reduction Under New Tax Proposal ($)')
    plt.xlabel('taxable income ($)', fontsize = 10)
    ax = plt.gca()
    _comma_axes(ax)
    captions = [
        (0.80, 'Family = %d; Children = %d'%(family_size, children)),
        (0.75, 'Existing Mortgage = %r'%existing_mtg + '; UPB = ${:,}'.format(UPB) + '; Rate = %3.2f%%'%(rate*100)),
        (0.70, 'State Rate = %3.2f%%'%(efficient_state_rate*100) + '; Local Tax = ${:,}'.format(local_tax)),
        (0.65, 'Joint = %s'%(joint)),
    ]
    for y_pos, caption in captions:
        ax.text(0.05, y_pos, caption,
                verticalalignment='bottom', horizontalalignment='left',
                transform=ax.transAxes,
                color='red', fontsize=10)
    plt.legend(fontsize = 10)
    plt.grid()
    plt.show()
def AMT_planning(
        income_low = 10000,
        income_high = 500000,
        income_interval = 10000,
        family_size = 3,
        children = 1,
        UPB = 700000,
        rate = 0.0275,
        efficient_state_rate = 0.05,
        local_tax = 16500,
        joint = True,
        existing_mtg = False
        ):
    """
    Plot the federal tax broken down by computation method (standard,
    itemized, AMT) over a range of taxable incomes, then a second figure
    with the AMT penalty (total tax minus itemized tax).

    Incomes are swept from income_low to income_high in steps of
    income_interval; each point is evaluated via
    tax_reform.tax_comparison with the remaining arguments.
    """
    steps = range(int(income_low / income_interval),
                  int(income_high / income_interval) + 1)
    taxable_incomes = [income_interval * k for k in steps]
    taxes = [tax_reform.tax_comparison(inc, family_size, children, UPB, rate,
                                       efficient_state_rate, local_tax,
                                       joint = joint, existing_mtg = existing_mtg,
                                       display = False)
             for inc in taxable_incomes]
    # tax_comparison returns [tax_old, tax_new, old_tax_standard,
    # new_tax_standard, old_tax_itemized, new_tax_itemized, old_tax_AMT]
    old_tax = [row[0] for row in taxes]
    old_tax_standard = [row[2] for row in taxes]
    old_tax_itemized = [row[4] for row in taxes]
    old_tax_AMT = [row[6] for row in taxes]
    AMT_penalty = [row[0] - row[4] for row in taxes]

    def _comma_axes(ax):
        # Thousands separators on both axes.
        comma = matplotlib.ticker.FuncFormatter(lambda v, _p: format(int(v), ','))
        ax.get_xaxis().set_major_formatter(comma)
        ax.get_yaxis().set_major_formatter(comma)

    def _annotate(ax, rows):
        # Red parameter captions in axes coordinates.
        for y_pos, caption in rows:
            ax.text(0.05, y_pos, caption,
                    verticalalignment='bottom', horizontalalignment='left',
                    transform=ax.transAxes,
                    color='red', fontsize=10)

    family_note = 'Family = %d; Children = %d'%(family_size, children)
    mortgage_note = 'Existing Mortgage = %r'%existing_mtg + '; UPB = ${:,}'.format(UPB) + '; Rate = %3.2f%%'%(rate*100)
    state_note = 'State Rate = %3.2f%%'%(efficient_state_rate*100) + '; Local Tax = ${:,}'.format(local_tax)
    joint_note = 'Joint = %s'%(joint)

    # Figure 1: compare standard tax, itemized tax and AMT tax.
    plt.figure(figsize=(10,5))
    plt.plot(taxable_incomes, old_tax, 'rd', label = 'Federal Tax ($)')
    plt.plot(taxable_incomes, old_tax_standard, 'b', label = 'Federal Tax by Standard ($)')
    plt.plot(taxable_incomes, old_tax_itemized, 'g', label = 'Federal Tax by Itemized ($)')
    # Label fixed: this is the AMT series (was mistyped 'ATM').
    plt.plot(taxable_incomes, old_tax_AMT, 'k', label = 'Federal Tax by AMT ($)')
    plt.xlabel('taxable income ($)', fontsize = 10)
    ax = plt.gca()
    _comma_axes(ax)
    # NOTE(review): the Joint caption at y=0.65 sits above the other three
    # (0.60/0.55/0.50) -- looks like a copy/paste leftover; kept as-is to
    # preserve output.
    _annotate(ax, [
        (0.60, family_note),
        (0.55, mortgage_note),
        (0.50, state_note),
        (0.65, joint_note),
    ])
    plt.legend(fontsize = 10)
    plt.grid()
    plt.show()

    # Figure 2: AMT penalty curve.
    plt.figure(figsize=(10,5))
    plt.plot(taxable_incomes, AMT_penalty, 'g', label = 'AMT penalty ($)')
    plt.xlabel('taxable income ($)', fontsize = 10)
    ax = plt.gca()
    _comma_axes(ax)
    _annotate(ax, [
        (0.80, family_note),
        (0.75, mortgage_note),
        (0.70, state_note),
        (0.65, joint_note),
    ])
    plt.legend(fontsize = 10)
    plt.grid()
    plt.show()
| 48.778523
| 222
| 0.629196
| 953
| 7,268
| 4.655824
| 0.151102
| 0.045076
| 0.032454
| 0.024341
| 0.836376
| 0.834122
| 0.773946
| 0.773946
| 0.766509
| 0.730449
| 0
| 0.051812
| 0.213952
| 7,268
| 148
| 223
| 49.108108
| 0.724838
| 0.063704
| 0
| 0.813953
| 0
| 0
| 0.119458
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015504
| false
| 0
| 0.023256
| 0
| 0.03876
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8786655e56e474784f4631dc10d2ef02e62f6880
| 3,166
|
py
|
Python
|
naqttool/ankimodels.py
|
gaufqwi/naqttool
|
b6e32e3085de0350af8deac2627aa78a99fbe732
|
[
"MIT"
] | null | null | null |
naqttool/ankimodels.py
|
gaufqwi/naqttool
|
b6e32e3085de0350af8deac2627aa78a99fbe732
|
[
"MIT"
] | null | null | null |
naqttool/ankimodels.py
|
gaufqwi/naqttool
|
b6e32e3085de0350af8deac2627aa78a99fbe732
|
[
"MIT"
] | null | null | null |
# Anki note-model definitions for NAQT "You Gotta Know" decks.
# Each model bundles front/back card templates (Anki/Mustache syntax)
# with a stylesheet, packaged as parameter dicts -- presumably for an
# AnkiConnect-style createModel call; confirm against the caller.

# Front template of the basic (question/answer) model: the answer is
# rendered but hidden via the .answer-hidden CSS class.
naqt_basic_front = """
<h1>NAQT You Gotta Know</h1>
<div class='qcard'>
{{#Category}}<h2>{{Category}}</h2><br/>{{/Category}}
{{Prompt}}
<br/><br/>
<span class="answer-hidden">{{Answer}}</span>
</div>
<div class="source">
<span class="label">Source:</span> <span class="content">{{Source}}</span>
</div>
"""
# Back template of the basic model: answer visible, plus an optional
# Extra section.
naqt_basic_back = """
<h1>NAQT You Gotta Know</h1>
<div class='qcard'>
{{#Category}}<h2>{{Category}}</h2><br/>{{/Category}}
{{Prompt}}
<br/><br/>
<span class="answer">{{Answer}}</span>
</div>
<div class="source">
<span class="label">Source:</span> <span class="content">{{Source}}</span>
</div>
{{#Extra}}
<br/>
<div class="extra">
{{Extra}}
</div>
{{/Extra}}
"""
# Stylesheet for the basic model's cards.
naqt_basic_css = """
body {
background-color: #cceeee;
}
h1 {
text-align: center;
}
.card {
font-family: arial;
font-size: 20px;
color: black;
}
.qcard {
width: 80%;
margin: 4px auto;
background-color: white;
border: 1px solid black;
border-radius: 4px;
padding: 4px 8px;
}
.qcard h2 {
font-size: 130%;
margin: 0;
color: darkblue;
}
.qcard .answer {
color: maroon;
}
.qcard .answer-hidden {
visibility: hidden;
}
.source {
width: 80%;
margin: 4px auto;
font-size: 70%;
text-align: right;
padding: 0 4px;
}
.source .label {
font-weight: bold;
}
.source .content {
font-family: monospace;
font-size: 90%;
}
.extra {
width: 80%;
margin: 4px auto;
}
"""
# Front template of the cloze model; {{cloze:Text}} handles the
# hide/reveal itself, so front and back are identical.
naqt_cloze_front = """
<h1>NAQT You Gotta Know</h1>
<div class='qcard'>
{{#Category}}<h2>{{Category}}</h2><br/>{{/Category}}
{{cloze:Text}}
</div>
<div class="source">
<span class="label">Source:</span> <span class="content">{{Source}}</span>
</div>
"""
# Back template of the cloze model (same markup as the front).
naqt_cloze_back = """
<h1>NAQT You Gotta Know</h1>
<div class='qcard'>
{{#Category}}<h2>{{Category}}</h2><br/>{{/Category}}
{{cloze:Text}}
</div>
<div class="source">
<span class="label">Source:</span> <span class="content">{{Source}}</span>
</div>
"""
# Stylesheet for the cloze model's cards (same as the basic stylesheet,
# with .cloze styling instead of the answer/answer-hidden rules).
naqt_cloze_css = """
body {
background-color: #cceeee;
}
h1 {
text-align: center;
}
.card {
font-family: arial;
font-size: 20px;
color: black;
}
.qcard {
width: 80%;
margin: 4px auto;
background-color: white;
border: 1px solid black;
border-radius: 4px;
padding: 4px 8px;
}
.qcard h2 {
font-size: 130%;
margin: 0;
color: darkblue;
}
.qcard .cloze {
font-weight: bold;
color: blue;
}
.source {
width: 80%;
margin: 4px auto;
font-size: 70%;
text-align: right;
padding: 0 4px;
}
.source .label {
font-weight: bold;
}
.source .content {
font-family: monospace;
font-size: 90%;
}
.extra {
width: 80%;
margin: 4px auto;
}
"""
# Model-creation parameters for the basic (non-cloze) note type.
naqt_basic_model_params = {
    "modelName": "NAQT Basic",
    "inOrderFields": ["Prompt", "Answer", "Category", "Source", "Extra"],
    "css": naqt_basic_css,
    "isCloze": False,
    "cardTemplates": [
        {
            "Name": "Card 1",
            "Front": naqt_basic_front,
            "Back": naqt_basic_back
        }
    ]
}
# Model-creation parameters for the cloze note type.
naqt_cloze_model_params = {
    "modelName": "NAQT Cloze",
    "inOrderFields": ["Text", "Category", "Source", "Extra"],
    "css": naqt_cloze_css,
    "isCloze": True,
    "cardTemplates": [
        {
            "Name": "Card 1",
            "Front": naqt_cloze_front,
            "Back": naqt_cloze_back
        }
    ]
}
| 14.725581
| 74
| 0.603917
| 402
| 3,166
| 4.681592
| 0.179104
| 0.063762
| 0.041445
| 0.05101
| 0.812965
| 0.785335
| 0.752391
| 0.752391
| 0.752391
| 0.752391
| 0
| 0.027576
| 0.1753
| 3,166
| 215
| 75
| 14.725581
| 0.693221
| 0
| 0
| 0.643678
| 0
| 0
| 0.804547
| 0.165141
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
87b6cf26eaf5dc4c5512bb0748913bdd7c92cd67
| 174
|
py
|
Python
|
Programs/twinkle.py
|
Ortieez/LearningPy
|
d0b6ee47e5c86b68deb6bc056a18a8f4e6f08157
|
[
"CNRI-Python"
] | null | null | null |
Programs/twinkle.py
|
Ortieez/LearningPy
|
d0b6ee47e5c86b68deb6bc056a18a8f4e6f08157
|
[
"CNRI-Python"
] | null | null | null |
Programs/twinkle.py
|
Ortieez/LearningPy
|
d0b6ee47e5c86b68deb6bc056a18a8f4e6f08157
|
[
"CNRI-Python"
] | null | null | null |
# Print the "Twinkle, Twinkle, Little Star" lyrics on one line
# (the original text runs the verses together with no separators).
lyrics = ("Twinkle, twinkle, little star,How I wonder what you are!"
          "Up above the world so high,Like a diamond in the sky."
          "Twinkle, twinkle, little star,How I wonder what you are")
print(lyrics)
| 87
| 173
| 0.770115
| 33
| 174
| 4.060606
| 0.636364
| 0.208955
| 0.298507
| 0.358209
| 0.656716
| 0.656716
| 0.656716
| 0.656716
| 0.656716
| 0.656716
| 0
| 0
| 0.155172
| 174
| 1
| 174
| 174
| 0.911565
| 0
| 0
| 0
| 0
| 1
| 0.942529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 10
|
87e3c6a876739e642f0765359ec47b6b7f004834
| 47
|
py
|
Python
|
comment.py
|
EleonoraBorzis/group-composition-action
|
307ca083a90d250250bfdf2201d5c8730b3b820b
|
[
"MIT"
] | null | null | null |
comment.py
|
EleonoraBorzis/group-composition-action
|
307ca083a90d250250bfdf2201d5c8730b3b820b
|
[
"MIT"
] | 9
|
2021-03-30T14:03:24.000Z
|
2021-04-30T08:50:53.000Z
|
comment.py
|
EleonoraBorzis/group-composition-action
|
307ca083a90d250250bfdf2201d5c8730b3b820b
|
[
"MIT"
] | 2
|
2021-03-30T14:13:12.000Z
|
2021-04-04T07:47:51.000Z
|
# Emit a GitHub Actions workflow command setting the step output
# "test" to "hello_output".
# NOTE(review): `::set-output` was deprecated by GitHub in favour of
# writing to $GITHUB_OUTPUT -- confirm the target runner version.
print("::set-output name=test::hello_output")
| 23.5
| 46
| 0.723404
| 7
| 47
| 4.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06383
| 47
| 1
| 47
| 47
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.765957
| 0.489362
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
35660a74d6a62aba3b4a7446ee453caeec5a2086
| 1,302
|
py
|
Python
|
pylattice/matrix.py
|
steveKapturowski/pylattice
|
123cff0bb9de2bbe986a607ae8a3d236b2dcabda
|
[
"MIT"
] | 1
|
2021-01-31T21:08:52.000Z
|
2021-01-31T21:08:52.000Z
|
pylattice/matrix.py
|
steveKapturowski/pylattice
|
123cff0bb9de2bbe986a607ae8a3d236b2dcabda
|
[
"MIT"
] | null | null | null |
pylattice/matrix.py
|
steveKapturowski/pylattice
|
123cff0bb9de2bbe986a607ae8a3d236b2dcabda
|
[
"MIT"
] | 2
|
2018-07-04T09:17:29.000Z
|
2019-08-15T19:21:52.000Z
|
import numpy as np

# Gamma-matrix constants for lattice computations.
#
# FIX: the original used `dtype=np.complex`, a deprecated alias for the
# builtin `complex` that was removed in NumPy 1.24, so this module failed to
# import on modern NumPy. The builtin `complex` is used instead; it produces
# the same complex128 arrays.
# NOTE(review): `np.matrix` is discouraged in favour of plain ndarrays, but
# it is kept here so the objects' types (and `*` semantics) are unchanged
# for existing callers.

# The four gamma matrices gamma_0..gamma_3 in the Dirac (standard)
# representation, stacked into one (4, 4, 4) complex array indexed by mu.
DIRAC_MATRICES = np.array([
    np.matrix([  # gamma_0: diag(+1, +1, -1, -1)
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, -1, 0],
        [0, 0, 0, -1],
    ], dtype=complex),
    np.matrix([  # gamma_1
        [0, 0, 0, 1],
        [0, 0, 1, 0],
        [0, -1, 0, 0],
        [-1, 0, 0, 0],
    ], dtype=complex),
    np.matrix([  # gamma_2
        [0, 0, 0, -1j],
        [0, 0, 1j, 0],
        [0, 1j, 0, 0],
        [-1j, 0, 0, 0],
    ], dtype=complex),
    np.matrix([  # gamma_3
        [0, 0, 1, 0],
        [0, 0, 0, -1],
        [-1, 0, 0, 0],
        [0, 1, 0, 0],
    ], dtype=complex),
])

# The same four gamma matrices in the chiral (Weyl) representation,
# stacked into one (4, 4, 4) complex array indexed by mu.
CHIRAL_DIRAC_MATRICES = np.array([
    np.matrix([  # gamma_0: off-diagonal identity blocks
        [0, 0, 1, 0],
        [0, 0, 0, 1],
        [1, 0, 0, 0],
        [0, 1, 0, 0],
    ], dtype=complex),
    np.matrix([  # gamma_1
        [0, 0, 0, 1j],
        [0, 0, 1j, 0],
        [0, -1j, 0, 0],
        [-1j, 0, 0, 0],
    ], dtype=complex),
    np.matrix([  # gamma_2
        [0, 0, 0, -1j],
        [0, 0, 1j, 0],
        [0, 1j, 0, 0],
        [-1j, 0, 0, 0],
    ], dtype=complex),
    np.matrix([  # gamma_3
        [0, 0, 1j, 0],
        [0, 0, 0, -1j],
        [-1j, 0, 0, 0],
        [0, 1j, 0, 0],
    ], dtype=complex),
])

# gamma_5 in the Dirac representation: off-diagonal identity blocks.
GAMMA_5 = np.matrix([
    [0, 0, 1, 0],
    [0, 0, 0, 1],
    [1, 0, 0, 0],
    [0, 1, 0, 0],
], dtype=complex)

# gamma_5 in the chiral representation: diag(+1, +1, -1, -1).
CHIRAL_GAMMA_5 = np.matrix([
    [1, 0, 0, 0],
    [0, 1, 0, 0],
    [0, 0, -1, 0],
    [0, 0, 0, -1],
], dtype=complex)

# diag(-1, +1, +1, +1) — presumably the Minkowski metric in the
# mostly-plus signature; stored as int8 since the entries are integral.
ETA = np.matrix([
    [-1, 0, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 1, 0],
    [0, 0, 0, 1],
], dtype=np.int8)
| 16.275
| 34
| 0.441628
| 266
| 1,302
| 2.109023
| 0.075188
| 0.313725
| 0.229947
| 0.128342
| 0.925134
| 0.900178
| 0.868093
| 0.868093
| 0.750446
| 0.748663
| 0
| 0.191795
| 0.251152
| 1,302
| 79
| 35
| 16.481013
| 0.38359
| 0.043011
| 0
| 0.859155
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.014085
| 0
| 0.014085
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
35891386af90279660ee687d6245651bb6ed1c30
| 41,973
|
py
|
Python
|
categories/richtext2/tests/selection.py
|
image72/browserscope
|
44a63558ee376704d996851099bc7703128201cc
|
[
"Apache-2.0"
] | 22
|
2015-10-26T15:20:37.000Z
|
2022-03-11T06:38:17.000Z
|
categories/richtext2/tests/selection.py
|
image72/browserscope
|
44a63558ee376704d996851099bc7703128201cc
|
[
"Apache-2.0"
] | 10
|
2016-01-22T18:46:19.000Z
|
2019-07-19T12:49:51.000Z
|
categories/richtext2/tests/selection.py
|
mcauer/browserscope
|
a9c0e1a250774f14689e06f93ad274d0b9d725e4
|
[
"Apache-2.0"
] | 12
|
2015-10-17T09:40:44.000Z
|
2019-06-08T19:54:36.000Z
|
# Selection test definitions (richtext2 test suite).
#
# Each entry under 'Proposed' is a group of tests sharing a 'desc' and either
# a 'command' (e.g. 'selectall') or a 'function' (a JS snippet run against the
# selection, e.g. sel.modify(...)). Each test gives a 'pad' (the editable
# content before the call) and an 'expected' result (a string, or a list of
# acceptable alternatives).
#
# NOTE(review) — pad/expected notation, as inferred from the data itself;
# confirm against the suite's pad parser:
#   ^       collapsed caret position
#   [ ... ] forward (anchor-first) selection
#   ] ... [ reversed (focus-first) selection
#   { ... } selection boundary placed on element rather than text (alternative)
# Several tests deliberately use combining diacritics, enclosing marks, and
# Unicode bidi controls (RLE/RLO/RLM/PDF) embedded invisibly in the strings —
# do not "clean up" those literals.
SELECTION_TESTS = {
  'id': 'S',
  'caption': 'Selection Tests',
  'checkAttrs': True,
  'checkStyle': True,
  'styleWithCSS': False,
  'Proposed': [
    { 'desc': '',
      'command': '',
      'tests': [
      ]
    },
    { 'desc': 'selectall',
      'command': 'selectall',
      'tests': [
        { 'id': 'SELALL_TEXT-1_SI',
          'desc': 'select all, text only',
          'pad': 'foo [bar] baz',
          'expected': [ '[foo bar baz]',
                        '{foo bar baz}' ] },
        { 'id': 'SELALL_I-1_SI',
          'desc': 'select all, with outer tags',
          'pad': '<i>foo [bar] baz</i>',
          'expected': '{<i>foo bar baz</i>}' }
      ]
    },
    { 'desc': 'unselect',
      'command': 'unselect',
      'tests': [
        { 'id': 'UNSEL_TEXT-1_SI',
          'desc': 'unselect',
          'pad': 'foo [bar] baz',
          'expected': 'foo bar baz' }
      ]
    },
    { 'desc': 'sel.modify (generic)',
      'tests': [
        { 'id': 'SM:m.f.c_TEXT-1_SC-1',
          'desc': 'move caret 1 character forward',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'foo b^ar baz',
          'expected': 'foo ba^r baz' },
        { 'id': 'SM:m.b.c_TEXT-1_SC-1',
          'desc': 'move caret 1 character backward',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'foo b^ar baz',
          'expected': 'foo ^bar baz' },
        { 'id': 'SM:m.f.c_TEXT-1_SI-1',
          'desc': 'move caret forward (sollapse selection)',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'foo [bar] baz',
          'expected': 'foo bar^ baz' },
        { 'id': 'SM:m.b.c_TEXT-1_SI-1',
          'desc': 'move caret backward (collapse selection)',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'foo [bar] baz',
          'expected': 'foo ^bar baz' },
        { 'id': 'SM:m.f.w_TEXT-1_SC-1',
          'desc': 'move caret 1 word forward',
          'function': 'sel.modify("move", "forward", "word");',
          'pad': 'foo b^ar baz',
          'expected': 'foo bar^ baz' },
        { 'id': 'SM:m.f.w_TEXT-1_SC-2',
          'desc': 'move caret 1 word forward',
          'function': 'sel.modify("move", "forward", "word");',
          'pad': 'foo^ bar baz',
          'expected': 'foo bar^ baz' },
        { 'id': 'SM:m.f.w_TEXT-1_SI-1',
          'desc': 'move caret 1 word forward from non-collapsed selection',
          'function': 'sel.modify("move", "forward", "word");',
          'pad': 'foo [bar] baz',
          'expected': 'foo bar baz^' },
        { 'id': 'SM:m.b.w_TEXT-1_SC-1',
          'desc': 'move caret 1 word backward',
          'function': 'sel.modify("move", "backward", "word");',
          'pad': 'foo b^ar baz',
          'expected': 'foo ^bar baz' },
        { 'id': 'SM:m.b.w_TEXT-1_SC-3',
          'desc': 'move caret 1 word backward',
          'function': 'sel.modify("move", "backward", "word");',
          'pad': 'foo bar ^baz',
          'expected': 'foo ^bar baz' },
        { 'id': 'SM:m.b.w_TEXT-1_SI-1',
          'desc': 'move caret 1 word backward from non-collapsed selection',
          'function': 'sel.modify("move", "backward", "word");',
          'pad': 'foo [bar] baz',
          'expected': '^foo bar baz' }
      ]
    },
    { 'desc': 'sel.modify: move forward over combining diacritics, etc.',
      'tests': [
        { 'id': 'SM:m.f.c_CHAR-2_SC-1',
          'desc': 'move 1 character forward over combined o with diaeresis',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'fo^öbarbaz',
          'expected': 'foö^barbaz' },
        { 'id': 'SM:m.f.c_CHAR-3_SC-1',
          'desc': 'move 1 character forward over character with combining diaeresis above',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'fo^öbarbaz',
          'expected': 'foö^barbaz' },
        { 'id': 'SM:m.f.c_CHAR-4_SC-1',
          'desc': 'move 1 character forward over character with combining diaeresis below',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'fo^o̤barbaz',
          'expected': 'foo̤^barbaz' },
        { 'id': 'SM:m.f.c_CHAR-5_SC-1',
          'desc': 'move 1 character forward over character with combining diaeresis above and below',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'fo^ö̤barbaz',
          'expected': 'foö̤^barbaz' },
        { 'id': 'SM:m.f.c_CHAR-5_SI-1',
          'desc': 'move 1 character forward over character with combining diaeresis above and below, selection on diaeresis above',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'foo[̈]̤barbaz',
          'expected': 'foö̤^barbaz' },
        { 'id': 'SM:m.f.c_CHAR-5_SI-2',
          'desc': 'move 1 character forward over character with combining diaeresis above and below, selection on diaeresis below',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'foö[̤]barbaz',
          'expected': 'foö̤^barbaz' },
        { 'id': 'SM:m.f.c_CHAR-5_SL',
          'desc': 'move 1 character forward over character with combining diaeresis above and below, selection oblique on diaeresis and preceding text',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'fo[ö]̤barbaz',
          'expected': 'foö̤^barbaz' },
        { 'id': 'SM:m.f.c_CHAR-5_SR',
          'desc': 'move 1 character forward over character with combining diaeresis above and below, selection oblique on diaeresis and following text',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'foö[̤bar]baz',
          'expected': 'foö̤bar^baz' },
        { 'id': 'SM:m.f.c_CHAR-6_SC-1',
          'desc': 'move 1 character forward over character with enclosing square',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'fo^o⃞barbaz',
          'expected': 'foo⃞^barbaz' },
        { 'id': 'SM:m.f.c_CHAR-7_SC-1',
          'desc': 'move 1 character forward over character with combining long solidus overlay',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'fo^o̸barbaz',
          'expected': 'foo̸^barbaz' }
      ]
    },
    { 'desc': 'sel.modify: move backward over combining diacritics, etc.',
      'tests': [
        { 'id': 'SM:m.b.c_CHAR-2_SC-1',
          'desc': 'move 1 character backward over combined o with diaeresis',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'foö^barbaz',
          'expected': 'fo^öbarbaz' },
        { 'id': 'SM:m.b.c_CHAR-3_SC-1',
          'desc': 'move 1 character backward over character with combining diaeresis above',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'foö^barbaz',
          'expected': 'fo^öbarbaz' },
        { 'id': 'SM:m.b.c_CHAR-4_SC-1',
          'desc': 'move 1 character backward over character with combining diaeresis below',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'foo̤^barbaz',
          'expected': 'fo^o̤barbaz' },
        { 'id': 'SM:m.b.c_CHAR-5_SC-1',
          'desc': 'move 1 character backward over character with combining diaeresis above and below',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'foö̤^barbaz',
          'expected': 'fo^ö̤barbaz' },
        { 'id': 'SM:m.b.c_CHAR-5_SI-1',
          'desc': 'move 1 character backward over character with combining diaeresis above and below, selection on diaeresis above',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'foo[̈]̤barbaz',
          'expected': 'fo^ö̤barbaz' },
        { 'id': 'SM:m.b.c_CHAR-5_SI-2',
          'desc': 'move 1 character backward over character with combining diaeresis above and below, selection on diaeresis below',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'foö[̤]barbaz',
          'expected': 'fo^ö̤barbaz' },
        { 'id': 'SM:m.b.c_CHAR-5_SL',
          'desc': 'move 1 character backward over character with combining diaeresis above and below, selection oblique on diaeresis and preceding text',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'fo[ö]̤barbaz',
          'expected': 'fo^ö̤barbaz' },
        { 'id': 'SM:m.b.c_CHAR-5_SR',
          'desc': 'move 1 character backward over character with combining diaeresis above and below, selection oblique on diaeresis and following text',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'foö[̤bar]baz',
          'expected': 'fo^ö̤barbaz' },
        { 'id': 'SM:m.b.c_CHAR-6_SC-1',
          'desc': 'move 1 character backward over character with enclosing square',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'foo⃞^barbaz',
          'expected': 'fo^o⃞barbaz' },
        { 'id': 'SM:m.b.c_CHAR-7_SC-1',
          'desc': 'move 1 character backward over character with combining long solidus overlay',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'foo̸^barbaz',
          'expected': 'fo^o̸barbaz' }
      ]
    },
    { 'desc': 'sel.modify: move forward/backward/left/right in RTL text',
      'tests': [
        { 'id': 'SM:m.f.c_Pdir:rtl-1_SC-1',
          'desc': 'move caret forward 1 character in right-to-left text',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': '<p dir="rtl">foo b^ar baz</p>',
          'expected': '<p dir="rtl">foo ba^r baz</p>' },
        { 'id': 'SM:m.b.c_Pdir:rtl-1_SC-1',
          'desc': 'move caret backward 1 character in right-to-left text',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': '<p dir="rtl">foo ba^r baz</p>',
          'expected': '<p dir="rtl">foo b^ar baz</p>' },
        { 'id': 'SM:m.r.c_Pdir:rtl-1_SC-1',
          'desc': 'move caret 1 character to the right in LTR text within RTL context',
          'function': 'sel.modify("move", "right", "character");',
          'pad': '<p dir="rtl">foo b^ar baz</p>',
          'expected': '<p dir="rtl">foo ba^r baz</p>' },
        { 'id': 'SM:m.l.c_Pdir:rtl-1_SC-1',
          'desc': 'move caret 1 character to the left in LTR text within RTL context',
          'function': 'sel.modify("move", "left", "character");',
          'pad': '<p dir="rtl">foo ba^r baz</p>',
          'expected': '<p dir="rtl">foo b^ar baz</p>' },
        { 'id': 'SM:m.f.c_TEXT:ar-1_SC-1',
          'desc': 'move caret forward 1 character in Arabic text',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'مرح^با العالم',
          'expected': 'مرحب^ا العالم' },
        { 'id': 'SM:m.b.c_TEXT:ar-1_SC-1',
          'desc': 'move caret backward 1 character in Arabic text',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'مرح^با العالم',
          'expected': 'مر^حبا العالم' },
        { 'id': 'SM:m.f.c_TEXT:he-1_SC-1',
          'desc': 'move caret forward 1 character in Hebrew text',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'של^ום עולם',
          'expected': 'שלו^ם עולם' },
        { 'id': 'SM:m.b.c_TEXT:he-1_SC-1',
          'desc': 'move caret backward 1 character in Hebrew text',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'של^ום עולם',
          'expected': 'ש^לום עולם' },
        { 'id': 'SM:m.f.c_BDOdir:rtl-1_SC-1',
          'desc': 'move caret forward 1 character inside <bdo>',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'foo <bdo dir="rtl">b^ar</bdo> baz',
          'expected': 'foo <bdo dir="rtl">ba^r</bdo> baz' },
        { 'id': 'SM:m.b.c_BDOdir:rtl-1_SC-1',
          'desc': 'move caret backward 1 character inside <bdo>',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'foo <bdo dir="rtl">ba^r</bdo> baz',
          'expected': 'foo <bdo dir="rtl">b^ar</bdo> baz' },
        { 'id': 'SM:m.r.c_BDOdir:rtl-1_SC-1',
          'desc': 'move caret 1 character to the right inside <bdo>',
          'function': 'sel.modify("move", "right", "character");',
          'pad': 'foo <bdo dir="rtl">ba^r</bdo> baz',
          'expected': 'foo <bdo dir="rtl">b^ar</bdo> baz' },
        { 'id': 'SM:m.l.c_BDOdir:rtl-1_SC-1',
          'desc': 'move caret 1 character to the left inside <bdo>',
          'function': 'sel.modify("move", "left", "character");',
          'pad': 'foo <bdo dir="rtl">b^ar</bdo> baz',
          'expected': 'foo <bdo dir="rtl">ba^r</bdo> baz' },
        { 'id': 'SM:m.f.c_TEXTrle-1_SC-rtl-1',
          'desc': 'move caret forward in RTL text within RLE-PDF marks',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'I said, "(RLE)‫car يعني سي^ارة‬(PDF)".',
          'expected': 'I said, "(RLE)‫car يعني سيا^رة‬(PDF)".' },
        { 'id': 'SM:m.b.c_TEXTrle-1_SC-rtl-1',
          'desc': 'move caret backward in RTL text within RLE-PDF marks',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'I said, "(RLE)‫car يعني سي^ارة‬(PDF)".',
          'expected': 'I said, "(RLE)‫car يعني س^يارة‬(PDF)".' },
        { 'id': 'SM:m.r.c_TEXTrle-1_SC-rtl-1',
          'desc': 'move caret 1 character to the right in RTL text within RLE-PDF marks',
          'function': 'sel.modify("move", "right", "character");',
          'pad': 'I said, "(RLE)‫car يعني سي^ارة‬(PDF)".',
          'expected': 'I said, "(RLE)‫car يعني س^يارة‬(PDF)".' },
        { 'id': 'SM:m.l.c_TEXTrle-1_SC-rtl-1',
          'desc': 'move caret 1 character to the left in RTL text within RLE-PDF marks',
          'function': 'sel.modify("move", "left", "character");',
          'pad': 'I said, "(RLE)‫car يعني سي^ارة‬(PDF)".',
          'expected': 'I said, "(RLE)‫car يعني سيا^رة‬(PDF)".' },
        { 'id': 'SM:m.f.c_TEXTrle-1_SC-ltr-1',
          'desc': 'move caret forward in LTR text within RLE-PDF marks',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'I said, "(RLE)‫c^ar يعني سيارة‬(PDF)".',
          'expected': 'I said, "(RLE)‫ca^r يعني سيارة‬(PDF)".' },
        { 'id': 'SM:m.b.c_TEXTrle-1_SC-ltr-1',
          'desc': 'move caret backward in LTR text within RLE-PDF marks',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'I said, "(RLE)‫ca^r يعني سيارة‬(PDF)".',
          'expected': 'I said, "(RLE)‫c^ar يعني سيارة‬(PDF)".' },
        { 'id': 'SM:m.r.c_TEXTrle-1_SC-ltr-1',
          'desc': 'move caret 1 character to the right in LTR text within RLE-PDF marks',
          'function': 'sel.modify("move", "right", "character");',
          'pad': 'I said, "(RLE)‫c^ar يعني سيارة‬(PDF)".',
          'expected': 'I said, "(RLE)‫ca^r يعني سيارة‬(PDF)".' },
        { 'id': 'SM:m.l.c_TEXTrle-1_SC-ltr-1',
          'desc': 'move caret 1 character to the left in LTR text within RLE-PDF marks',
          'function': 'sel.modify("move", "left", "character");',
          'pad': 'I said, "(RLE)‫ca^r يعني سيارة‬(PDF)".',
          'expected': 'I said, "(RLE)‫c^ar يعني سيارة‬(PDF)".' },
        { 'id': 'SM:m.f.c_TEXTrlo-1_SC-rtl-1',
          'desc': 'move caret forward in RTL text within RLO-PDF marks',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'I said, "(RLO)‮car يعني سي^ارة‬(PDF)".',
          'expected': 'I said, "(RLO)‮car يعني سيا^رة‬(PDF)".' },
        { 'id': 'SM:m.b.c_TEXTrlo-1_SC-rtl-1',
          'desc': 'move caret backward in RTL text within RLO-PDF marks',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'I said, "(RLO)‮car يعني سي^ارة‬(PDF)".',
          'expected': 'I said, "(RLO)‮car يعني س^يارة‬(PDF)".' },
        { 'id': 'SM:m.r.c_TEXTrlo-1_SC-rtl-1',
          'desc': 'move caret 1 character to the right in RTL text within RLO-PDF marks',
          'function': 'sel.modify("move", "right", "character");',
          'pad': 'I said, "(RLO)‮car يعني سي^ارة‬(PDF)".',
          'expected': 'I said, "(RLO)‮car يعني س^يارة‬(PDF)".' },
        { 'id': 'SM:m.l.c_TEXTrlo-1_SC-rtl-1',
          'desc': 'move caret 1 character to the left in RTL text within RLO-PDF marks',
          'function': 'sel.modify("move", "left", "character");',
          'pad': 'I said, "(RLO)‮car يعني سي^ارة‬(PDF)".',
          'expected': 'I said, "(RLO)‮car يعني سيا^رة‬(PDF)".' },
        { 'id': 'SM:m.f.c_TEXTrlo-1_SC-ltr-1',
          'desc': 'move caret forward in Latin text within RLO-PDF marks',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'I said, "(RLO)‮c^ar يعني سيارة‬(PDF)".',
          'expected': 'I said, "(RLO)‮ca^r يعني سيارة‬(PDF)".' },
        { 'id': 'SM:m.b.c_TEXTrlo-1_SC-ltr-1',
          'desc': 'move caret backward in Latin text within RLO-PDF marks',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'I said, "(RLO)‮ca^r يعني سيارة‬(PDF)".',
          'expected': 'I said, "(RLO)‮c^ar يعني سيارة‬(PDF)".' },
        { 'id': 'SM:m.r.c_TEXTrlo-1_SC-ltr-1',
          'desc': 'move caret 1 character to the right in Latin text within RLO-PDF marks',
          'function': 'sel.modify("move", "right", "character");',
          'pad': 'I said, "(RLO)‮ca^r يعني سيارة‬(PDF)".',
          'expected': 'I said, "(RLO)‮c^ar يعني سيارة‬(PDF)".' },
        { 'id': 'SM:m.l.c_TEXTrlo-1_SC-ltr-1',
          'desc': 'move caret 1 character to the left in Latin text within RLO-PDF marks',
          'function': 'sel.modify("move", "left", "character");',
          'pad': 'I said, "(RLO)‮c^ar يعني سيارة‬(PDF)".',
          'expected': 'I said, "(RLO)‮ca^r يعني سيارة‬(PDF)".' },
        { 'id': 'SM:m.f.c_TEXTrlm-1_SC-1',
          'desc': 'move caret forward in RTL text within neutral characters followed by RLM',
          'function': 'sel.modify("move", "forward", "character");',
          'pad': 'I said, "يعني سيارة!^?!‏(RLM)".',
          'expected': 'I said, "يعني سيارة!?^!‏(RLM)".' },
        { 'id': 'SM:m.b.c_TEXTrlm-1_SC-1',
          'desc': 'move caret backward in RTL text within neutral characters followed by RLM',
          'function': 'sel.modify("move", "backward", "character");',
          'pad': 'I said, "يعني سيارة!?^!‏(RLM)".',
          'expected': 'I said, "يعني سيارة!^?!‏(RLM)".' },
        { 'id': 'SM:m.r.c_TEXTrlm-1_SC-1',
          'desc': 'move caret 1 character to the right in RTL text within neutral characters followed by RLM',
          'function': 'sel.modify("move", "right", "character");',
          'pad': 'I said, "يعني سيارة!?^!‏(RLM)".',
          'expected': 'I said, "يعني سيارة!^?!‏(RLM)".' },
        { 'id': 'SM:m.l.c_TEXTrlm-1_SC-1',
          'desc': 'move caret 1 character to the left in RTL text within neutral characters followed by RLM',
          'function': 'sel.modify("move", "left", "character");',
          'pad': 'I said, "يعني سيارة!^?!‏(RLM)".',
          'expected': 'I said, "يعني سيارة!?^!‏(RLM)".' }
      ]
    },
    { 'desc': 'sel.modify: move forward/backward over words in Japanese text',
      'tests': [
        { 'id': 'SM:m.f.w_TEXT-jp_SC-1',
          'desc': 'move caret forward 1 word in Japanese text (adjective)',
          'function': 'sel.modify("move", "forward", "word");',
          'pad': '^面白い例文をテストしましょう。',
          'expected': '面白い^例文をテストしましょう。' },
        { 'id': 'SM:m.f.w_TEXT-jp_SC-2',
          'desc': 'move caret forward 1 word in Japanese text (in the middle of a word)',
          'function': 'sel.modify("move", "forward", "word");',
          'pad': '面^白い例文をテストしましょう。',
          'expected': '面白い^例文をテストしましょう。' },
        { 'id': 'SM:m.f.w_TEXT-jp_SC-3',
          'desc': 'move caret forward 1 word in Japanese text (noun)',
          'function': 'sel.modify("move", "forward", "word");',
          'pad': '面白い^例文をテストしましょう。',
          'expected': [ '面白い例文^をテストしましょう。',
                        '面白い例文を^テストしましょう。' ] },
        { 'id': 'SM:m.f.w_TEXT-jp_SC-4',
          'desc': 'move caret forward 1 word in Japanese text (Katakana)',
          'function': 'sel.modify("move", "forward", "word");',
          'pad': '面白い例文を^テストしましょう。',
          'expected': '面白い例文をテスト^しましょう。' },
        { 'id': 'SM:m.f.w_TEXT-jp_SC-5',
          'desc': 'move caret forward 1 word in Japanese text (verb)',
          'function': 'sel.modify("move", "forward", "word");',
          'pad': '面白い例文をテスト^しましょう。',
          'expected': '面白い例文をテストしましょう^。' }
      ]
    },
    { 'desc': 'sel.modify: extend selection forward',
      'tests': [
        { 'id': 'SM:e.f.c_TEXT-1_SC-1',
          'desc': 'extend selection 1 character forward',
          'function': 'sel.modify("extend", "forward", "character");',
          'pad': 'foo ^bar baz',
          'expected': 'foo [b]ar baz' },
        { 'id': 'SM:e.f.c_TEXT-1_SI-1',
          'desc': 'extend selection 1 character forward',
          'function': 'sel.modify("extend", "forward", "character");',
          'pad': 'foo [b]ar baz',
          'expected': 'foo [ba]r baz' },
        { 'id': 'SM:e.f.w_TEXT-1_SC-1',
          'desc': 'extend selection 1 word forward',
          'function': 'sel.modify("extend", "forward", "word");',
          'pad': 'foo ^bar baz',
          'expected': 'foo [bar] baz' },
        { 'id': 'SM:e.f.w_TEXT-1_SI-1',
          'desc': 'extend selection 1 word forward',
          'function': 'sel.modify("extend", "forward", "word");',
          'pad': 'foo [b]ar baz',
          'expected': 'foo [bar] baz' },
        { 'id': 'SM:e.f.w_TEXT-1_SI-2',
          'desc': 'extend selection 1 word forward',
          'function': 'sel.modify("extend", "forward", "word");',
          'pad': 'foo [bar] baz',
          'expected': 'foo [bar baz]' }
      ]
    },
    { 'desc': 'sel.modify: extend selection backward, shrinking it',
      'tests': [
        { 'id': 'SM:e.b.c_TEXT-1_SI-2',
          'desc': 'extend selection 1 character backward',
          'function': 'sel.modify("extend", "backward", "character");',
          'pad': 'foo [bar] baz',
          'expected': 'foo [ba]r baz' },
        { 'id': 'SM:e.b.c_TEXT-1_SI-1',
          'desc': 'extend selection 1 character backward',
          'function': 'sel.modify("extend", "backward", "character");',
          'pad': 'foo [b]ar baz',
          'expected': 'foo ^bar baz' },
        { 'id': 'SM:e.b.w_TEXT-1_SI-3',
          'desc': 'extend selection 1 word backward',
          'function': 'sel.modify("extend", "backward", "word");',
          'pad': 'foo [bar baz]',
          'expected': 'foo [bar] baz' },
        { 'id': 'SM:e.b.w_TEXT-1_SI-2',
          'desc': 'extend selection 1 word backward',
          'function': 'sel.modify("extend", "backward", "word");',
          'pad': 'foo [bar] baz',
          'expected': 'foo ^bar baz' },
        { 'id': 'SM:e.b.w_TEXT-1_SI-4',
          'desc': 'extend selection 1 word backward',
          'function': 'sel.modify("extend", "backward", "word");',
          'pad': 'foo b[ar baz]',
          'expected': 'foo b[ar] baz' },
        { 'id': 'SM:e.b.w_TEXT-1_SI-5',
          'desc': 'extend selection 1 word backward',
          'function': 'sel.modify("extend", "backward", "word");',
          'pad': 'foo b[ar] baz',
          'expected': 'foo b^ar baz' }
      ]
    },
    { 'desc': 'sel.modify: extend selection backward, creating or extending a reverse selections',
      'tests': [
        { 'id': 'SM:e.b.c_TEXT-1_SC-1',
          'desc': 'extend selection 1 character backward',
          'function': 'sel.modify("extend", "backward", "character");',
          'pad': 'foo b^ar baz',
          'expected': 'foo ]b[ar baz' },
        { 'id': 'SM:e.b.c_TEXT-1_SIR-1',
          'desc': 'extend selection 1 character backward',
          'function': 'sel.modify("extend", "backward", "character");',
          'pad': 'foo b]a[r baz',
          'expected': 'foo ]ba[r baz' },
        { 'id': 'SM:e.b.w_TEXT-1_SIR-1',
          'desc': 'extend selection 1 word backward',
          'function': 'sel.modify("extend", "backward", "word");',
          'pad': 'foo b]a[r baz',
          'expected': 'foo ]ba[r baz' },
        { 'id': 'SM:e.b.w_TEXT-1_SIR-2',
          'desc': 'extend selection 1 word backward',
          'function': 'sel.modify("extend", "backward", "word");',
          'pad': 'foo ]ba[r baz',
          'expected': ']foo ba[r baz' }
      ]
    },
    { 'desc': 'sel.modify: extend selection forward, shrinking a reverse selections',
      'tests': [
        { 'id': 'SM:e.f.c_TEXT-1_SIR-1',
          'desc': 'extend selection 1 character forward',
          'function': 'sel.modify("extend", "forward", "character");',
          'pad': 'foo b]a[r baz',
          'expected': 'foo ba^r baz' },
        { 'id': 'SM:e.f.c_TEXT-1_SIR-2',
          'desc': 'extend selection 1 character forward',
          'function': 'sel.modify("extend", "forward", "character");',
          'pad': 'foo ]ba[r baz',
          'expected': 'foo b]a[r baz' },
        { 'id': 'SM:e.f.w_TEXT-1_SIR-1',
          'desc': 'extend selection 1 word forward',
          'function': 'sel.modify("extend", "forward", "word");',
          'pad': 'foo ]ba[r baz',
          'expected': 'foo ba^r baz' },
        { 'id': 'SM:e.f.w_TEXT-1_SIR-3',
          'desc': 'extend selection 1 word forward',
          'function': 'sel.modify("extend", "forward", "word");',
          'pad': ']foo ba[r baz',
          'expected': 'foo ]ba[r baz' }
      ]
    },
    { 'desc': 'sel.modify: extend selection forward to line boundary',
      'tests': [
        { 'id': 'SM:e.f.lb_BR.BR-1_SC-1',
          'desc': 'extend selection forward to line boundary',
          'function': 'sel.modify("extend", "forward", "lineboundary");',
          'pad': 'fo^o<br>bar<br>baz',
          'expected': 'fo[o]<br>bar<br>baz' },
        { 'id': 'SM:e.f.lb_BR.BR-1_SI-1',
          'desc': 'extend selection forward to next line boundary',
          'function': 'sel.modify("extend", "forward", "lineboundary");',
          'pad': 'fo[o]<br>bar<br>baz',
          'expected': 'fo[o<br>bar]<br>baz' },
        { 'id': 'SM:e.f.lb_BR.BR-1_SM-1',
          'desc': 'extend selection forward to line boundary',
          'function': 'sel.modify("extend", "forward", "lineboundary");',
          'pad': 'fo[o<br>b]ar<br>baz',
          'expected': 'fo[o<br>bar]<br>baz' },
        { 'id': 'SM:e.f.lb_P.P.P-1_SC-1',
          'desc': 'extend selection forward to line boundary',
          'function': 'sel.modify("extend", "forward", "lineboundary");',
          'pad': '<p>fo^o</p><p>bar</p><p>baz</p>',
          'expected': '<p>fo[o]</p><p>bar</p><p>baz</p>' },
        { 'id': 'SM:e.f.lb_P.P.P-1_SI-1',
          'desc': 'extend selection forward to next line boundary',
          'function': 'sel.modify("extend", "forward", "lineboundary");',
          'pad': '<p>fo[o]</p><p>bar</p><p>baz</p>',
          'expected': '<p>fo[o</p><p>bar]</p><p>baz</p>' },
        { 'id': 'SM:e.f.lb_P.P.P-1_SM-1',
          'desc': 'extend selection forward to line boundary',
          'function': 'sel.modify("extend", "forward", "lineboundary");',
          'pad': '<p>fo[o</p><p>b]ar</p><p>baz</p>',
          'expected': '<p>fo[o</p><p>bar]</p><p>baz</p>' },
        { 'id': 'SM:e.f.lb_P.P.P-1_SMR-1',
          'desc': 'extend selection forward to line boundary',
          'function': 'sel.modify("extend", "forward", "lineboundary");',
          'pad': '<p>foo</p><p>b]a[r</p><p>baz</p>',
          'expected': '<p>foo</p><p>ba[r]</p><p>baz</p>' }
      ]
    },
    { 'desc': 'sel.modify: extend selection backward to line boundary',
      'tests': [
        { 'id': 'SM:e.b.lb_BR.BR-1_SC-2',
          'desc': 'extend selection backward to line boundary',
          'function': 'sel.modify("extend", "backward", "lineboundary");',
          'pad': 'foo<br>bar<br>b^az',
          'expected': 'foo<br>bar<br>]b[az' },
        { 'id': 'SM:e.b.lb_BR.BR-1_SIR-2',
          'desc': 'extend selection backward to previous line boundary',
          'function': 'sel.modify("extend", "backward", "lineboundary");',
          'pad': 'foo<br>bar<br>]b[az',
          'expected': 'foo<br>]bar<br>b[az' },
        { 'id': 'SM:e.b.lb_BR.BR-1_SMR-2',
          'desc': 'extend selection backward to line boundary',
          'function': 'sel.modify("extend", "backward", "lineboundary");',
          'pad': 'foo<br>ba]r<br>b[az',
          'expected': 'foo<br>]bar<br>b[az' },
        { 'id': 'SM:e.b.lb_P.P.P-1_SC-2',
          'desc': 'extend selection backward to line boundary',
          'function': 'sel.modify("extend", "backward", "lineboundary");',
          'pad': '<p>foo</p><p>bar</p><p>b^az</p>',
          'expected': '<p>foo</p><p>bar</p><p>]b[az</p>' },
        { 'id': 'SM:e.b.lb_P.P.P-1_SIR-2',
          'desc': 'extend selection backward to previous line boundary',
          'function': 'sel.modify("extend", "backward", "lineboundary");',
          'pad': '<p>foo</p><p>bar</p><p>]b[az</p>',
          'expected': '<p>foo</p><p>]bar</p><p>b[az</p>' },
        { 'id': 'SM:e.b.lb_P.P.P-1_SMR-2',
          'desc': 'extend selection backward to line boundary',
          'function': 'sel.modify("extend", "backward", "lineboundary");',
          'pad': '<p>foo</p><p>ba]r</p><p>b[az</p>',
          'expected': '<p>foo</p><p>]bar</p><p>b[az</p>' },
        { 'id': 'SM:e.b.lb_P.P.P-1_SM-2',
          'desc': 'extend selection backward to line boundary',
          'function': 'sel.modify("extend", "backward", "lineboundary");',
          'pad': '<p>foo</p><p>b[a]r</p><p>baz</p>',
          'expected': '<p>foo</p><p>]b[ar</p><p>baz</p>' }
      ]
    },
    { 'desc': 'sel.modify: extend selection forward to next line (NOTE: use identical text in every line!)',
      'tests': [
        { 'id': 'SM:e.f.l_BR.BR-2_SC-1',
          'desc': 'extend selection forward to next line',
          'function': 'sel.modify("extend", "forward", "line");',
          'pad': 'fo^o<br>foo<br>foo',
          'expected': 'fo[o<br>fo]o<br>foo' },
        { 'id': 'SM:e.f.l_BR.BR-2_SI-1',
          'desc': 'extend selection forward to next line',
          'function': 'sel.modify("extend", "forward", "line");',
          'pad': 'fo[o]<br>foo<br>foo',
          'expected': 'fo[o<br>foo]<br>foo' },
        { 'id': 'SM:e.f.l_BR.BR-2_SM-1',
          'desc': 'extend selection forward to next line',
          'function': 'sel.modify("extend", "forward", "line");',
          'pad': 'fo[o<br>f]oo<br>foo',
          'expected': 'fo[o<br>foo<br>f]oo' },
        { 'id': 'SM:e.f.l_P.P-1_SC-1',
          'desc': 'extend selection forward to next line over paragraph boundaries',
          'function': 'sel.modify("extend", "forward", "line");',
          'pad': '<p>foo^bar</p><p>foobar</p>',
          'expected': '<p>foo[bar</p><p>foo]bar</p>' },
        { 'id': 'SM:e.f.l_P.P-1_SMR-1',
          'desc': 'extend selection forward to next line over paragraph boundaries',
          'function': 'sel.modify("extend", "forward", "line");',
          'pad': '<p>fo]obar</p><p>foob[ar</p>',
          'expected': '<p>foobar</p><p>fo]ob[ar</p>' }
      ]
    },
    { 'desc': 'sel.modify: extend selection backward to previous line (NOTE: use identical text in every line!)',
      'tests': [
        { 'id': 'SM:e.b.l_BR.BR-2_SC-2',
          'desc': 'extend selection backward to previous line',
          'function': 'sel.modify("extend", "backward", "line");',
          'pad': 'foo<br>foo<br>f^oo',
          'expected': 'foo<br>f]oo<br>f[oo' },
        { 'id': 'SM:e.b.l_BR.BR-2_SIR-2',
          'desc': 'extend selection backward to previous line',
          'function': 'sel.modify("extend", "backward", "line");',
          'pad': 'foo<br>foo<br>]f[oo',
          'expected': 'foo<br>]foo<br>f[oo' },
        { 'id': 'SM:e.b.l_BR.BR-2_SMR-2',
          'desc': 'extend selection backward to previous line',
          'function': 'sel.modify("extend", "backward", "line");',
          'pad': 'foo<br>fo]o<br>f[oo',
          'expected': 'fo]o<br>foo<br>f[oo' },
        { 'id': 'SM:e.b.l_P.P-1_SC-2',
          'desc': 'extend selection backward to next line over paragraph boundaries',
          'function': 'sel.modify("extend", "backward", "line");',
          'pad': '<p>foobar</p><p>foo^bar</p>',
          'expected': '<p>foo]bar</p><p>foo[bar</p>' },
        { 'id': 'SM:e.b.l_P.P-1_SM-1',
          'desc': 'extend selection backward to next line over paragraph boundaries',
          'function': 'sel.modify("extend", "backward", "line");',
          'pad': '<p>fo[obar</p><p>foob]ar</p>',
          'expected': '<p>fo[ob]ar</p><p>foobar</p>' }
      ]
    },
    { 'desc': 'sel.selectAllChildren(<element>)',
      'function': 'sel.selectAllChildren(doc.getElementById("div"));',
      'tests': [
        { 'id': 'SAC:div_DIV-1_SC-1',
          'desc': 'selectAllChildren(<div>)',
          'pad': 'foo<div id="div">bar <span>ba^z</span></div>qoz',
          'expected': [ 'foo<div id="div">[bar <span>baz</span>}</div>qoz',
                        'foo<div id="div">{bar <span>baz</span>}</div>qoz' ] },
      ]
    }
  ]
}
| 54.298836
| 159
| 0.482429
| 5,049
| 41,973
| 3.965142
| 0.041592
| 0.055295
| 0.093407
| 0.07028
| 0.963836
| 0.953447
| 0.940509
| 0.925125
| 0.886613
| 0.835714
| 0
| 0.097282
| 0.293451
| 41,973
| 772
| 160
| 54.369171
| 0.577792
| 0
| 0
| 0.467692
| 0
| 0.109231
| 0.648567
| 0.201044
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
35a36da8907a666af8cd4a653fe71e43ff4b58d9
| 1,088
|
py
|
Python
|
src/nntm/datasets/__init__.py
|
suud/nntm
|
48ba15f3ec07d8787f86d8cf043481fa9f9cc328
|
[
"BSD-3-Clause"
] | 1
|
2021-12-19T17:09:51.000Z
|
2021-12-19T17:09:51.000Z
|
src/nntm/datasets/__init__.py
|
suud/nntm
|
48ba15f3ec07d8787f86d8cf043481fa9f9cc328
|
[
"BSD-3-Clause"
] | null | null | null |
src/nntm/datasets/__init__.py
|
suud/nntm
|
48ba15f3ec07d8787f86d8cf043481fa9f9cc328
|
[
"BSD-3-Clause"
] | null | null | null |
# Public surface of the nntm.datasets subpackage: re-export the fetch/submit
# helpers from the private implementation module.
from ._numerai_main import (
    fetch_numerai_training,
    fetch_numerai_test,
    fetch_numerai_validation,
    fetch_numerai_live,
    fetch_numerai_tournament,
    fetch_numerai_example_predictions,
    fetch_numerai_example_validation_predictions,
    fetch_numerai_feature_metadata,
    submit_numerai_tournament,
)
# Constants describing the Numerai dataset schema (target/feature/column
# name collections for the legacy, small and medium feature sets).
from ._numerai_main_meta import (
    TARGET_NAMES_UNIQUE,
    TARGET_NAMES,
    FEATURE_NAMES_LEGACY,
    FEATURE_NAMES_SMALL,
    FEATURE_NAMES_MEDIUM,
    COLUMN_NAMES_LEGACY,
    COLUMN_NAMES_SMALL,
    COLUMN_NAMES_MEDIUM,
)

# Explicit public API: limits ``from nntm.datasets import *`` to exactly the
# names re-exported above.
__all__ = [
    "fetch_numerai_training",
    "fetch_numerai_test",
    "fetch_numerai_validation",
    "fetch_numerai_live",
    "fetch_numerai_tournament",
    "fetch_numerai_example_predictions",
    "fetch_numerai_example_validation_predictions",
    "fetch_numerai_feature_metadata",
    "submit_numerai_tournament",
    "TARGET_NAMES_UNIQUE",
    "TARGET_NAMES",
    "FEATURE_NAMES_LEGACY",
    "FEATURE_NAMES_SMALL",
    "FEATURE_NAMES_MEDIUM",
    "COLUMN_NAMES_LEGACY",
    "COLUMN_NAMES_SMALL",
    "COLUMN_NAMES_MEDIUM",
]
| 25.904762
| 51
| 0.759191
| 118
| 1,088
| 6.29661
| 0.20339
| 0.258412
| 0.102288
| 0.067295
| 0.934051
| 0.934051
| 0.934051
| 0.934051
| 0.934051
| 0.934051
| 0
| 0
| 0.170037
| 1,088
| 41
| 52
| 26.536585
| 0.822813
| 0
| 0
| 0
| 0
| 0
| 0.352941
| 0.185662
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ea26300591cec8c85770c441e72f271f4c651422
| 185
|
py
|
Python
|
simpa/log/__init__.py
|
IMSY-DKFZ/simpa
|
b8bddcf43a4bff2564f0ec208dc511b82e49bfb4
|
[
"MIT"
] | 3
|
2022-03-14T15:40:09.000Z
|
2022-03-20T02:34:25.000Z
|
simpa/log/__init__.py
|
jgroehl/simpa
|
e56f0802e5a8555ee8bb139dd4f776025e7e9267
|
[
"MIT"
] | 3
|
2022-03-18T07:19:12.000Z
|
2022-03-30T12:15:19.000Z
|
simpa/log/__init__.py
|
IMSY-DKFZ/simpa
|
b8bddcf43a4bff2564f0ec208dc511b82e49bfb4
|
[
"MIT"
] | null | null | null |
# SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ
# SPDX-FileCopyrightText: 2021 Janek Groehl
# SPDX-License-Identifier: MIT
from .file_logger import Logger
| 30.833333
| 76
| 0.816216
| 23
| 185
| 6.521739
| 0.782609
| 0.28
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04908
| 0.118919
| 185
| 5
| 77
| 37
| 0.871166
| 0.783784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ea4092d92e4a080841f600ee3b31f015ac091630
| 8,508
|
py
|
Python
|
guardian/managers.py
|
vitan/django-guardian
|
8bcf490873b39600accb043350116d7efafde8d2
|
[
"BSD-2-Clause"
] | 1
|
2015-11-06T00:47:01.000Z
|
2015-11-06T00:47:01.000Z
|
guardian/managers.py
|
vitan/django-guardian
|
8bcf490873b39600accb043350116d7efafde8d2
|
[
"BSD-2-Clause"
] | null | null | null |
guardian/managers.py
|
vitan/django-guardian
|
8bcf490873b39600accb043350116d7efafde8d2
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import unicode_literals
from django.db import models
from django.contrib.contenttypes.models import ContentType
from guardian.exceptions import ObjectNotPersisted
from guardian.models import Permission
import warnings
# TODO: consolidate UserObjectPermissionManager and GroupObjectPermissionManager
class BaseObjectPermissionManager(models.Manager):
    """Shared base for the user/group object-permission managers."""

    def is_generic(self):
        """Return ``True`` when the permission model stores its target
        generically (via an ``object_pk`` field), ``False`` when it uses a
        direct foreign key to the target object."""
        try:
            self.model._meta.get_field('object_pk')
        except models.fields.FieldDoesNotExist:
            return False
        return True
class UserObjectPermissionManager(BaseObjectPermissionManager):
    """Manager handling object-level permissions granted to individual users.

    Supports both storage layouts detected by ``is_generic()``: generic
    (``content_type`` + ``object_pk``) and direct foreign key
    (``content_object``).
    """

    def assign_perm(self, perm, user, obj):
        """
        Assigns permission with given ``perm`` for an instance ``obj`` and
        ``user``.
        """
        if getattr(obj, 'pk', None) is None:
            raise ObjectNotPersisted("Object %s needs to be persisted first"
                % obj)
        ctype = ContentType.objects.get_for_model(obj)
        permission = Permission.objects.get(content_type=ctype, codename=perm)
        kwargs = {'permission': permission, 'user': user}
        if self.is_generic():
            # Generic layout: target identified by content type + primary key.
            kwargs['content_type'] = ctype
            kwargs['object_pk'] = obj.pk
        else:
            # Direct-FK layout: target referenced directly.
            kwargs['content_object'] = obj
        obj_perm, created = self.get_or_create(**kwargs)
        return obj_perm

    def assign(self, perm, user, obj):
        """Deprecated name kept for backwards compatibility; use
        ``assign_perm`` instead."""
        warnings.warn("UserObjectPermissionManager method 'assign' is being renamed to 'assign_perm'. Update your code accordingly as old name will be depreciated in 2.0 version.", DeprecationWarning)
        return self.assign_perm(perm, user, obj)

    def bulk_assign_perm(self, perm, users, objs):
        """
        Assigns permission with given ``perm`` for bulk instances ``objs`` and
        given ``users``.
        """
        for obj in objs:
            if getattr(obj, 'pk', None) is None:
                raise ObjectNotPersisted(
                    "Objects needs to be persisted first")
        # NOTE(review): the content type is taken from objs[0], so all objects
        # are assumed to share a single model — confirm against callers.
        ctype = ContentType.objects.get_for_model(objs[0])
        permission = Permission.objects.get(content_type=ctype, codename=perm)
        if self.is_generic():
            kwargs_list = [{'permission': permission,
                            'user': user,
                            'content_type': ctype,
                            'object_pk': obj.pk}
                           for obj in objs
                           for user in users]
        else:
            kwargs_list = [{'permission': permission,
                            'user': user,
                            'content_object': obj}
                           for obj in objs
                           for user in users]
        # Single bulk INSERT covering the objs x users cartesian product.
        return self.bulk_create([
            self.model(**kwargs) for kwargs in kwargs_list
        ])

    def remove_perm(self, perm, user, obj):
        """
        Removes permission ``perm`` for an instance ``obj`` and given ``user``.
        Please note that we do NOT fetch object permission from database - we
        use ``Queryset.delete`` method for removing it. Main implication of this
        is that ``post_delete`` signals would NOT be fired.
        """
        if getattr(obj, 'pk', None) is None:
            raise ObjectNotPersisted("Object %s needs to be persisted first"
                % obj)
        filters = {
            'permission__codename': perm,
            'permission__content_type': ContentType.objects.get_for_model(obj),
            'user': user,
        }
        if self.is_generic():
            filters['object_pk'] = obj.pk
        else:
            filters['content_object__pk'] = obj.pk
        self.filter(**filters).delete()

    def bulk_remove_perm(self, perm, users, objs):
        """
        Removes permission ``perm`` for instances ``objs`` and given ``users``.
        Please note that we do NOT fetch objects permission from database - we
        use ``Queryset.delete`` method for removing them. Main implication of this
        is that ``post_delete`` signals would NOT be fired.
        """
        for obj in objs:
            if getattr(obj, 'pk', None) is None:
                raise ObjectNotPersisted(
                    "Objects needs to be persisted first")
        # Content type again derived from objs[0] only (single-model batch).
        filters = {
            'permission__codename': perm,
            'permission__content_type': ContentType.objects.get_for_model(objs[0]),
            'user__in': users,
        }
        pk_list = [obj.pk for obj in objs]
        if self.is_generic():
            filters['object_pk__in'] = pk_list
        else:
            filters['content_object__pk__in'] = pk_list
        self.filter(**filters).delete()
class GroupObjectPermissionManager(BaseObjectPermissionManager):
    """Manager handling object-level permissions granted to groups.

    Mirrors ``UserObjectPermissionManager`` with ``group`` in place of
    ``user``; supports both generic and direct-FK storage layouts.
    """

    def assign_perm(self, perm, group, obj):
        """
        Assigns permission with given ``perm`` for an instance ``obj`` and
        ``group``.
        """
        if getattr(obj, 'pk', None) is None:
            raise ObjectNotPersisted("Object %s needs to be persisted first"
                % obj)
        ctype = ContentType.objects.get_for_model(obj)
        permission = Permission.objects.get(content_type=ctype, codename=perm)
        kwargs = {'permission': permission, 'group': group}
        if self.is_generic():
            # Generic layout: target identified by content type + primary key.
            kwargs['content_type'] = ctype
            kwargs['object_pk'] = obj.pk
        else:
            # Direct-FK layout: target referenced directly.
            kwargs['content_object'] = obj
        obj_perm, created = self.get_or_create(**kwargs)
        return obj_perm

    def assign(self, perm, group, obj):
        """Deprecated name kept for backwards compatibility; use
        ``assign_perm`` instead."""
        warnings.warn("GroupObjectPermissionManager method 'assign' is being renamed to 'assign_perm'. Update your code accordingly as old name will be depreciated in 2.0 version.", DeprecationWarning)
        return self.assign_perm(perm, group, obj)

    def bulk_assign_perm(self, perm, groups, objs):
        """
        Assigns permission with given ``perm`` for bulk instances ``objs`` and
        given ``groups``.
        """
        for obj in objs:
            if getattr(obj, 'pk', None) is None:
                raise ObjectNotPersisted(
                    "Objects needs to be persisted first")
        # NOTE(review): content type comes from objs[0]; the batch is assumed
        # to contain instances of a single model.
        ctype = ContentType.objects.get_for_model(objs[0])
        permission = Permission.objects.get(content_type=ctype, codename=perm)
        if self.is_generic():
            kwargs_list = [{'permission': permission,
                            'group': group,
                            'content_type': ctype,
                            'object_pk': obj.pk}
                           for obj in objs
                           for group in groups]
        else:
            kwargs_list = [{'permission': permission,
                            'group': group,
                            'content_object': obj}
                           for obj in objs
                           for group in groups]
        # Single bulk INSERT covering the objs x groups cartesian product.
        return self.bulk_create([
            self.model(**kwargs) for kwargs in kwargs_list
        ])

    def remove_perm(self, perm, group, obj):
        """
        Removes permission ``perm`` for an instance ``obj`` and given ``group``.
        Deletes via ``QuerySet.delete`` without fetching rows first, so
        ``post_delete`` signals are NOT fired.
        """
        if getattr(obj, 'pk', None) is None:
            raise ObjectNotPersisted("Object %s needs to be persisted first"
                % obj)
        filters = {
            'permission__codename': perm,
            'permission__content_type': ContentType.objects.get_for_model(obj),
            'group': group,
        }
        if self.is_generic():
            filters['object_pk'] = obj.pk
        else:
            filters['content_object__pk'] = obj.pk
        self.filter(**filters).delete()

    def bulk_remove_perm(self, perm, groups, objs):
        """
        Removes permission ``perm`` for instances ``objs`` and given ``groups``.
        Deletes via ``QuerySet.delete`` without fetching rows first, so
        ``post_delete`` signals are NOT fired.
        """
        for obj in objs:
            if getattr(obj, 'pk', None) is None:
                raise ObjectNotPersisted(
                    "Objects needs to be persisted first")
        filters = {
            'permission__codename': perm,
            'permission__content_type': ContentType.objects.get_for_model(objs[0]),
            'group__in': groups,
        }
        pk_list = [obj.pk for obj in objs]
        if self.is_generic():
            filters['object_pk__in'] = pk_list
        else:
            filters['content_object__pk__in'] = pk_list
        self.filter(**filters).delete()
| 37.315789
| 201
| 0.572285
| 916
| 8,508
| 5.165939
| 0.136463
| 0.019019
| 0.016906
| 0.025359
| 0.870668
| 0.858411
| 0.830516
| 0.787405
| 0.764159
| 0.747887
| 0
| 0.001402
| 0.329455
| 8,508
| 227
| 202
| 37.480176
| 0.828046
| 0.141279
| 0
| 0.76129
| 0
| 0.012903
| 0.167449
| 0.027742
| 0
| 0
| 0
| 0.004405
| 0
| 1
| 0.070968
| false
| 0
| 0.03871
| 0
| 0.180645
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ea47c9a45b7dd9657a139fcbacf18a9a59ed2d3b
| 573
|
py
|
Python
|
Day 3/second_largest_in_list.py
|
tushartrip1010/100_days_code_py
|
ee74b429e98cdd8bdf8661cf987da67c9fee5a3e
|
[
"Apache-2.0"
] | null | null | null |
Day 3/second_largest_in_list.py
|
tushartrip1010/100_days_code_py
|
ee74b429e98cdd8bdf8661cf987da67c9fee5a3e
|
[
"Apache-2.0"
] | null | null | null |
Day 3/second_largest_in_list.py
|
tushartrip1010/100_days_code_py
|
ee74b429e98cdd8bdf8661cf987da67c9fee5a3e
|
[
"Apache-2.0"
] | null | null | null |
# Approach 1:
def Second_Largest(Arr):
    """Return the second-largest value in ``Arr`` (duplicates count).

    Fix: the original called ``Arr.remove(max(Arr))`` directly, destructively
    shrinking the caller's list as a side effect. We now operate on a copy.
    Raises ValueError (from ``max``/``remove``) when Arr has fewer than two
    elements, exactly as before.
    """
    rest = list(Arr)        # copy so the caller's list is left untouched
    rest.remove(max(rest))  # drop one occurrence of the maximum
    return max(rest)        # maximum of the remainder is the second largest
# Driver for approach 1: read a count, collect that many integers from stdin,
# and report the second-largest one.
count = int(input("How Many Elements: "))
print("Enter The Elements: ")
values = [int(input()) for _ in range(count)]
print(f"Second Largest: {Second_Largest(values)}")
# Approach 2:
def Second_Largest(Arr):
    """Return the second-largest element of ``Arr`` (ties included)."""
    # Descending sort: index 1 is the second-largest value.
    return sorted(Arr, reverse=True)[1]
# Driver for approach 2: same interaction as above — prompt for a count,
# read that many integers, print the second-largest.
total = int(input("How Many Elements: "))
print("Enter The Elements: ")
nums = []
for _ in range(total):
    nums.append(int(input()))
print(f"Second Largest: {Second_Largest(nums)}")
| 18.483871
| 46
| 0.602094
| 83
| 573
| 4.108434
| 0.349398
| 0.228739
| 0.093842
| 0.111437
| 0.709677
| 0.709677
| 0.709677
| 0.709677
| 0.709677
| 0.709677
| 0
| 0.011338
| 0.230366
| 573
| 30
| 47
| 19.1
| 0.761905
| 0.04014
| 0
| 0.842105
| 0
| 0
| 0.286267
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0
| 0.052632
| 0.210526
| 0.210526
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ea53ff63df2abc2ab3a24b4c22a1c3c4516f10bc
| 7,414
|
py
|
Python
|
constants.py
|
akk988/Coding_Challenge_Private
|
23c0067414e02f55d622e63003698d4ffbf31629
|
[
"Apache-2.0"
] | 1
|
2022-03-09T19:21:47.000Z
|
2022-03-09T19:21:47.000Z
|
constants.py
|
akk988/Coding_Challenge_Private
|
23c0067414e02f55d622e63003698d4ffbf31629
|
[
"Apache-2.0"
] | null | null | null |
constants.py
|
akk988/Coding_Challenge_Private
|
23c0067414e02f55d622e63003698d4ffbf31629
|
[
"Apache-2.0"
] | null | null | null |
# Variables to be used for developing and testing the program
# Small default input: each entry is a closed [start, end] interval. The
# triple-quoted string blocks that follow in this module hold much larger
# variants of this list; they are inert and meant to be swapped in by hand.
INTERVALS = [[1, 2], [1, 7], [8, 11], [11, 14],
             [8, 10], [32, 36], [33, 35], [105, 108]]
"""
INTERVALS = [[1, 2], [1, 7], [8, 11], [11, 14],
[8, 10], [32, 326], [33, 345], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 103], [32, 336], [33, 355], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 104], [32, 346], [33, 365], [105, 1048], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 105], [32, 356], [33, 375], [105, 1058], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 106], [32, 366], [33, 385], [105, 10680], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 107], [32, 376], [33, 395], [105, 1078]]
"""
"""
INTERVALS = [[1, 2], [1, 7], [8, 11], [11, 14],
[8, 10], [32, 326], [33, 345], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 103], [32, 336], [33, 355], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 104], [32, 346], [33, 365], [105, 1048], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 105], [32, 356], [33, 375], [105, 1058], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 106], [32, 366], [33, 385], [105, 10680], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 107], [32, 376], [33, 395], [105, 1078], [32, 326], [33, 345], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 103], [32, 336], [33, 355], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 104], [32, 346], [33, 365], [105, 1048], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 105], [32, 356], [33, 375], [105, 1058], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 106], [32, 366], [33, 385], [105, 10680], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 107], [32, 376], [33, 395], [105, 1078], [32, 326], [33, 345], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 103], [32, 336], [33, 355], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 104], [32, 346], [33, 365], [105, 1048], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 105], [32, 356], [33, 375], [105, 1058], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 106], [32, 366], [33, 385], [105, 10680], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 107], [32, 376], [33, 395], [105, 1078], [32, 326], [33, 345], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 103], [32, 336], [33, 355], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 104], [32, 346], [33, 365], [105, 1048], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 105], [32, 356], [33, 375], [105, 1058], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 106], [32, 366], [33, 385], [105, 10680], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 107], [32, 376], [33, 395], [105, 1078], [32, 326], [33, 345], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 103], [32, 336], [33, 355], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 104], [32, 346], [33, 365], [105, 1048], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 105], [32, 356], [33, 375], [105, 1058], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 106], [32, 366], [33, 385], [105, 10680], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 107], [32, 376], [33, 395], [105, 1078], [32, 326], [33, 345], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 103], [32, 336], [33, 355], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 104], [32, 346], [33, 365], [105, 1048], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 105], [32, 356], [33, 375], [105, 1058], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 106], [32, 366], [33, 385], [105, 10680], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 107], [32, 376], [33, 395], [105, 1078], [32, 326], [33, 345], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 103], [32, 336], [33, 355], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 104], [32, 346], [33, 365], [105, 1048], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 105], [32, 356], [33, 375], [105, 1058], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 106], [32, 366], [33, 385], [105, 10680], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 107], [32, 376], [33, 395], [105, 1078], [32, 326], [33, 345], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 103], [32, 336], [33, 355], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 104], [32, 346], [33, 365], [105, 1048], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 105], [32, 356], [33, 375], [105, 1058], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 106], [32, 366], [33, 385], [105, 10680], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 107], [32, 376], [33, 395], [105, 1078], [32, 326], [33, 345], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 103], [32, 336], [33, 355], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 104], [32, 346], [33, 365], [105, 1048], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 105], [32, 356], [33, 375], [105, 1058], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 106], [32, 366], [33, 385], [105, 10680], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 107], [32, 376], [33, 395], [105, 1078], [32, 326], [33, 345], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 103], [32, 336], [33, 355], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 104], [32, 346], [33, 365], [105, 1048], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 105], [32, 356], [33, 375], [105, 1058], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 106], [32, 366], [33, 385], [105, 10680], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 107], [32, 376], [33, 395], [105, 1078], [32, 326], [33, 345], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 103], [32, 336], [33, 355], [105, 1038], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 104], [32, 346], [33, 365], [105, 1048], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 105], [32, 356], [33, 375], [105, 1058], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 106], [32, 366], [33, 385], [105, 10680], [
1, 2], [1, 7], [8, 11], [11, 14],
[8, 107], [32, 376], [33, 395], [105, 1078]]
"""
| 55.744361
| 94
| 0.306043
| 1,081
| 7,414
| 2.098982
| 0.041628
| 0.055531
| 0.083297
| 0.111062
| 0.972675
| 0.972675
| 0.972675
| 0.972675
| 0.972675
| 0.972675
| 0
| 0.495479
| 0.403291
| 7,414
| 132
| 95
| 56.166667
| 0.017405
| 0.007958
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ea74b92df6802370268e213282212f9d121f8df6
| 5,604
|
py
|
Python
|
gocardless_pro/resources/payer_authorisation.py
|
gdvalderrama/gocardless-pro-python
|
0ff8001f5bba11673c4fa0f30d26eca61a1219ba
|
[
"MIT"
] | 30
|
2015-07-08T21:10:10.000Z
|
2022-02-17T10:08:55.000Z
|
gocardless_pro/resources/payer_authorisation.py
|
gdvalderrama/gocardless-pro-python
|
0ff8001f5bba11673c4fa0f30d26eca61a1219ba
|
[
"MIT"
] | 21
|
2015-12-14T02:24:52.000Z
|
2022-02-05T15:56:00.000Z
|
gocardless_pro/resources/payer_authorisation.py
|
gdvalderrama/gocardless-pro-python
|
0ff8001f5bba11673c4fa0f30d26eca61a1219ba
|
[
"MIT"
] | 19
|
2016-02-10T15:57:42.000Z
|
2022-02-05T10:21:05.000Z
|
# WARNING: Do not edit by hand, this file was generated by Crank:
#
# https://github.com/gocardless/crank
#
class PayerAuthorisation(object):
    """A thin wrapper around a payer_authorisation, providing easy access to its
    attributes.

    Example:
        payer_authorisation = client.payer_authorisations.get()
        payer_authorisation.id
    """
    # NOTE: generated by Crank (see file header) — manual edits will be lost
    # on regeneration. Every property is a read-only proxy into the raw
    # ``attributes`` dict; nested sub-resources are wrapped on each access.

    def __init__(self, attributes, api_response):
        # attributes: decoded resource body from the API.
        # api_response: the full API response this resource came from.
        self.attributes = attributes
        self.api_response = api_response

    @property
    def bank_account(self):
        # Fresh BankAccount wrapper per access (not cached).
        return self.BankAccount(self.attributes.get('bank_account'))

    @property
    def created_at(self):
        return self.attributes.get('created_at')

    @property
    def customer(self):
        return self.Customer(self.attributes.get('customer'))

    @property
    def id(self):
        return self.attributes.get('id')

    @property
    def incomplete_fields(self):
        return self.attributes.get('incomplete_fields')

    @property
    def links(self):
        return self.Links(self.attributes.get('links'))

    @property
    def mandate(self):
        return self.Mandate(self.attributes.get('mandate'))

    @property
    def status(self):
        return self.attributes.get('status')

    class BankAccount(object):
        """Wrapper for the response's 'bank_account' attribute."""

        def __init__(self, attributes):
            self.attributes = attributes

        @property
        def account_holder_name(self):
            return self.attributes.get('account_holder_name')

        @property
        def account_number(self):
            return self.attributes.get('account_number')

        @property
        def account_number_ending(self):
            return self.attributes.get('account_number_ending')

        @property
        def account_number_suffix(self):
            return self.attributes.get('account_number_suffix')

        @property
        def account_type(self):
            return self.attributes.get('account_type')

        @property
        def bank_code(self):
            return self.attributes.get('bank_code')

        @property
        def branch_code(self):
            return self.attributes.get('branch_code')

        @property
        def country_code(self):
            return self.attributes.get('country_code')

        @property
        def currency(self):
            return self.attributes.get('currency')

        @property
        def iban(self):
            return self.attributes.get('iban')

        @property
        def metadata(self):
            return self.attributes.get('metadata')

    class Customer(object):
        """Wrapper for the response's 'customer' attribute."""

        def __init__(self, attributes):
            self.attributes = attributes

        @property
        def address_line1(self):
            return self.attributes.get('address_line1')

        @property
        def address_line2(self):
            return self.attributes.get('address_line2')

        @property
        def address_line3(self):
            return self.attributes.get('address_line3')

        @property
        def city(self):
            return self.attributes.get('city')

        @property
        def company_name(self):
            return self.attributes.get('company_name')

        @property
        def country_code(self):
            return self.attributes.get('country_code')

        @property
        def danish_identity_number(self):
            return self.attributes.get('danish_identity_number')

        @property
        def email(self):
            return self.attributes.get('email')

        @property
        def family_name(self):
            return self.attributes.get('family_name')

        @property
        def given_name(self):
            return self.attributes.get('given_name')

        @property
        def locale(self):
            return self.attributes.get('locale')

        @property
        def metadata(self):
            return self.attributes.get('metadata')

        @property
        def postal_code(self):
            return self.attributes.get('postal_code')

        @property
        def region(self):
            return self.attributes.get('region')

        @property
        def swedish_identity_number(self):
            return self.attributes.get('swedish_identity_number')

    class Links(object):
        """Wrapper for the response's 'links' attribute."""

        def __init__(self, attributes):
            self.attributes = attributes

        @property
        def bank_account(self):
            return self.attributes.get('bank_account')

        @property
        def customer(self):
            return self.attributes.get('customer')

        @property
        def mandate(self):
            return self.attributes.get('mandate')

    class Mandate(object):
        """Wrapper for the response's 'mandate' attribute."""

        def __init__(self, attributes):
            self.attributes = attributes

        @property
        def metadata(self):
            return self.attributes.get('metadata')

        @property
        def payer_ip_address(self):
            return self.attributes.get('payer_ip_address')

        @property
        def reference(self):
            return self.attributes.get('reference')

        @property
        def scheme(self):
            return self.attributes.get('scheme')
| 23.745763
| 80
| 0.58137
| 561
| 5,604
| 5.654189
| 0.15508
| 0.225095
| 0.180958
| 0.27995
| 0.60971
| 0.494956
| 0.287516
| 0.181274
| 0.181274
| 0.16425
| 0
| 0.001581
| 0.322627
| 5,604
| 235
| 81
| 23.846809
| 0.834036
| 0.085118
| 0
| 0.47482
| 1
| 0
| 0.086879
| 0.017139
| 0
| 0
| 0
| 0
| 0
| 1
| 0.330935
| false
| 0
| 0
| 0.294964
| 0.661871
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
ea77abae12829de0b3b5eef257e35591123dc5a4
| 7,441
|
py
|
Python
|
tests/charts-out/test_graphics_charts_barcharts_sampleV2b.py
|
debragail/reportlab-mirror
|
1e5814e1313ed50d5abb65487b207711cb4f7595
|
[
"BSD-3-Clause"
] | 1
|
2020-05-21T23:34:55.000Z
|
2020-05-21T23:34:55.000Z
|
tests/charts-out/test_graphics_charts_barcharts_sampleV2b.py
|
debragail/reportlab-mirror
|
1e5814e1313ed50d5abb65487b207711cb4f7595
|
[
"BSD-3-Clause"
] | null | null | null |
tests/charts-out/test_graphics_charts_barcharts_sampleV2b.py
|
debragail/reportlab-mirror
|
1e5814e1313ed50d5abb65487b207711cb4f7595
|
[
"BSD-3-Clause"
] | null | null | null |
#Autogenerated by ReportLab guiedit do not edit
from reportlab.graphics.shapes import _DrawingEditorMixin, Drawing, Group, Rect, Line, String
from reportlab.lib.colors import Color, CMYKColor, PCMYKColor
class ExplodedDrawing_Drawing(_DrawingEditorMixin,Drawing):
    # "Exploded" bar chart sample: every bar, axis, tick and label of the
    # chart is emitted as one individual primitive.
    # NOTE: per the file header this module was generated by guiedit —
    # regeneration will overwrite manual edits.
    def __init__(self,width=400,height=200,*args,**kw):
        Drawing.__init__(self,width,height,*args,**kw)
        self.transform = (1,0,0,1,0,0)
        # First data series: red bars anchored on the y=110 baseline
        # (negative heights extend below the baseline).
        self.add(Rect(58.57143,110,17.14286,9.6,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        self.add(Rect(118.5714,110,17.14286,-22.8,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        self.add(Rect(178.5714,110,17.14286,8,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        self.add(Rect(238.5714,110,17.14286,20,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        self.add(Rect(298.5714,110,17.14286,36.8,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        # Second data series: green bars on the same baseline.
        self.add(Rect(84.28571,110,17.14286,2.4,rx=0,ry=0,fillColor=Color(0,.501961,0,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        self.add(Rect(144.2857,110,17.14286,-19.6,rx=0,ry=0,fillColor=Color(0,.501961,0,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        self.add(Rect(204.2857,110,17.14286,-12,rx=0,ry=0,fillColor=Color(0,.501961,0,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        self.add(Rect(264.2857,110,17.14286,16,rx=0,ry=0,fillColor=Color(0,.501961,0,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        self.add(Rect(324.2857,110,17.14286,27.2,rx=0,ry=0,fillColor=Color(0,.501961,0,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        # X axis at y=49 with downward tick marks every 60 units.
        self.add(Line(50,49,350,49,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(50,49,50,44,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(110,49,110,44,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(170,49,170,44,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(230,49,230,44,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(290,49,290,44,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(350,49,350,44,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        # Category labels under the x axis, each positioned via a translated
        # Group (the String x offsets pre-center the text manually).
        v0=self._nn(Group())
        v0.transform = (1,0,0,1,80,-11)
        v0.add(String(-15.344,-8,'Q3 2000',textAnchor='start',fontName='Helvetica',fontSize=8,fillColor=Color(0,0,0,1)))
        v0=self._nn(Group())
        v0.transform = (1,0,0,1,140,-11)
        v0.add(String(-22.456,-8,'Year to Date',textAnchor='start',fontName='Helvetica',fontSize=8,fillColor=Color(0,0,0,1)))
        v0=self._nn(Group())
        v0.transform = (1,0,0,1,200,-11)
        v0.add(String(-18.676,-8,'12 months',textAnchor='start',fontName='Helvetica',fontSize=8,fillColor=Color(0,0,0,1)))
        v0=self._nn(Group())
        v0.transform = (1,0,0,1,260,-11)
        # Two-line label: "Annualised" over "3 years".
        v0.add(String(-19.788,-8,'Annualised',textAnchor='start',fontName='Helvetica',fontSize=8,fillColor=Color(0,0,0,1)))
        v0.add(String(-19.788,-17.6,'3 years',textAnchor='start',fontName='Helvetica',fontSize=8,fillColor=Color(0,0,0,1)))
        v0=self._nn(Group())
        v0.transform = (1,0,0,1,320,-11)
        v0.add(String(-26.684,-8,'Since 07.10.99',textAnchor='start',fontName='Helvetica',fontSize=8,fillColor=Color(0,0,0,1)))
        # Y axis from y=50 to y=170 with leftward tick marks every 20 units.
        self.add(Line(50,50,50,170,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(50,50,45,50,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(50,70,45,70,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(50,90,45,90,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(50,110,45,110,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(50,130,45,130,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(50,150,45,150,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(50,170,45,170,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        # Y-axis value labels -15 .. 15 in steps of 5, one per tick.
        v0=self._nn(Group())
        v0.transform = (1,0,0,1,45,50)
        v0.add(String(0,-8,'-15',textAnchor='middle',fontName='Helvetica',fontSize=8,fillColor=Color(0,0,0,1)))
        v0=self._nn(Group())
        v0.transform = (1,0,0,1,45,70)
        v0.add(String(0,-8,'-10',textAnchor='middle',fontName='Helvetica',fontSize=8,fillColor=Color(0,0,0,1)))
        v0=self._nn(Group())
        v0.transform = (1,0,0,1,45,90)
        v0.add(String(0,-8,'-5',textAnchor='middle',fontName='Helvetica',fontSize=8,fillColor=Color(0,0,0,1)))
        v0=self._nn(Group())
        v0.transform = (1,0,0,1,45,110)
        v0.add(String(0,-8,'0',textAnchor='middle',fontName='Helvetica',fontSize=8,fillColor=Color(0,0,0,1)))
        v0=self._nn(Group())
        v0.transform = (1,0,0,1,45,130)
        v0.add(String(0,-8,'5',textAnchor='middle',fontName='Helvetica',fontSize=8,fillColor=Color(0,0,0,1)))
        v0=self._nn(Group())
        v0.transform = (1,0,0,1,45,150)
        v0.add(String(0,-8,'10',textAnchor='middle',fontName='Helvetica',fontSize=8,fillColor=Color(0,0,0,1)))
        v0=self._nn(Group())
        v0.transform = (1,0,0,1,45,170)
        v0.add(String(0,-8,'15',textAnchor='middle',fontName='Helvetica',fontSize=8,fillColor=Color(0,0,0,1)))
if __name__ == "__main__": #NORUNTESTS
    # When run directly, render the sample chart to a PDF in the current
    # directory (fnRoot=None lets reportlab pick the default file name).
    drawing = ExplodedDrawing_Drawing()
    drawing.save(formats=['pdf'], outDir='.', fnRoot=None)
| 99.213333
| 236
| 0.768579
| 1,231
| 7,441
| 4.619821
| 0.109667
| 0.03341
| 0.029541
| 0.053455
| 0.855108
| 0.842272
| 0.842272
| 0.84192
| 0.84192
| 0.84192
| 0
| 0.128933
| 0.034807
| 7,441
| 74
| 237
| 100.554054
| 0.662907
| 0.007526
| 0
| 0.171429
| 1
| 0
| 0.037112
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014286
| false
| 0
| 0.028571
| 0
| 0.057143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.