hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8431f368dd6d05f9b346bf8fb09cb553daaa54ef
| 39
|
py
|
Python
|
elementalcms/persistence/models/__init__.py
|
paranoid-software/elemental-cms
|
7f09f9cd5498577d23fa70d1a51497b9de232598
|
[
"MIT"
] | 3
|
2022-01-12T09:11:54.000Z
|
2022-02-24T22:39:11.000Z
|
elementalcms/persistence/models/__init__.py
|
paranoid-software/elemental-cms
|
7f09f9cd5498577d23fa70d1a51497b9de232598
|
[
"MIT"
] | null | null | null |
elementalcms/persistence/models/__init__.py
|
paranoid-software/elemental-cms
|
7f09f9cd5498577d23fa70d1a51497b9de232598
|
[
"MIT"
] | 1
|
2022-01-12T09:11:56.000Z
|
2022-01-12T09:11:56.000Z
|
from .mongosession import MongoSession
| 19.5
| 38
| 0.871795
| 4
| 39
| 8.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8454320a419654be3889f256b14a91c83e6ad01a
| 47
|
py
|
Python
|
utils/print-version.py
|
ccicconetti/qiskit-tests
|
06d31c308e237f9fc73c24f0eb6295e67eb7bc04
|
[
"MIT"
] | null | null | null |
utils/print-version.py
|
ccicconetti/qiskit-tests
|
06d31c308e237f9fc73c24f0eb6295e67eb7bc04
|
[
"MIT"
] | null | null | null |
utils/print-version.py
|
ccicconetti/qiskit-tests
|
06d31c308e237f9fc73c24f0eb6295e67eb7bc04
|
[
"MIT"
] | null | null | null |
import qiskit
print(qiskit.__qiskit_version__)
| 15.666667
| 32
| 0.87234
| 6
| 47
| 6
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06383
| 47
| 3
| 32
| 15.666667
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
845cc268de6283531dd9f0a4c1b6c65634a21bf3
| 47
|
py
|
Python
|
accounting/blueprints/user/__init__.py
|
alvin-c-cruz/accounting
|
f16ef16ded3cab36eee7227008ae40856680034d
|
[
"MIT"
] | 1
|
2022-02-05T13:57:40.000Z
|
2022-02-05T13:57:40.000Z
|
accounting/blueprints/user/__init__.py
|
alvin-c-cruz/accounting
|
f16ef16ded3cab36eee7227008ae40856680034d
|
[
"MIT"
] | null | null | null |
accounting/blueprints/user/__init__.py
|
alvin-c-cruz/accounting
|
f16ef16ded3cab36eee7227008ae40856680034d
|
[
"MIT"
] | null | null | null |
from .views import bp
from .models import User
| 15.666667
| 24
| 0.787234
| 8
| 47
| 4.625
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 47
| 2
| 25
| 23.5
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0824d0b7abcd6ecdba1c8828eea2691d8e3b897b
| 41
|
py
|
Python
|
interpolML/interpolML/model/__init__.py
|
MiguelMque/interpolML
|
980d55583285ba1d289de69b5c05c65fc34097f5
|
[
"MIT"
] | null | null | null |
interpolML/interpolML/model/__init__.py
|
MiguelMque/interpolML
|
980d55583285ba1d289de69b5c05c65fc34097f5
|
[
"MIT"
] | null | null | null |
interpolML/interpolML/model/__init__.py
|
MiguelMque/interpolML
|
980d55583285ba1d289de69b5c05c65fc34097f5
|
[
"MIT"
] | null | null | null |
from interpolML.model.model import Model
| 20.5
| 40
| 0.853659
| 6
| 41
| 5.833333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 1
| 41
| 41
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f23c96c56117881148ba909e3ddd19cce63a7075
| 30
|
py
|
Python
|
http_requests/__init__.py
|
JoseVL92/http_requests
|
4c964c93a8e4511891cb262f37b5432a92f8a72b
|
[
"MIT"
] | null | null | null |
http_requests/__init__.py
|
JoseVL92/http_requests
|
4c964c93a8e4511891cb262f37b5432a92f8a72b
|
[
"MIT"
] | null | null | null |
http_requests/__init__.py
|
JoseVL92/http_requests
|
4c964c93a8e4511891cb262f37b5432a92f8a72b
|
[
"MIT"
] | null | null | null |
from .async_requests import *
| 15
| 29
| 0.8
| 4
| 30
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f25f04a2402888c5d75ccb9cb3ccce1828483fd6
| 17
|
py
|
Python
|
wouso/core/scoring/__init__.py
|
AlexandruGhergut/wouso
|
f26244ff58ae626808ae8c58ccc93d21f9f2666f
|
[
"Apache-2.0"
] | 117
|
2015-01-02T18:07:33.000Z
|
2021-01-06T22:36:25.000Z
|
wouso/core/scoring/__init__.py
|
AlexandruGhergut/wouso
|
f26244ff58ae626808ae8c58ccc93d21f9f2666f
|
[
"Apache-2.0"
] | 229
|
2015-01-12T07:07:58.000Z
|
2019-10-12T08:27:01.000Z
|
wouso/core/scoring/__init__.py
|
AlexandruGhergut/wouso
|
f26244ff58ae626808ae8c58ccc93d21f9f2666f
|
[
"Apache-2.0"
] | 96
|
2015-01-07T05:26:09.000Z
|
2020-06-25T07:28:51.000Z
|
from sm import *
| 8.5
| 16
| 0.705882
| 3
| 17
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 17
| 1
| 17
| 17
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4b497655ae927abaf357f300c63c77e851bb8051
| 34
|
py
|
Python
|
code/filter/__init__.py
|
bunert/Soccer-Analysis
|
df307f05beb4b1a4ee99e4e04dd8bbefe2541d27
|
[
"BSD-2-Clause"
] | 2
|
2021-02-28T18:37:19.000Z
|
2021-03-02T03:56:04.000Z
|
code/filter/__init__.py
|
bunert/Soccer-Analysis
|
df307f05beb4b1a4ee99e4e04dd8bbefe2541d27
|
[
"BSD-2-Clause"
] | null | null | null |
code/filter/__init__.py
|
bunert/Soccer-Analysis
|
df307f05beb4b1a4ee99e4e04dd8bbefe2541d27
|
[
"BSD-2-Clause"
] | null | null | null |
from .kalman_filter import Kalman
| 17
| 33
| 0.852941
| 5
| 34
| 5.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4b71b7a391a3a44a90027cdb82e8667a61d62e63
| 146
|
py
|
Python
|
mmdet/ops/psroi_pool/__init__.py
|
Nitin-Mane/MMDET
|
7410b25f27c200719482955cb4a8a1c381e67e04
|
[
"Apache-2.0"
] | 2
|
2019-10-15T09:42:33.000Z
|
2020-02-05T11:37:09.000Z
|
mmdet/ops/psroi_pool/__init__.py
|
Nitin-Mane/MMDET
|
7410b25f27c200719482955cb4a8a1c381e67e04
|
[
"Apache-2.0"
] | null | null | null |
mmdet/ops/psroi_pool/__init__.py
|
Nitin-Mane/MMDET
|
7410b25f27c200719482955cb4a8a1c381e67e04
|
[
"Apache-2.0"
] | 1
|
2020-07-31T18:42:13.000Z
|
2020-07-31T18:42:13.000Z
|
from .psroi_pool import psroi_pool, PSRoIPool, PSRoIPoolAfterPointwiseConv
__all__ = ['psroi_pool', 'PSRoIPool', 'PSRoIPoolAfterPointwiseConv']
| 48.666667
| 75
| 0.815068
| 13
| 146
| 8.615385
| 0.538462
| 0.241071
| 0.321429
| 0.803571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089041
| 146
| 3
| 76
| 48.666667
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0.317241
| 0.186207
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4b78e62190c8dd3eb553992e3f79d7fdce7beada
| 34
|
py
|
Python
|
server/blueprints/result/__init__.py
|
mmaltsev/onti40
|
4b00e7130e2dece80afd9680b38ebc311c1d60f5
|
[
"MIT"
] | null | null | null |
server/blueprints/result/__init__.py
|
mmaltsev/onti40
|
4b00e7130e2dece80afd9680b38ebc311c1d60f5
|
[
"MIT"
] | null | null | null |
server/blueprints/result/__init__.py
|
mmaltsev/onti40
|
4b00e7130e2dece80afd9680b38ebc311c1d60f5
|
[
"MIT"
] | null | null | null |
from .result import result_handler
| 34
| 34
| 0.882353
| 5
| 34
| 5.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 34
| 1
| 34
| 34
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4b9928566edc7e339839e4ad85ead7a62ee62166
| 72
|
py
|
Python
|
__init__.py
|
dreamer221/text_cnn_model_pb
|
e532e6106d2e877867e09e7ef1d18d6ad6a56d49
|
[
"MIT"
] | null | null | null |
__init__.py
|
dreamer221/text_cnn_model_pb
|
e532e6106d2e877867e09e7ef1d18d6ad6a56d49
|
[
"MIT"
] | null | null | null |
__init__.py
|
dreamer221/text_cnn_model_pb
|
e532e6106d2e877867e09e7ef1d18d6ad6a56d49
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2020/1/15 8:45
# @Author : WngXng
| 18
| 27
| 0.5
| 11
| 72
| 3.272727
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.203704
| 0.25
| 72
| 3
| 28
| 24
| 0.462963
| 0.902778
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4bc5ffebb607f8c46f36357ae63aed757f6a0014
| 136
|
py
|
Python
|
scripts/npc/autogen_9201357.py
|
hsienjan/SideQuest-Server
|
3e88debaf45615b759d999255908f99a15283695
|
[
"MIT"
] | null | null | null |
scripts/npc/autogen_9201357.py
|
hsienjan/SideQuest-Server
|
3e88debaf45615b759d999255908f99a15283695
|
[
"MIT"
] | null | null | null |
scripts/npc/autogen_9201357.py
|
hsienjan/SideQuest-Server
|
3e88debaf45615b759d999255908f99a15283695
|
[
"MIT"
] | null | null | null |
# Character field ID when accessed: 820000000
# ParentID: 9201357
# ObjectID: 1000006
# Object Position X: 790
# Object Position Y: 297
| 22.666667
| 45
| 0.757353
| 18
| 136
| 5.722222
| 0.888889
| 0.271845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.256637
| 0.169118
| 136
| 5
| 46
| 27.2
| 0.654867
| 0.919118
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
29bebee15b120a0219ebed5fc798f03b83f76870
| 93
|
py
|
Python
|
agent/src/agent/cli/prompt/pipeline/tcp.py
|
eacherkan-aternity/daria
|
7c77a2f52c09c852017b16949a848fa51f0fb579
|
[
"Apache-2.0"
] | 16
|
2019-04-03T08:31:54.000Z
|
2021-01-24T17:12:04.000Z
|
agent/src/agent/cli/prompt/pipeline/tcp.py
|
eacherkan-aternity/daria
|
7c77a2f52c09c852017b16949a848fa51f0fb579
|
[
"Apache-2.0"
] | 10
|
2020-01-20T14:59:06.000Z
|
2022-01-21T10:19:16.000Z
|
agent/src/agent/cli/prompt/pipeline/tcp.py
|
eacherkan-aternity/daria
|
7c77a2f52c09c852017b16949a848fa51f0fb579
|
[
"Apache-2.0"
] | 5
|
2021-01-08T19:23:03.000Z
|
2021-11-09T14:15:49.000Z
|
from .schemaless import SchemalessPrompter
class TCPPrompter(SchemalessPrompter):
pass
| 15.5
| 42
| 0.817204
| 8
| 93
| 9.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139785
| 93
| 5
| 43
| 18.6
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
29bfd7388c47263cc5f70a8a61e1dec1614083c5
| 55
|
py
|
Python
|
jsoncleaner/transforms/transformations.py
|
realslimkarthik/Json_Cleaner
|
3c130e574364eb91004adbf5889e4a2c7b00a6a0
|
[
"MIT"
] | null | null | null |
jsoncleaner/transforms/transformations.py
|
realslimkarthik/Json_Cleaner
|
3c130e574364eb91004adbf5889e4a2c7b00a6a0
|
[
"MIT"
] | null | null | null |
jsoncleaner/transforms/transformations.py
|
realslimkarthik/Json_Cleaner
|
3c130e574364eb91004adbf5889e4a2c7b00a6a0
|
[
"MIT"
] | 1
|
2020-06-12T15:03:14.000Z
|
2020-06-12T15:03:14.000Z
|
def default_callback(key, val):
return {key: val}
| 13.75
| 31
| 0.672727
| 8
| 55
| 4.5
| 0.75
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 55
| 3
| 32
| 18.333333
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
29dbc08c9856228b3e65af8db4cd5e1ac1e8ad74
| 8,871
|
py
|
Python
|
tests/test_xsd.py
|
johnduarte/pytest-rpc
|
cba9958749421befe3a35c8a3d90b9264f0bc040
|
[
"Apache-2.0"
] | null | null | null |
tests/test_xsd.py
|
johnduarte/pytest-rpc
|
cba9958749421befe3a35c8a3d90b9264f0bc040
|
[
"Apache-2.0"
] | 1
|
2018-07-24T18:09:18.000Z
|
2018-07-24T18:09:18.000Z
|
tests/test_xsd.py
|
johnduarte/pytest-rpc
|
cba9958749421befe3a35c8a3d90b9264f0bc040
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Test cases for the 'get_xsd' utility function for retrieving the XSD for the project."""
# ======================================================================================================================
# Imports
# ======================================================================================================================
from __future__ import absolute_import
from lxml import etree
from pytest_rpc import MK8S_ENV_VARS, ASC_ENV_VARS, get_xsd
from tests.conftest import run_and_parse
from tests.conftest import run_and_parse_with_config
# ======================================================================================================================
# Globals
# ======================================================================================================================
TEST_ENV_VARS = list(ASC_ENV_VARS) # Shallow copy.
MK8S_TEST_ENV_VARS = list(MK8S_ENV_VARS) # Shallow copy.
# ======================================================================================================================
# Tests
# ======================================================================================================================
def test_happy_path_asc(testdir, properly_decorated_test_function):
"""Verify that 'get_xsd' returns an XSD stream that can be used to validate JUnitXML."""
# Setup
testdir.makepyfile(properly_decorated_test_function.format(test_name='test_happy_path',
test_id='123e4567-e89b-12d3-a456-426655440000',
jira_id='ASC-123'))
xml_doc = run_and_parse(testdir).xml_doc
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
xmlschema.assertValid(xml_doc)
def test_happy_path_mk8s(testdir, properly_decorated_test_function):
"""Verify that 'get_xsd' returns an XSD stream that can be used to validate JUnitXML when configured with mk8s."""
# Setup
testdir.makepyfile(properly_decorated_test_function.format(test_name='test_happy_path',
test_id='123e4567-e89b-12d3-a456-426655440000',
jira_id='ASC-123'))
config = \
"""
[pytest]
ci-environment=mk8s
""" # noqa
xml_doc = run_and_parse_with_config(testdir, config).xml_doc
xmlschema = etree.XMLSchema(etree.parse(get_xsd('mk8s')))
# Test
xmlschema.assertValid(xml_doc)
def test_multiple_jira_references(testdir):
"""Verify that 'get_xsd' returns an XSD stream when a testcase is decorated Jira mark with multiple
arguments.
"""
# Setup
testdir.makepyfile("""
import pytest
@pytest.mark.jira('ASC-123', 'ASC-124')
@pytest.mark.test_id('123e4567-e89b-12d3-a456-426655440000')
def test_xsd():
pass
""")
xml_doc = run_and_parse(testdir).xml_doc
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
xmlschema.assertValid(xml_doc)
def test_missing_global_property(testdir, properly_decorated_test_function, mocker):
"""Verify that XSD will enforce the presence of all required global test suite properties."""
# Mock
# Missing 'BUILD_URL'
mock_env_vars = [x for x in TEST_ENV_VARS if x != 'BUILD_URL']
mocker.patch('pytest_rpc.ASC_ENV_VARS', mock_env_vars)
# Setup
testdir.makepyfile(properly_decorated_test_function.format(test_name='test_missing_global',
test_id='123e4567-e89b-12d3-a456-426655440000',
jira_id='ASC-123'))
xml_doc = run_and_parse(testdir).xml_doc
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
assert xmlschema.validate(xml_doc) is False
def test_extra_global_property(testdir, properly_decorated_test_function, mocker):
"""Verify that XSD will enforce the strict presence of only required global test suite properties."""
# Mock
# Extra 'BUILD_URL'
mock_env_vars = TEST_ENV_VARS + ['BUILD_URL']
mocker.patch('pytest_rpc.ASC_ENV_VARS', mock_env_vars)
# Setup
testdir.makepyfile(properly_decorated_test_function.format(test_name='test_extra_global',
test_id='123e4567-e89b-12d3-a456-426655440000',
jira_id='ASC-123'))
xml_doc = run_and_parse(testdir).xml_doc
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
assert xmlschema.validate(xml_doc) is False
def test_typo_global_property(testdir, properly_decorated_test_function, mocker):
"""Verify that XSD will enforce the only certain property names are allowed for the test suite."""
# Mock
# Typo for RPC_RELEASE
mock_env_vars = [x for x in TEST_ENV_VARS if x != 'RPC_RELEASE'] + ['RCP_RELEASE']
mocker.patch('pytest_rpc.ASC_ENV_VARS', mock_env_vars)
# Setup
testdir.makepyfile(properly_decorated_test_function.format(test_name='test_typo_global',
test_id='123e4567-e89b-12d3-a456-426655440000',
jira_id='ASC-123'))
xml_doc = run_and_parse(testdir).xml_doc
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
assert xmlschema.validate(xml_doc) is False
def test_missing_required_marks(testdir, undecorated_test_function):
"""Verify that XSD will enforce the presence of 'test_id' and 'jira_id' properties for test cases."""
# Setup
testdir.makepyfile(undecorated_test_function.format(test_name='test_typo_global'))
xml_doc = run_and_parse(testdir).xml_doc
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
assert xmlschema.validate(xml_doc) is False
def test_missing_uuid_mark(testdir, single_decorated_test_function):
"""Verify that XSD will enforce the presence of 'test_id' property for test cases."""
# Setup
testdir.makepyfile(single_decorated_test_function.format(test_name='test_missing_uuid',
mark_type='jira',
mark_arg='ASC-123'))
xml_doc = run_and_parse(testdir).xml_doc
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
assert xmlschema.validate(xml_doc) is False
def test_missing_jira_mark(testdir, single_decorated_test_function):
"""Verify that XSD will enforce the presence of 'jira' property for test cases."""
# Setup
testdir.makepyfile(single_decorated_test_function.format(test_name='test_missing_jira',
mark_type='test_id',
mark_arg='123e4567-e89b-12d3-a456-426655440000'))
xml_doc = run_and_parse(testdir).xml_doc
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
assert xmlschema.validate(xml_doc) is False
def test_extra_testcase_property(testdir, properly_decorated_test_function):
"""Verify that XSD will enforce the strict presence of only required test case properties."""
# Setup
testdir.makepyfile(properly_decorated_test_function.format(test_name='test_extra_mark',
test_id='123e4567-e89b-12d3-a456-426655440000',
jira_id='ASC-123'))
xml_doc = run_and_parse(testdir).xml_doc
# Add another property element for the testcase.
xml_doc.find('./testcase/properties').append(etree.Element('property',
attrib={'name': 'extra', 'value': 'fail'}))
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
assert xmlschema.validate(xml_doc) is False
def test_typo_property(testdir, properly_decorated_test_function):
"""Verify that XSD will enforce the only certain property names are allowed for the testcase."""
# Setup
testdir.makepyfile(properly_decorated_test_function.format(test_name='test_typo_mark',
test_id='123e4567-e89b-12d3-a456-426655440000',
jira_id='ASC-123'))
xml_doc = run_and_parse(testdir).xml_doc
# Add another property element for the testcase.
xml_doc.find('./testcase/properties/property').attrib['name'] = 'wrong_test_id'
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
assert xmlschema.validate(xml_doc) is False
| 40.140271
| 120
| 0.580431
| 977
| 8,871
| 4.983623
| 0.134084
| 0.04313
| 0.077634
| 0.083385
| 0.825015
| 0.805299
| 0.784761
| 0.763196
| 0.73588
| 0.726638
| 0
| 0.042615
| 0.256679
| 8,871
| 220
| 121
| 40.322727
| 0.695784
| 0.245181
| 0
| 0.520833
| 0
| 0
| 0.144777
| 0.075748
| 0
| 0
| 0
| 0
| 0.114583
| 1
| 0.114583
| false
| 0.010417
| 0.0625
| 0
| 0.177083
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
29f0cc58b7be3d300b59e674aeee2c92c245bd31
| 41
|
py
|
Python
|
indexes/__init__.py
|
alon-albalak/XOR-COVID
|
319c33d30754e5c632e8e3307ff4f3774b9fca79
|
[
"MIT"
] | 1
|
2022-03-20T04:23:06.000Z
|
2022-03-20T04:23:06.000Z
|
indexes/__init__.py
|
alon-albalak/XOR-COVID
|
319c33d30754e5c632e8e3307ff4f3774b9fca79
|
[
"MIT"
] | null | null | null |
indexes/__init__.py
|
alon-albalak/XOR-COVID
|
319c33d30754e5c632e8e3307ff4f3774b9fca79
|
[
"MIT"
] | null | null | null |
from .faiss_indexes import Extract_Index
| 20.5
| 40
| 0.878049
| 6
| 41
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 1
| 41
| 41
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
29fa8f7dc4df55c15cb47ad15dccabbdcaf75d94
| 70
|
py
|
Python
|
app_SingSong/SingSong/Database/__init__.py
|
Satstef/SingSong
|
7ce88db041b621951c43df129111d9a6a6b53c7e
|
[
"MIT"
] | null | null | null |
app_SingSong/SingSong/Database/__init__.py
|
Satstef/SingSong
|
7ce88db041b621951c43df129111d9a6a6b53c7e
|
[
"MIT"
] | null | null | null |
app_SingSong/SingSong/Database/__init__.py
|
Satstef/SingSong
|
7ce88db041b621951c43df129111d9a6a6b53c7e
|
[
"MIT"
] | null | null | null |
from .Database_init import get_db, close_db, create_database, init_db
| 35
| 69
| 0.842857
| 12
| 70
| 4.5
| 0.666667
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 70
| 1
| 70
| 70
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4b17afcbb57b49c3d7a5000f584e909b599c7661
| 45
|
py
|
Python
|
examples/simple/on.py
|
jabjoe/python-energenie
|
66f64bae468aba5bec35b928c65f399fd2e09b1d
|
[
"BSD-3-Clause"
] | 1
|
2015-06-03T00:35:44.000Z
|
2015-06-03T00:35:44.000Z
|
examples/simple/on.py
|
rjw57/energenie
|
fdf82d36b86dff35943bb00f73b915240f5bd68c
|
[
"BSD-3-Clause"
] | null | null | null |
examples/simple/on.py
|
rjw57/energenie
|
fdf82d36b86dff35943bb00f73b915240f5bd68c
|
[
"BSD-3-Clause"
] | null | null | null |
from energenie import switch_on
switch_on()
| 11.25
| 31
| 0.822222
| 7
| 45
| 5
| 0.714286
| 0.457143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 45
| 3
| 32
| 15
| 0.897436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8a1f15fa864160b849c53fc1048f3c4661d0f7c8
| 115,558
|
py
|
Python
|
Packs/RubrikPolaris/Integrations/RubrikPolaris/RubrikPolaris_test.py
|
mazmat-panw/content
|
024a65c1dea2548e2637a9cbbe54966e9e34a722
|
[
"MIT"
] | 2
|
2021-12-06T21:38:24.000Z
|
2022-01-13T08:23:36.000Z
|
Packs/RubrikPolaris/Integrations/RubrikPolaris/RubrikPolaris_test.py
|
mazmat-panw/content
|
024a65c1dea2548e2637a9cbbe54966e9e34a722
|
[
"MIT"
] | 87
|
2022-02-23T12:10:53.000Z
|
2022-03-31T11:29:05.000Z
|
Packs/RubrikPolaris/Integrations/RubrikPolaris/RubrikPolaris_test.py
|
henry-sue-pa/content
|
043c6badfb4f9c80673cad9242fdea72efe301f7
|
[
"MIT"
] | 2
|
2022-01-05T15:27:01.000Z
|
2022-02-01T19:27:43.000Z
|
"""Test File for RubrikPolaris Integration."""
import json
import io
import time
import pytest
import os
from CommonServerPython import remove_empty_elements
from RubrikPolaris import ERROR_MESSAGES, OUTPUT_PREFIX, MESSAGES, TOKEN_EXPIRY_TIME_SPAN, TOKEN_EXPIRY_BUFFER_TIME, \
IOC_TYPE_ENUM
from unittest.mock import patch
BASE_URL = "https://rubrik-se-beta.my.rubrik.com/api"
BASE_URL_GRAPHQL = BASE_URL + "/graphql"
BASE_URL_SESSION = BASE_URL + "/session"
last_fetch = "2021-10-22T14:55:51.616000Z"
first_fetch = "2021-10-22T14:55:51.616Z"
sonar_on_demand_file_path = "test_data/sonar_ondemand_scan_success_response.json"
enum_values_file_path = "test_data/enum_values.json"
mock_command = 'demistomock.command'
mock_params = 'demistomock.params'
MOCK_INTEGRATION_CONTEXT = {
'api_token': "dummy_token",
'valid_until': int(time.time()) + TOKEN_EXPIRY_TIME_SPAN - TOKEN_EXPIRY_BUFFER_TIME
}
SDK_ERROR_MESSAGES = {
'INVALID_SLA_LIST_OBJECT_TYPE': "'{}' is an invalid value for 'object types'. "
"Value must be in ['UNKNOWN_OBJECT_TYPE', 'SAP_HANA_OBJECT_TYPE', "
"'AWS_EC2_EBS_OBJECT_TYPE', 'AWS_RDS_OBJECT_TYPE', 'AZURE_OBJECT_TYPE', "
"'GCP_OBJECT_TYPE', 'O365_OBJECT_TYPE', 'VSPHERE_OBJECT_TYPE', "
"'KUPR_OBJECT_TYPE', 'FILESET_OBJECT_TYPE', 'CASSANDRA_OBJECT_TYPE', "
"'VOLUME_GROUP_OBJECT_TYPE', 'MSSQL_OBJECT_TYPE', "
"'AZURE_SQL_DATABASE_OBJECT_TYPE', 'AZURE_SQL_MANAGED_INSTANCE_OBJECT_TYPE'].",
'INVALID_SORT_ORDER': "'{}' is an invalid value for 'sort_order'. Value must be in ['ASC', 'DESC'].",
'INVALID_OBJECT_SNAPSHOT_SORT_ORDER': "'{}' is an invalid value for 'sort_order'. "
"Value must be in ['Asc', 'Desc'].",
'INVALID_REQUESTED_HASH_TYPE': "'{}' is an invalid value for 'requested_hash_types'. "
"Value must be in ['HASH_TYPE_M_D5', 'HASH_TYPE_SH_A1', 'HASH_TYPE_SH_A256']."
}
def util_load_json(path):
"""Load file in JSON format."""
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
def test_main_incorrect_credentials(requests_mock, monkeypatch, capfd, caplog):
    """Tests the execution of main function when incorrect credentials are provided."""
    from RubrikPolaris import main
    # Configure the integration with a bad email/password pair.
    monkeypatch.setattr(mock_params, lambda: {
        "url": "rubrik-se-beta",
        "email": {
            "identifier": "incorrect@account.com",
            "password": "password"
        }})
    monkeypatch.setattr(mock_command, lambda: "rubrik-sonar-policy-analyzer-groups-list")
    monkeypatch.setattr('demistomock.args', lambda: {})
    # Mocked /api/session payload mirroring the real 401 authentication error.
    response_data = {
        "code": 401,
        "uri": "/api/session",
        "traceSpan": {
            "traceId": "dummy-trace",
            "operation": "/api/session",
            "spanId": "qi0QREAFDyE="
        },
        "message": "UNAUTHENTICATED: wrong username or password"
    }
    requests_mock.post(BASE_URL_SESSION, json=response_data)
    # main() is expected to exit (SystemExit) on the authentication failure.
    with pytest.raises(SystemExit):
        caplog.set_level(50)
        capfd.close()
        main()
def test_main_unknown_commmand(requests_mock, monkeypatch, capfd):
    """Tests the execution of main function when unknown command name is provided."""
    # NOTE(review): "commmand" in this test's name is a typo; kept as-is to
    # avoid churning test identifiers.
    from RubrikPolaris import main
    monkeypatch.setattr(mock_params, lambda: {
        "url": "rubrik-se-beta",
        "email": {
            "identifier": "username@domain.com",
            "password": "password"
        }})
    monkeypatch.setattr(mock_command, lambda: "unknown_command")
    # Session endpoint responds successfully so failure comes from the command
    # name, not from authentication.
    response_data = {
        "access_token": "",
        "mfa_token": "dummy_token"
    }
    requests_mock.post(BASE_URL_SESSION, json=response_data)
    with pytest.raises(SystemExit):
        capfd.close()
        main()
def test_main_no_json_no_email(monkeypatch, capfd):
    """Tests the execution of main function when neither service account json nor email-password have been provided."""
    from RubrikPolaris import main
    # Only a URL is configured: no service-account JSON and no email/password.
    params = {"url": "rubrik-se-beta"}
    monkeypatch.setattr(mock_params, lambda: params)
    monkeypatch.setattr(mock_command, lambda: "some_command")
    with pytest.raises(SystemExit):
        capfd.close()
        main()
@pytest.mark.parametrize("service_account_json", ['{', '{"client_id":}', '{"client_id"=""}',
                                                  '{"client_id": "client", "name": "name","client_secret": "secret"}'])
def test_main_incorrect_json_structure(monkeypatch, capfd, service_account_json, caplog):
    """Tests the execution of main function when incorrectly formatted service account json is provided."""
    # The parametrized values cover truncated JSON, syntactically invalid JSON,
    # and structurally valid JSON with the wrong keys.
    from RubrikPolaris import main
    monkeypatch.setattr(mock_params, lambda: {
        "url": "rubrik-se-beta",
        "service_account_json": service_account_json})
    monkeypatch.setattr(mock_command, lambda: "some_command")
    with pytest.raises(SystemExit):
        capfd.close()
        caplog.set_level(50)
        main()
@pytest.fixture()
def client(requests_mock, capfd):
    """Client fixture.

    Registers a successful /session response before constructing MyClient
    (the client presumably authenticates at construction time — the mock is
    in place for that call), then returns the client instance.
    """
    from RubrikPolaris import MyClient
    data = {
        "access_token": "dummy_token",
        "mfa_token": "dummy_token"
    }
    requests_mock.post(BASE_URL_SESSION, json=data)
    capfd.close()
    client_obj = MyClient(
        domain="rubrik-se-beta",
        username="dummy_username",
        password="dummy_password",
        insecure=True
    )
    return client_obj
def test_test_module_for_correct_params(client, monkeypatch, requests_mock):
    """Test test_module function when correct parameters are passed."""
    from RubrikPolaris import test_module
    params = {
        "isFetch": True,
        "max_fetch": "30",
        "first_fetch": "3 days"
    }
    list_policies_response = {
        "data": {
        }
    }
    fetch_data_response = {
        "data": {
        }
    }
    enum_values = util_load_json(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                              enum_values_file_path))
    # requests_mock serves these sequentially; the order must match the order
    # of GraphQL calls test_module makes (policies list, three enum lookups,
    # then the fetch probe).
    responses = [
        {'json': list_policies_response},
        {'json': enum_values.get('activity_type_enum')},
        {'json': enum_values.get('event_sort_by_enum')},
        {'json': enum_values.get('event_sort_order_enum')},
        {'json': fetch_data_response}
    ]
    requests_mock.post(BASE_URL_GRAPHQL, responses)
    assert test_module(client, params) == 'ok'
@pytest.mark.parametrize("max_fetch, first_fetch", [("-1", "3 days"), ("20", "abc")])
def test_test_module_for_incorrect_params(client, monkeypatch, requests_mock, max_fetch, first_fetch):
    """Test test_module function to raise ValueError with appropriate message when incorrect parameters are passed."""
    # Covers a negative max_fetch and an unparseable first_fetch value.
    from RubrikPolaris import test_module
    params = {
        "isFetch": True,
        "max_fetch": max_fetch,
        "first_fetch": first_fetch
    }
    list_policies_response = {
        "data": {
        }
    }
    requests_mock.post(BASE_URL_GRAPHQL, json=list_policies_response)
    with pytest.raises(ValueError):
        test_module(client, params)
@pytest.mark.parametrize("integration_context", [
    ({}),
    ({'api_token': "dummy_token"}),
    ({'api_token': "dummy_token", 'valid_until': time.time() - 1})
])
@patch('demistomock.getIntegrationContext')
def test_get_api_token_when_not_found_in_integration_context(mocker_get_context, client,
                                                             integration_context):
    """Test cases for scenario when there is no api_token or valid_until in integration context."""
    # An empty context, a token without expiry, and an expired token must all
    # behave as a cache miss (get_api_token reports False).
    mocker_get_context.return_value = integration_context
    token = client.get_api_token()
    assert token == False  # noqa: E712
@patch('demistomock.getIntegrationContext')
def test_get_api_token_when_found_in_integration_context(mocker_get_context, client):
    """Test cases for scenario when there is api_token and valid_until in integration context."""
    # A valid, unexpired cached token is returned as-is.
    mocker_get_context.return_value = MOCK_INTEGRATION_CONTEXT
    token = client.get_api_token()
    assert token == "dummy_token"
    # The integration context must be consulted exactly once.
    assert mocker_get_context.call_count == 1
def test_fetch_incidents_success_without_last_run(client, requests_mock):
    """Test fetch_incidents function to return incidents and new last run with provided empty last run."""
    from RubrikPolaris import fetch_incidents
    # Hoist the repeated dirname computation; load the fixture files once.
    test_dir = os.path.dirname(os.path.realpath(__file__))
    fetch_response = util_load_json(os.path.join(test_dir, "test_data/fetch_incidents_success_response.json"))
    incidents = util_load_json(os.path.join(test_dir, "test_data/fetch_incidents_success_incidents.json"))
    enum_values = util_load_json(os.path.join(test_dir, enum_values_file_path))
    # requests_mock serves these sequentially: three enum lookups, then data.
    responses = [
        {'json': enum_values.get('activity_type_enum')},
        {'json': enum_values.get('event_sort_by_enum')},
        {'json': enum_values.get('event_sort_order_enum')},
        {'json': fetch_response}
    ]
    requests_mock.post(BASE_URL_GRAPHQL, responses)
    # Fix: f"{first_fetch}" / f"{last_fetch}" were redundant f-strings around
    # plain module-level string constants; use the constants directly.
    fetch_incidents_last_run, fetch_incidents_incidents = fetch_incidents(client, {},
                                                                          {"first_fetch": first_fetch,
                                                                           "max_fetch": 2})
    last_run = {'last_fetch': last_fetch,
                'next_page_token':
                    fetch_response["data"]["activitySeriesConnection"]["pageInfo"]["endCursor"]}
    assert fetch_incidents_last_run == last_run
    assert fetch_incidents_incidents == incidents
def test_fetch_incidents_success_with_last_run(client, requests_mock):
    """Test fetch_incidents function to return incidents and new last run with a provided last run."""
    from RubrikPolaris import fetch_incidents
    # Hoist the repeated dirname computation; load the fixture files once.
    test_dir = os.path.dirname(os.path.realpath(__file__))
    fetch_response = util_load_json(os.path.join(test_dir, "test_data/fetch_incidents_success_response.json"))
    incidents = util_load_json(os.path.join(test_dir, "test_data/fetch_incidents_success_incidents.json"))
    enum_values = util_load_json(os.path.join(test_dir, enum_values_file_path))
    # requests_mock serves these sequentially: three enum lookups, then data.
    responses = [
        {'json': enum_values.get('activity_type_enum')},
        {'json': enum_values.get('event_sort_by_enum')},
        {'json': enum_values.get('event_sort_order_enum')},
        {'json': fetch_response}
    ]
    requests_mock.post(BASE_URL_GRAPHQL, responses)
    # Fix: f"{first_fetch}" / f"{last_fetch}" were redundant f-strings around
    # plain module-level string constants; use the constants directly.
    fetch_incidents_last_run, fetch_incidents_incidents = fetch_incidents(client,
                                                                          {"last_fetch": last_fetch,
                                                                           "next_page_token": "dummy-token"},
                                                                          {"first_fetch": first_fetch,
                                                                           "max_fetch": 2})
    last_run = {'last_fetch': last_fetch,
                'next_page_token':
                    fetch_response["data"]["activitySeriesConnection"]["pageInfo"]["endCursor"]}
    assert fetch_incidents_last_run == last_run
    assert fetch_incidents_incidents == incidents
def test_fetch_incidents_empty_response_without_last_run(client, requests_mock):
    """Test fetch_incidents function to return empty incidents and new last run without a provided last run."""
    from RubrikPolaris import fetch_incidents
    # Hoist the repeated dirname computation; load the fixture files once.
    test_dir = os.path.dirname(os.path.realpath(__file__))
    enum_values = util_load_json(os.path.join(test_dir, enum_values_file_path))
    fetch_incidents_empty_response = util_load_json(os.path.join(test_dir,
                                                                 "test_data/fetch_incidents_empty_response.json"))
    # requests_mock serves these sequentially: three enum lookups, then data.
    responses = [
        {'json': enum_values.get('activity_type_enum')},
        {'json': enum_values.get('event_sort_by_enum')},
        {'json': enum_values.get('event_sort_order_enum')},
        {'json': fetch_incidents_empty_response}
    ]
    requests_mock.post(BASE_URL_GRAPHQL, responses)
    # Fix: f"{first_fetch}" / f"{last_fetch}" were redundant f-strings around
    # plain module-level string constants; use the constants directly.
    fetch_incidents_last_run, fetch_incidents_incidents = fetch_incidents(client, {},
                                                                          {"first_fetch": first_fetch,
                                                                           "max_fetch": 2})
    # Empty result set: no next_page_token is stored and no incidents created.
    last_run = {'last_fetch': last_fetch}
    assert fetch_incidents_last_run == last_run
    assert fetch_incidents_incidents == []
def test_fetch_incidents_empty_response_with_last_run(client, requests_mock):
    """Test fetch_incidents function to return empty incidents and new last run with a provided last run."""
    from RubrikPolaris import fetch_incidents
    # Hoist the repeated dirname computation; load the fixture files once.
    test_dir = os.path.dirname(os.path.realpath(__file__))
    enum_values = util_load_json(os.path.join(test_dir, enum_values_file_path))
    fetch_incidents_empty_response = util_load_json(os.path.join(test_dir,
                                                                 "test_data/fetch_incidents_empty_response.json"))
    # requests_mock serves these sequentially: three enum lookups, then data.
    responses = [
        {'json': enum_values.get('activity_type_enum')},
        {'json': enum_values.get('event_sort_by_enum')},
        {'json': enum_values.get('event_sort_order_enum')},
        {'json': fetch_incidents_empty_response}
    ]
    requests_mock.post(BASE_URL_GRAPHQL, responses)
    # Fix: f"{first_fetch}" / f"{last_fetch}" were redundant f-strings around
    # plain module-level string constants; use the constants directly.
    fetch_incidents_last_run, fetch_incidents_incidents = fetch_incidents(client,
                                                                          {"last_fetch": last_fetch,
                                                                           "next_page_token": "dummy-token"},
                                                                          {"first_fetch": first_fetch,
                                                                           "max_fetch": 2})
    # With no new events the previous last-run values are carried forward.
    last_run = {'last_fetch': last_fetch,
                'next_page_token': 'dummy-token'}
    assert fetch_incidents_last_run == last_run
    assert fetch_incidents_incidents == []
def test_object_search_success(client, requests_mock):
    """Tests success for rubrik_polaris_object_search."""
    from RubrikPolaris import rubrik_polaris_object_search_command
    args = {
        "object_name": "admin"
    }
    object_search_response = util_load_json(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                                         "test_data/object_search_response.json"))
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           "test_data/object_search_hr1.md"), 'r') as f:
        object_search_response_hr = f.read()
    enum_values = util_load_json(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                              enum_values_file_path))
    # requests_mock serves these sequentially: two enum lookups, then data.
    responses = [
        {'json': enum_values.get('sort_by_enum')},
        {'json': enum_values.get('sort_order_enum')},
        {'json': object_search_response.get('raw_response')}
    ]
    requests_mock.post(BASE_URL_GRAPHQL, responses)
    response = rubrik_polaris_object_search_command(client, args)
    assert response.raw_response == object_search_response.get('raw_response')
    # Outputs are keyed by the DT expression used for context de-duplication.
    assert response.outputs.get(f'{OUTPUT_PREFIX["GLOBAL_SEARCH"]}(val.id == obj.id)') \
        == remove_empty_elements(object_search_response.get('outputs'))
    assert response.outputs.get(f'{OUTPUT_PREFIX["PAGE_TOKEN_GLOBAL_SEARCH"]}(val.name == obj.name)') \
        == remove_empty_elements(object_search_response.get('page_token'))
    assert response.readable_output == object_search_response_hr
def test_object_search_with_token_hr_success(client, requests_mock):
    """Tests success for hr with next token for rubrik_polaris_object_search."""
    from RubrikPolaris import rubrik_polaris_object_search_command
    # A limit smaller than the result set forces a next-page token into the HR.
    args = {
        "object_name": "admin",
        "limit": 2
    }
    object_search_response = util_load_json(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                                         "test_data/object_search_response2.json"))
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           "test_data/object_search_hr2.md"), 'r') as f:
        object_search_response_hr = f.read()
    enum_values = util_load_json(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                              enum_values_file_path))
    # requests_mock serves these sequentially: two enum lookups, then data.
    responses = [
        {'json': enum_values.get('sort_by_enum')},
        {'json': enum_values.get('sort_order_enum')},
        {'json': object_search_response.get('raw_response')}
    ]
    requests_mock.post(BASE_URL_GRAPHQL, responses)
    response = rubrik_polaris_object_search_command(client, args)
    assert response.raw_response == object_search_response.get('raw_response')
    assert response.outputs.get(f'{OUTPUT_PREFIX["GLOBAL_SEARCH"]}(val.id == obj.id)') \
        == remove_empty_elements(object_search_response.get('outputs'))
    assert response.outputs.get(f'{OUTPUT_PREFIX["PAGE_TOKEN_GLOBAL_SEARCH"]}(val.name == obj.name)') \
        == remove_empty_elements(object_search_response.get('page_token'))
    assert response.readable_output == object_search_response_hr
@pytest.mark.parametrize("args, exception, error", [
    ({"object_name": ""}, ValueError, ERROR_MESSAGES["MISSING_REQUIRED_FIELD"].format("object_name")),
    ({"object_name": "abc", "limit": "ab"}, ValueError, '"ab" is not a valid number'),
    ({"object_name": "abc", "limit": 1001}, ValueError, ERROR_MESSAGES['INVALID_LIMIT'].format("1001")),
    ({"object_name": "abc", "limit": -1}, ValueError, ERROR_MESSAGES['INVALID_LIMIT'].format("-1"))
])
def test_object_search_arguments_failure(client, requests_mock, args, exception, error):
    """Tests failure for rubrik_polaris_object_search."""
    from RubrikPolaris import rubrik_polaris_object_search_command
    # Argument validation fails first, so the GraphQL payload is irrelevant.
    requests_mock.post(BASE_URL_GRAPHQL, json={"data": {}})
    with pytest.raises(exception) as e:
        rubrik_polaris_object_search_command(client, args)
    assert str(e.value) == error
def test_sonar_policies_list_when_empty_response(client, requests_mock):
    """Tests rubrik_sonar_policies_list when empty response is returned."""
    from RubrikPolaris import rubrik_sonar_policies_list_command
    test_dir = os.path.dirname(__file__)
    empty_response = util_load_json(os.path.join(test_dir, 'test_data/sonar_policies_list_empty_response.json'))
    requests_mock.post(BASE_URL_GRAPHQL, json=empty_response)
    list_policies_command_results = rubrik_sonar_policies_list_command(client, {})
    # No policies -> "no records found" message and no context outputs.
    assert list_policies_command_results.readable_output == MESSAGES["NO_RECORDS_FOUND"].format("sonar policies")
    assert list_policies_command_results.outputs is None
def test_sonar_policies_list_success(client, requests_mock):
    """Tests rubrik_sonar_policies_list when response is not empty."""
    from RubrikPolaris import rubrik_sonar_policies_list_command
    raw_response = util_load_json(os.path.join(os.path.dirname(__file__),
                                               'test_data/sonar_policies_list_success_response.json'))
    # Expected human-readable markdown output.
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           "test_data/sonar_policies_list_success_hr.md"), 'r') as f:
        sonar_policies_list_hr = f.read()
    sonar_policies_list_outputs = util_load_json(os.path.join(os.path.dirname(__file__),
                                                              'test_data/sonar_policies_list_success_outputs.json'))
    requests_mock.post(BASE_URL_GRAPHQL, json=raw_response)
    list_policies_command_results = rubrik_sonar_policies_list_command(client, {})
    assert list_policies_command_results.raw_response == raw_response
    assert list_policies_command_results.readable_output == sonar_policies_list_hr
    assert list_policies_command_results.outputs == sonar_policies_list_outputs
def test_sonar_policy_analyzer_groups_list_when_empty_response(client, requests_mock):
    """Tests rubrik_sonar_policy_analyzer_groups_list_command when empty response is returned."""
    from RubrikPolaris import rubrik_sonar_policy_analyzer_groups_list_command
    empty_response = util_load_json(
        os.path.join(os.path.dirname(__file__), 'test_data/sonar_policy_analyzer_groups_list_empty_response.json'))
    requests_mock.post(BASE_URL_GRAPHQL, json=empty_response)
    list_policy_analyzer_groups_command_results = rubrik_sonar_policy_analyzer_groups_list_command(client, {})
    # No groups -> "no records found" message and no context outputs.
    assert list_policy_analyzer_groups_command_results.readable_output == MESSAGES["NO_RECORDS_FOUND"] \
        .format("sonar policy analyzer groups")
    assert list_policy_analyzer_groups_command_results.outputs is None
def test_sonar_policy_analyzer_groups_list_success(client, requests_mock):
    """Tests rubrik_sonar_policy_analyzer_groups_list_command when response is not empty."""
    from RubrikPolaris import rubrik_sonar_policy_analyzer_groups_list_command
    raw_response = util_load_json(os.path.join(os.path.dirname(__file__),
                                               'test_data/sonar_policy_analyzer_groups_list_success_response.json'))
    # Expected human-readable markdown output.
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           "test_data/sonar_policy_analyzer_groups_list_success_hr.md"), 'r') as f:
        sonar_policy_analyzer_groups_list_hr = f.read()
    sonar_policy_analyzer_groups_list_outputs = util_load_json(os.path.join(
        os.path.dirname(__file__), 'test_data/sonar_policy_analyzer_groups_list_success_outputs.json'))
    requests_mock.post(BASE_URL_GRAPHQL, json=raw_response)
    list_policy_analyzer_groups_command_results = rubrik_sonar_policy_analyzer_groups_list_command(client, {})
    assert list_policy_analyzer_groups_command_results.raw_response == raw_response
    assert list_policy_analyzer_groups_command_results.readable_output == sonar_policy_analyzer_groups_list_hr
    assert list_policy_analyzer_groups_command_results.outputs == sonar_policy_analyzer_groups_list_outputs
@pytest.mark.parametrize("response", [
    "empty_response", "raw_response"
])
def test_vm_object_metadata_when_valid_response_is_returned(client, requests_mock, response):
    """Tests success for rubrik_polaris_vm_object_metadata_get.

    Parametrized over the two fixture keys: an empty response must yield the
    "no records" message, a populated one the raw response, outputs and HR.
    """
    from RubrikPolaris import rubrik_polaris_vm_object_metadata_get_command
    args = {"object_id": "e060116b-f9dc-56a1-82a6-1b968d2f6cef"}
    data = util_load_json(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                       "test_data/vm_object_metadata_get.json"))
    # Fix: ``response`` is already a str; the f"{response}" wrapper was redundant.
    requests_mock.post(BASE_URL_GRAPHQL, json=data.get(response))
    object_response = rubrik_polaris_vm_object_metadata_get_command(client, args)
    if response == "empty_response":
        assert object_response.readable_output == MESSAGES["NO_RECORDS_FOUND"].format("vm object metadata")
    else:
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               "test_data/vm_object_metadata_get.md"), 'r') as f:
            object_response_hr = f.read()
        assert object_response.raw_response == data.get('raw_response')
        assert object_response.outputs == remove_empty_elements(data.get('outputs'))
        assert object_response.readable_output == object_response_hr
@pytest.mark.parametrize("args", [
    {"object_id": ""}
])
def test_vm_object_metadata_invalid_object_id(client, requests_mock, args):
    """Tests incorrect object_id for rubrik_polaris_vm_object_metadata_get."""
    from RubrikPolaris import rubrik_polaris_vm_object_metadata_get_command
    # An empty object_id must be rejected during argument validation.
    requests_mock.post(BASE_URL_GRAPHQL, json={"data": {}})
    with pytest.raises(ValueError) as e:
        rubrik_polaris_vm_object_metadata_get_command(client, args)
    assert str(e.value) == ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('object_id')
def test_vm_objects_list_success(client, requests_mock):
    """Tests success for rubrik_polaris_vm_objects_list."""
    from RubrikPolaris import rubrik_polaris_vm_objects_list_command
    objects_list_response = util_load_json(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                                        "test_data/objects_list_response.json"))
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           "test_data/objects_list_hr.md"), 'r') as f:
        objects_list_response_hr = f.read()
    enum_values = util_load_json(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                              enum_values_file_path))
    # requests_mock serves these sequentially: two enum lookups, then data.
    responses = [
        {'json': enum_values.get('sort_by_enum')},
        {'json': enum_values.get('sort_order_enum')},
        {'json': objects_list_response.get('raw_response')}
    ]
    requests_mock.post(BASE_URL_GRAPHQL, responses)
    response = rubrik_polaris_vm_objects_list_command(client, args={"limit": 2})
    assert response.raw_response == objects_list_response.get('raw_response')
    # Outputs are keyed by the DT expression used for context de-duplication.
    assert response.outputs.get(f'{OUTPUT_PREFIX["VM_OBJECT"]}(val.id == obj.id)') \
        == remove_empty_elements(objects_list_response.get('outputs'))
    assert response.outputs.get(f'{OUTPUT_PREFIX["PAGE_TOKEN_VM_OBJECT"]}(val.name == obj.name)') \
        == remove_empty_elements(objects_list_response.get('page_token'))
    assert response.readable_output == objects_list_response_hr
@pytest.mark.parametrize("args, error", [
    ({"is_relic": "a"}, ERROR_MESSAGES['INVALID_BOOLEAN'].format("a", "is_relic")),
    ({"is_replicated": "tr"}, ERROR_MESSAGES['INVALID_BOOLEAN'].format("tr", "is_replicated")),
    ({"limit": "a"}, "\"a\" is not a valid number"),
    ({"limit": 1001}, ERROR_MESSAGES['INVALID_LIMIT'].format("1001"))
])
def test_vm_objects_list_when_invalid_arguments_are_provided(client, requests_mock, args, error):
    """Tests invalid arguments for rubrik_polaris_vm_objects_list."""
    # Covers invalid booleans, a non-numeric limit, and an out-of-range limit.
    from RubrikPolaris import rubrik_polaris_vm_objects_list_command
    enum_values = util_load_json(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                              enum_values_file_path))
    # Only the two enum lookups are served; validation fails before the data call.
    responses = [
        {'json': enum_values.get('sort_by_enum')},
        {'json': enum_values.get('sort_order_enum')}
    ]
    requests_mock.post(BASE_URL_GRAPHQL, responses)
    with pytest.raises(ValueError) as e:
        rubrik_polaris_vm_objects_list_command(client, args=args)
    assert str(e.value) == error
def test_sonar_on_demand_scan_when_success_response(client, requests_mock):
    """Tests rubrik_sonar_ondemand_scan_command when response is success.

    Mocks a successful GraphQL response and checks the command's raw
    response, human-readable output and context outputs.
    """
    from RubrikPolaris import rubrik_sonar_ondemand_scan_command
    # Fix: f'{sonar_on_demand_file_path}' was a redundant f-string wrapper
    # around a plain module-level string constant; use the constant directly.
    raw_response = util_load_json(os.path.join(os.path.dirname(__file__),
                                               sonar_on_demand_file_path))
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           "test_data/sonar_ondemand_scan_success_hr.md"), 'r') as f:
        sonar_ondemand_scan_hr = f.read()
    sonar_on_demand_scan_outputs = util_load_json(os.path.join(
        os.path.dirname(__file__), 'test_data/sonar_ondemand_scan_success_outputs.json'))
    requests_mock.post(BASE_URL_GRAPHQL, json=raw_response)
    args = {
        "scan_name": "dummy",
        "objects_to_scan": "1234-abc, 2345-bcd",
        "sonar_policy_analyzer_groups": '{ "id": 1, "name":"ABC", "groupType":"ABC",'
                                        '"analyzers": [{ "id": 1, "name": "anc", "analyzerType": "xyz"},'
                                        '{"id": 2, "name": "xyz", "analyzerType": "klm"} ]}',
    }
    sonar_on_demand_scan_command_results = rubrik_sonar_ondemand_scan_command(client, args)
    assert sonar_on_demand_scan_command_results.raw_response == raw_response
    assert sonar_on_demand_scan_command_results.readable_output == sonar_ondemand_scan_hr
    assert sonar_on_demand_scan_command_results.outputs == sonar_on_demand_scan_outputs
@pytest.mark.parametrize("objects_to_scan, sonar_policy_analyzer_groups, exception, error",
                         [("", '', ValueError, ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format("objects_to_scan")),
                          ("1234-abc, 2345-bcd", "", ValueError,
                           ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format("sonar_policy_analyzer_groups")),
                          ("1234-abc, 2345-bcd", "{}", ValueError,
                           ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format("sonar_policy_analyzer_groups")),
                          ("1234-abc, 2345-bcd", "{", ValueError,
                           ERROR_MESSAGES['JSON_DECODE'].format("sonar_policy_analyzer_groups")),
                          ("1234-abc, 2345-bcd", '[{"id": dummy-id', ValueError,
                           ERROR_MESSAGES['JSON_DECODE'].format("sonar_policy_analyzer_groups")),
                          ("1234-abc, 2345-bcd", '[]', ValueError,
                           ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format("sonar_policy_analyzer_groups"))
                          ])
def test_sonar_on_demand_scan_when_invalid_input(client, requests_mock, objects_to_scan, sonar_policy_analyzer_groups,
                                                 exception, error):
    """Tests rubrik_sonar_ondemand_scan_command when invalid inputs are provided.

    Covers missing objects, and missing/empty/malformed analyzer-group JSON.
    """
    from RubrikPolaris import rubrik_sonar_ondemand_scan_command
    # Fix: f'{sonar_on_demand_file_path}' was a redundant f-string wrapper
    # around a plain module-level string constant; use the constant directly.
    raw_response = util_load_json(os.path.join(os.path.dirname(__file__),
                                               sonar_on_demand_file_path))
    requests_mock.post(BASE_URL_GRAPHQL, json=raw_response)
    args = {
        "scan_name": "",
        "objects_to_scan": objects_to_scan,
        "sonar_policy_analyzer_groups": sonar_policy_analyzer_groups,
    }
    with pytest.raises(exception) as e:
        rubrik_sonar_ondemand_scan_command(client, args)
    assert str(e.value) == error
def test_sonar_ondemand_scan_when_empty_response(client, requests_mock):
    """Tests rubrik_sonar_ondemand_scan_command when empty response is returned."""
    from RubrikPolaris import rubrik_sonar_ondemand_scan_command
    empty_response = util_load_json(
        os.path.join(os.path.dirname(__file__), 'test_data/sonar_policy_analyzer_groups_list_empty_response.json'))
    requests_mock.post(BASE_URL_GRAPHQL, json=empty_response)
    # Valid arguments; only the mocked server response is empty.
    args = {
        "scan_name": "dummy",
        "objects_to_scan": "1234-abc, 2345-bcd",
        "sonar_policy_analyzer_groups": '{ "id": 1, "name":"ABC", "groupType":"ABC",'
                                        '"analyzers": [{ "id": 1, "name": "anc", "analyzerType": "xyz"},'
                                        '{"id": 2, "name": "xyz", "analyzerType": "klm"} ]}',
    }
    sonar_on_demand_scan_command_results = rubrik_sonar_ondemand_scan_command(client, args)
    assert sonar_on_demand_scan_command_results.readable_output == MESSAGES["NO_RESPONSE"]
    assert sonar_on_demand_scan_command_results.outputs is None
@pytest.mark.parametrize("file_suffix", ["complete", "fail", "progress"])
def test_sonar_on_demand_scan_status_when_success_response(client, requests_mock, file_suffix):
    """Tests rubrik_sonar_ondemand_scan_status_command when response is success."""
    # Parametrized over fixture suffixes for completed, failed and in-progress
    # scan states; each suffix selects a matching response/HR/outputs triple.
    from RubrikPolaris import rubrik_sonar_ondemand_scan_status_command
    raw_response = util_load_json(os.path.join(os.path.dirname(__file__),
                                               f'test_data/sonar_ondemand_scan_status_success'
                                               f'_{file_suffix}_response.json'))
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           f"test_data/sonar_ondemand_scan_status_success_{file_suffix}_hr.md"), 'r') as f:
        sonar_ondemand_scan_status_hr = f.read()
    sonar_on_demand_scan_status_outputs = util_load_json(os.path.join(
        os.path.dirname(__file__), f'test_data/sonar_ondemand_scan_status_success_{file_suffix}_outputs.json'))
    requests_mock.post(BASE_URL_GRAPHQL, json=raw_response)
    args = {
        "crawl_id": "587d147a-add9-4152-b7a0-5a667d99f395"
    }
    sonar_on_demand_scan_status_command_results = rubrik_sonar_ondemand_scan_status_command(client, args)
    assert sonar_on_demand_scan_status_command_results.raw_response == raw_response
    assert sonar_on_demand_scan_status_command_results.readable_output == sonar_ondemand_scan_status_hr
    assert sonar_on_demand_scan_status_command_results.outputs == sonar_on_demand_scan_status_outputs
@pytest.mark.parametrize("crawl_id, exception, error",
                         [("", ValueError, ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format("crawl_id")),
                          (None, ValueError, ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format("crawl_id"))
                          ])
def test_sonar_on_demand_scan_status_when_invalid_input(client, crawl_id,
                                                        exception, error):
    """Tests rubrik_sonar_ondemand_scan_status_command when invalid inputs are provided."""
    from RubrikPolaris import rubrik_sonar_ondemand_scan_status_command
    # Both an empty string and None must be rejected as a missing crawl_id.
    with pytest.raises(exception) as e:
        rubrik_sonar_ondemand_scan_status_command(client, {"crawl_id": crawl_id})
    assert str(e.value) == error
def test_sonar_ondemand_scan_status_when_empty_response(client, requests_mock):
    """Tests rubrik_sonar_ondemand_scan_status_command when empty response is returned."""
    from RubrikPolaris import rubrik_sonar_ondemand_scan_status_command
    empty_response = util_load_json(
        os.path.join(os.path.dirname(__file__), 'test_data/sonar_ondemand_scan_status_empty_response.json'))
    requests_mock.post(BASE_URL_GRAPHQL, json=empty_response)
    args = {
        "crawl_id": "dummy-id"
    }
    sonar_on_demand_scan_status_command_results = rubrik_sonar_ondemand_scan_status_command(client, args)
    # An empty server response yields the generic "no response" message.
    assert sonar_on_demand_scan_status_command_results.readable_output == MESSAGES["NO_RESPONSE"]
    assert sonar_on_demand_scan_status_command_results.outputs is None
@pytest.mark.parametrize("crawl_id, file_type, exception, error",
                         [("", "", ValueError, ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format("crawl_id")),
                          ("dummy_crawl_id", "", ValueError,
                           ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format("file_type")),
                          ("dummy_crawl_id", "not_valid_file_type", ValueError,
                           "'not_valid_file_type' is an invalid value for 'file type'. Value must be in "
                           "['ANY', 'HITS', 'STALE', 'OPEN_ACCESS', 'STALE_HITS', 'OPEN_ACCESS_HITS'].")])
def test_sonar_on_demand_scan_result_when_invalid_input(client, requests_mock, crawl_id, file_type, exception, error):
    """Tests rubrik_sonar_ondemand_scan_result_command when invalid inputs are provided.

    Covers a missing crawl_id, a missing file_type, and a file_type outside
    the allowed enum values.
    """
    from RubrikPolaris import rubrik_sonar_ondemand_scan_result_command
    # Fix: f'{sonar_on_demand_file_path}' was a redundant f-string wrapper
    # around a plain module-level string constant; use the constant directly.
    raw_response = util_load_json(os.path.join(os.path.dirname(__file__),
                                               sonar_on_demand_file_path))
    enum_values = util_load_json(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                              enum_values_file_path))
    # requests_mock serves these sequentially: enum lookup first, then data.
    responses = [
        {'json': enum_values.get('file_count_type_enum')},
        {'json': raw_response}
    ]
    requests_mock.post(BASE_URL_GRAPHQL, responses)
    args = {
        "crawl_id": crawl_id,
        "file_type": file_type,
    }
    with pytest.raises(exception) as e:
        rubrik_sonar_ondemand_scan_result_command(client, args)
    assert str(e.value) == error
def test_sonar_ondemand_scan_result_when_empty_response(client, requests_mock):
    """Tests rubrik_sonar_ondemand_scan_result_command when empty response is returned."""
    from RubrikPolaris import rubrik_sonar_ondemand_scan_result_command
    empty_response = util_load_json(
        os.path.join(os.path.dirname(__file__), 'test_data/sonar_ondemand_scan_result_empty_response.json'))
    enum_values = util_load_json(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                              enum_values_file_path))
    # requests_mock serves these sequentially: enum lookup first, then data.
    responses = [
        {'json': enum_values.get('file_count_type_enum')},
        {'json': empty_response}
    ]
    requests_mock.post(BASE_URL_GRAPHQL, responses)
    args = {
        "crawl_id": "dummy_id",
        "file_type": "HITS",
    }
    sonar_on_demand_scan_result_command_results = rubrik_sonar_ondemand_scan_result_command(client, args)
    assert sonar_on_demand_scan_result_command_results.readable_output == MESSAGES["NO_RESPONSE"]
    assert sonar_on_demand_scan_result_command_results.outputs is None
def test_sonar_on_demand_scan_result_when_success_response(client, requests_mock):
    """Tests rubrik_sonar_ondemand_scan_result_command when response is success."""
    from RubrikPolaris import rubrik_sonar_ondemand_scan_result_command
    raw_response = util_load_json(os.path.join(os.path.dirname(__file__),
                                               'test_data/sonar_ondemand_scan_result_success_response.json'))
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           "test_data/sonar_ondemand_scan_result_success_hr.md"), 'r') as f:
        sonar_ondemand_scan_hr = f.read()
    sonar_on_demand_scan_outputs = util_load_json(os.path.join(
        os.path.dirname(__file__), 'test_data/sonar_ondemand_scan_result_success_outputs.json'))
    enum_values = util_load_json(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                              enum_values_file_path))
    # requests_mock serves these sequentially: enum lookup first, then data.
    responses = [
        {'json': enum_values.get('file_count_type_enum')},
        {'json': raw_response}
    ]
    requests_mock.post(BASE_URL_GRAPHQL, responses)
    args = {
        "crawl_id": "dummy_id",
        "file_type": "HITS",
    }
    sonar_on_demand_scan_result_command_results = rubrik_sonar_ondemand_scan_result_command(client, args)
    assert sonar_on_demand_scan_result_command_results.raw_response == raw_response
    assert sonar_on_demand_scan_result_command_results.readable_output == sonar_ondemand_scan_hr
    assert sonar_on_demand_scan_result_command_results.outputs == sonar_on_demand_scan_outputs
@pytest.mark.parametrize("empty_response", [True, False])
def test_vm_object_snapshot_get_success(client, requests_mock, empty_response):
    """Tests success for rubrik_polaris_vm_object_snapshot_get."""
    # Parametrized to cover both the empty-response and populated-response
    # branches of the command.
    from RubrikPolaris import rubrik_polaris_vm_object_snapshot_list_command
    object_snapshot_response = util_load_json(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                                           "test_data/vm_object_snapshot_get_response.json"))
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           "test_data/vm_object_snapshot_get_hr.md"), 'r') as f:
        object_snapshot_response_hr = f.read()
    enum_values = util_load_json(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                              enum_values_file_path))
    args = {"object_id": "dummy_id", "start_date": "2020-03-21", "end_date": "2020-06-21", "timezone_offset": "1"}
    if empty_response:
        # Two enum lookups, then the empty payload, served in order.
        responses = [
            {'json': enum_values.get('snapshot_group_by_enum')},
            {'json': enum_values.get('snapshot_group_by_enum')},
            {'json': object_snapshot_response.get('empty_response')}
        ]
        requests_mock.post(BASE_URL_GRAPHQL, responses)
        response = rubrik_polaris_vm_object_snapshot_list_command(client, args=args)
        assert response.readable_output == MESSAGES['NO_RECORDS_FOUND'].format('vm object snapshots')
    else:
        # Same sequence, but with the populated payload.
        responses = [
            {'json': enum_values.get('snapshot_group_by_enum')},
            {'json': enum_values.get('snapshot_group_by_enum')},
            {'json': object_snapshot_response.get('raw_response')}
        ]
        requests_mock.post(BASE_URL_GRAPHQL, responses)
        response = rubrik_polaris_vm_object_snapshot_list_command(client, args=args)
        assert response.raw_response == object_snapshot_response.get('raw_response')
        assert response.outputs == remove_empty_elements(object_snapshot_response.get('outputs'))
        assert response.readable_output == object_snapshot_response_hr
@pytest.mark.parametrize("args, error", [
    ({"object_id": "", "start_date": "tr", "end_date": "tr", "timezone_offset": "1.5"},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('object_id')),
    ({"object_id": "dummy_id", "start_date": "", "end_date": "tr", "timezone_offset": "1.5"},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('start_date')),
    ({"object_id": "dummy_id", "start_date": "tr", "end_date": "", "timezone_offset": "1.5"},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('end_date')),
    ({"object_id": "dummy_id", "start_date": "abc", "end_date": "tr", "timezone_offset": "1.5"},
     '"abc" is not a valid date'),
    ({"object_id": "dummy_id", "start_date": "tr", "end_date": "tr", "timezone_offset": "1.5",
      "cluster_connected": "tr"}, ERROR_MESSAGES['INVALID_BOOLEAN'].format('tr', 'cluster_connected')),
])
def test_vm_object_snapshot_get_when_invalid_arguments_are_provided(client, requests_mock, args, error):
    """Verify that rubrik_polaris_vm_object_snapshot_list_command raises on invalid arguments."""
    from RubrikPolaris import rubrik_polaris_vm_object_snapshot_list_command
    test_dir = os.path.dirname(os.path.realpath(__file__))
    enum_values = util_load_json(os.path.join(test_dir, enum_values_file_path))
    # The enum lookups succeed; validation of the arguments must still fail.
    requests_mock.post(BASE_URL_GRAPHQL, [
        {'json': enum_values.get('snapshot_group_by_enum')},
        {'json': enum_values.get('snapshot_group_by_enum')}
    ])
    with pytest.raises(ValueError) as err:
        rubrik_polaris_vm_object_snapshot_list_command(client, args=args)
    assert str(err.value) == error
@pytest.mark.parametrize("empty_response", [True, False])
def test_radar_anomaly_csv_analysis_success(client, requests_mock, empty_response):
    """Verify rubrik_radar_anomaly_csv_analysis_command for empty and populated responses."""
    from RubrikPolaris import rubrik_radar_anomaly_csv_analysis_command
    test_dir = os.path.dirname(os.path.realpath(__file__))
    analysis_data = util_load_json(os.path.join(test_dir, "test_data/radar_anomaly_csv_analysis_response.json"))
    with open(os.path.join(test_dir, "test_data/radar_anomaly_csv_analysis_hr.md")) as hr_file:
        expected_hr = hr_file.read()
    args = {"object_id": "dummy", "cluster_id": "dummy", "snapshot_id": "dummy"}
    response_key = 'empty_response' if empty_response else 'raw_response'
    requests_mock.post(BASE_URL_GRAPHQL, json=analysis_data.get(response_key))
    command_results = rubrik_radar_anomaly_csv_analysis_command(client, args=args)
    if empty_response:
        assert command_results.readable_output == MESSAGES['NO_RESPONSE']
    else:
        assert command_results.raw_response == analysis_data.get('raw_response')
        assert command_results.outputs == remove_empty_elements(analysis_data.get('outputs'))
        assert command_results.readable_output == expected_hr
@pytest.mark.parametrize("args, error", [
    ({"object_id": "dummy", "cluster_id": "dummy"},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('snapshot_id')),
    ({"object_id": "dummy_id", "cluster_id": "", "snapshot_id": "tr"},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('cluster_id')),
    ({"object_id": "", "cluster_id": " dummy", "snapshot_id": "dummy"},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('object_id')),
])
def test_radar_anomaly_csv_analysis_when_invalid_arguments_are_provided(client, requests_mock, args, error):
    """Verify that rubrik_radar_anomaly_csv_analysis_command raises on invalid arguments."""
    from RubrikPolaris import rubrik_radar_anomaly_csv_analysis_command
    with pytest.raises(ValueError) as err:
        rubrik_radar_anomaly_csv_analysis_command(client, args=args)
    assert str(err.value) == error
@pytest.mark.parametrize("empty_response", [True, False])
def test_sonar_csv_download_success(client, requests_mock, empty_response):
    """Verify rubrik_sonar_csv_download_command for empty and populated responses."""
    from RubrikPolaris import rubrik_sonar_csv_download_command
    test_dir = os.path.dirname(os.path.realpath(__file__))
    download_data = util_load_json(os.path.join(test_dir, "test_data/sonar_csv_download_response.json"))
    with open(os.path.join(test_dir, "test_data/sonar_csv_download_hr.md")) as hr_file:
        expected_hr = hr_file.read()
    args = {"object_id": "dummy", "snapshot_id": "dummy"}
    response_key = 'empty_response' if empty_response else 'raw_response'
    requests_mock.post(BASE_URL_GRAPHQL, json=download_data.get(response_key))
    command_results = rubrik_sonar_csv_download_command(client, args=args)
    if empty_response:
        assert command_results.readable_output == MESSAGES["NO_RESPONSE"]
    else:
        assert command_results.raw_response == download_data.get('raw_response')
        assert command_results.outputs == remove_empty_elements(download_data.get('outputs'))
        assert command_results.readable_output == expected_hr
@pytest.mark.parametrize("args, error", [
    ({"object_id": "dummy"},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('snapshot_id')),
    ({"object_id": "", "snapshot_id": "dummy"},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('object_id')),
])
def test_sonar_csv_download_when_invalid_arguments_are_provided(client, args, error):
    """Verify that rubrik_sonar_csv_download_command raises on invalid arguments."""
    from RubrikPolaris import rubrik_sonar_csv_download_command
    with pytest.raises(ValueError) as err:
        rubrik_sonar_csv_download_command(client, args=args)
    assert str(err.value) == error
def test_snapshot_files_list_success(client, requests_mock):
    """Verify rubrik_gps_snapshot_files_list_command with a populated response."""
    from RubrikPolaris import rubrik_gps_snapshot_files_list_command
    test_dir = os.path.dirname(os.path.realpath(__file__))
    raw_response = util_load_json(os.path.join(test_dir, 'test_data/snapshot_files_list_success_response.json'))
    with open(os.path.join(test_dir, "test_data/snapshot_files_list_success_hr.md")) as hr_file:
        expected_hr = hr_file.read()
    expected_outputs = util_load_json(os.path.join(test_dir, 'test_data/snapshot_files_list_success_outputs.json'))
    requests_mock.post(BASE_URL_GRAPHQL, json=raw_response)
    command_results = rubrik_gps_snapshot_files_list_command(
        client, {'snapshot_id': "90858c2f-e572-5b9c-b455-ba309d50c1a2"})
    assert command_results.raw_response == raw_response
    assert command_results.readable_output == expected_hr
    assert command_results.outputs == expected_outputs
def test_snapshot_files_list_when_empty_response(client, requests_mock):
    """Verify rubrik_gps_snapshot_files_list_command when the API returns no files."""
    from RubrikPolaris import rubrik_gps_snapshot_files_list_command
    test_dir = os.path.dirname(os.path.realpath(__file__))
    empty_response = util_load_json(os.path.join(test_dir, 'test_data/snapshot_files_list_empty_response.json'))
    requests_mock.post(BASE_URL_GRAPHQL, json=empty_response)
    command_results = rubrik_gps_snapshot_files_list_command(
        client, {'snapshot_id': "90858c2f-e572-5b9c-b455-ba309d50c1a2"})
    assert command_results.readable_output == MESSAGES["NO_RECORDS_FOUND"].format("files")
    assert command_results.outputs is None
@pytest.mark.parametrize("args, error", [
    ({"snapshot_id": ""}, ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format("snapshot_id")),
    ({"snapshot_id": "1234-5678-9012", "limit": "a"}, "\"a\" is not a valid number"),
    ({"snapshot_id": "1234-5678-9012", "limit": 1001}, ERROR_MESSAGES['INVALID_LIMIT'].format("1001"))
])
def test_snapshot_files_list_when_invalid_arguments_are_provided(client, requests_mock, args, error):
    """Verify that rubrik_gps_snapshot_files_list_command raises on invalid arguments."""
    from RubrikPolaris import rubrik_gps_snapshot_files_list_command
    requests_mock.post(BASE_URL_GRAPHQL, json={"data": {}})
    with pytest.raises(ValueError) as err:
        rubrik_gps_snapshot_files_list_command(client, args)
    assert str(err.value) == error
@pytest.mark.parametrize("empty_response", [True, False])
def test_gps_vm_export_success(client, requests_mock, empty_response):
    """Verify rubrik_gps_vm_export_command for empty and populated responses."""
    from RubrikPolaris import rubrik_gps_vm_export_command
    test_dir = os.path.dirname(os.path.realpath(__file__))
    export_data = util_load_json(os.path.join(test_dir, "test_data/gps_vm_export_success.json"))
    with open(os.path.join(test_dir, "test_data/gps_vm_export_hr.md")) as hr_file:
        expected_hr = hr_file.read()
    args = {
        "object_id": "dc4f1b47-da71-5a62-a4eb-b94406d74cbc",
        "datastore_id": "711f8a94-c7dd-5ea9-afe9-2d8e44d09d3d",
        "host_id": "f57bfebf-c7c9-5310-a5fd-1f0aeea5ba25",
        "snapshot_id": "e9e1980f-11f0-53f3-84d6-15f60264b63b"
    }
    response_key = 'empty_response' if empty_response else 'raw_response'
    requests_mock.post(BASE_URL_GRAPHQL, json=export_data.get(response_key))
    command_results = rubrik_gps_vm_export_command(client, args=args)
    if empty_response:
        assert command_results.readable_output == MESSAGES['NO_RECORDS_FOUND'].format('vm export')
    else:
        assert command_results.raw_response == export_data.get('raw_response')
        assert command_results.outputs == remove_empty_elements(export_data.get('outputs'))
        assert command_results.readable_output == expected_hr
@pytest.mark.parametrize("args, error", [
    ({"datastore_id": "dummy_id", "host_id": "dummy_id", "snapshot_id": "dummy_id"},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('object_id')),
    ({"object_id": "dummy_id", "host_id": "dummy_id", "snapshot_id": "dummy_id"},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('datastore_id')),
    ({"object_id": "dummy_id", "datastore_id": "dummy_id", "snapshot_id": "dummy_id"},
     ERROR_MESSAGES['MISSING_EXPORT_DESTINATION']),
    ({"object_id": "dummy_id", "datastore_id": "dummy_id", "host_id": "dummy_id"},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('snapshot_id')),
    ({"object_id": "dummy_id", "datastore_id": "dummy_id", "host_id": "dummy_id", "snapshot_id": "dummy_id",
      "power_on": "dummy"}, ERROR_MESSAGES['INVALID_BOOLEAN'].format("dummy", "power_on")),
    ({"object_id": "dummy_id", "datastore_id": "dummy_id", "host_id": "dummy_id", "snapshot_id": "dummy_id",
      "keep_mac_addresses": "dummy"}, ERROR_MESSAGES['INVALID_BOOLEAN'].format("dummy", "keep_mac_addresses")),
    ({"object_id": "dummy_id", "datastore_id": "dummy_id", "host_id": "dummy_id", "snapshot_id": "dummy_id",
      "remove_network_devices": "dummy"}, ERROR_MESSAGES['INVALID_BOOLEAN'].format("dummy", "remove_network_devices")),
    ({"object_id": "dummy_id", "datastore_id": "dummy_id", "host_id": "dummy_id", "snapshot_id": "dummy_id",
      "recover_tags": "dummy"}, ERROR_MESSAGES['INVALID_BOOLEAN'].format("dummy", "recover_tags")),
    ({"object_id": "dummy_id", "datastore_id": "dummy_id", "host_id": "dummy_id", "snapshot_id": "dummy_id",
      "disable_network": "dummy"}, ERROR_MESSAGES['INVALID_BOOLEAN'].format("dummy", "disable_network"))
])
def test_gps_vm_export_when_invalid_arguments_are_provided(client, args, error):
    """Verify that rubrik_gps_vm_export_command raises on invalid arguments."""
    from RubrikPolaris import rubrik_gps_vm_export_command
    with pytest.raises(ValueError) as err:
        rubrik_gps_vm_export_command(client, args=args)
    assert str(err.value) == error
@pytest.mark.parametrize("object_type, show_cluster_slas_only, limit, exception, error",
                         [("ABC_OBJECT", "", "100", ValueError,
                           SDK_ERROR_MESSAGES['INVALID_SLA_LIST_OBJECT_TYPE'].format(['ABC_OBJECT'])),
                          ("ABC_OBJECT, DEF_OBJECT", "", "100", ValueError,
                           SDK_ERROR_MESSAGES['INVALID_SLA_LIST_OBJECT_TYPE'].format(['ABC_OBJECT', 'DEF_OBJECT'])),
                          ("FILESET_OBJECT_TYPE", "abc", "", ValueError,
                           ERROR_MESSAGES['INVALID_BOOLEAN'].format("abc", "show_cluster_slas_only")),
                          ("FILESET_OBJECT_TYPE, VSPHERE_OBJECT_TYPE", "", "abc", ValueError,
                           "\"abc\" is not a valid number"),
                          ("VSPHERE_OBJECT_TYPE, FILESET_OBJECT_TYPE", "", "1001", ValueError,
                           ERROR_MESSAGES['INVALID_LIMIT'].format("1001")),
                          ("VSPHERE_OBJECT_TYPE", "", "-2", ValueError, ERROR_MESSAGES['INVALID_LIMIT'].format("-2")),
                          ("VSPHERE_OBJECT_TYPE", "", "0", ValueError, ERROR_MESSAGES['INVALID_LIMIT'].format("0"))
                          ])
def test_gps_sla_domain_list_when_invalid_input(client, requests_mock, object_type, show_cluster_slas_only,
                                                limit, exception, error):
    """Verify that rubrik_gps_sla_domain_list raises on invalid inputs."""
    from RubrikPolaris import rubrik_gps_sla_domain_list
    test_dir = os.path.dirname(os.path.realpath(__file__))
    raw_response = util_load_json(os.path.join(test_dir, 'test_data/gps_sla_domain_list_response.json'))
    enum_values = util_load_json(os.path.join(test_dir, enum_values_file_path))
    # Enum lookups succeed; argument validation must fail before/after them.
    requests_mock.post(BASE_URL_GRAPHQL, [
        {'json': enum_values.get('sla_object_type_enum')},
        {'json': enum_values.get('sla_query_sort_by_field_enum')},
        {'json': enum_values.get('sort_order_enum')},
        {'json': raw_response}
    ])
    args = {
        "name": "",
        "cluster_id": "",
        "object_type": object_type,
        "show_cluster_slas_only": show_cluster_slas_only,
        "sort_by": "",
        "sort_order": "",
        "limit": limit,
        "next_page_token": ""
    }
    with pytest.raises(exception) as err:
        rubrik_gps_sla_domain_list(client, args)
    assert str(err.value) == error
def test_gps_sla_domain_list_when_empty_response(client, requests_mock):
    """Verify rubrik_gps_sla_domain_list when the API returns no sla domains."""
    from RubrikPolaris import rubrik_gps_sla_domain_list
    test_dir = os.path.dirname(os.path.realpath(__file__))
    raw_response = util_load_json(os.path.join(test_dir, 'test_data/gps_sla_domain_list_empty_response.json'))
    enum_values = util_load_json(os.path.join(test_dir, enum_values_file_path))
    requests_mock.post(BASE_URL_GRAPHQL, [
        {'json': enum_values.get('sla_query_sort_by_field_enum')},
        {'json': enum_values.get('sort_order_enum')},
        {'json': raw_response}
    ])
    command_results = rubrik_gps_sla_domain_list(client, {})
    assert command_results.readable_output == MESSAGES["NO_RECORDS_FOUND"].format("sla domains")
    assert command_results.outputs is None
def test_gps_sla_domain_list_when_success_response(client, requests_mock):
    """Verify rubrik_gps_sla_domain_list with a populated, successful response."""
    from RubrikPolaris import rubrik_gps_sla_domain_list
    test_dir = os.path.dirname(os.path.realpath(__file__))
    raw_response = util_load_json(os.path.join(test_dir, 'test_data/gps_sla_domain_list_success_response.json'))
    with open(os.path.join(test_dir, "test_data/gps_sla_domain_list_success_hr.md")) as hr_file:
        expected_hr = hr_file.read()
    expected_outputs = util_load_json(os.path.join(test_dir, 'test_data/gps_sla_domain_list_success_outputs.json'))
    enum_values = util_load_json(os.path.join(test_dir, enum_values_file_path))
    # Three enum lookups precede the actual sla-domain query.
    requests_mock.post(BASE_URL_GRAPHQL, [
        {'json': enum_values.get('sla_object_type_enum')},
        {'json': enum_values.get('sla_query_sort_by_field_enum')},
        {'json': enum_values.get('sort_order_enum')},
        {'json': raw_response}
    ])
    args = {
        "name": "",
        "cluster_id": "",
        "object_type": "FILESET_OBJECT_TYPE, VSPHERE_OBJECT_TYPE",
        "show_cluster_slas_only": "false",
        "sort_by": "NAME",
        "sort_order": "DESC",
        "limit": "2",
        "next_page_token": ""
    }
    command_results = rubrik_gps_sla_domain_list(client, args)
    assert command_results.raw_response == raw_response
    assert command_results.readable_output == expected_hr
    assert command_results.outputs == expected_outputs
@pytest.mark.parametrize("empty_response", [True, False])
def test_user_downloads_list_success(client, requests_mock, empty_response):
    """Verify rubrik_user_downloads_list_command for empty and populated responses."""
    from RubrikPolaris import rubrik_user_downloads_list_command
    test_dir = os.path.dirname(os.path.realpath(__file__))
    downloads_data = util_load_json(os.path.join(test_dir, "test_data/user_downloads_get_response.json"))
    with open(os.path.join(test_dir, "test_data/user_downloads_get_hr.md")) as hr_file:
        expected_hr = hr_file.read()
    args = {"object_id": "dummy", "cluster_id": "dummy", "snapshot_id": "dummy"}
    response_key = 'empty_response' if empty_response else 'raw_response'
    requests_mock.post(BASE_URL_GRAPHQL, json=downloads_data.get(response_key))
    command_results = rubrik_user_downloads_list_command(client, args=args)
    if empty_response:
        assert command_results.readable_output == MESSAGES['NO_RECORDS_FOUND'].format('user downloads')
    else:
        assert command_results.raw_response == downloads_data.get('raw_response')
        assert command_results.outputs == remove_empty_elements(downloads_data.get('outputs'))
        assert command_results.readable_output == expected_hr
@pytest.mark.parametrize("empty_response", [True, False])
def test_sonar_csv_result_download_success(client, requests_mock, empty_response):
    """Verify rubrik_sonar_csv_result_download_command for empty and populated responses."""
    from RubrikPolaris import rubrik_sonar_csv_result_download_command
    test_dir = os.path.dirname(os.path.realpath(__file__))
    result_data = util_load_json(os.path.join(test_dir, "test_data/sonar_csv_result_download_response.json"))
    with open(os.path.join(test_dir, "test_data/sonar_csv_result_download_hr.md")) as hr_file:
        expected_hr = hr_file.read()
    args = {"download_id": 1}
    response_key = 'empty_response' if empty_response else 'raw_response'
    requests_mock.post(BASE_URL_GRAPHQL, json=result_data.get(response_key))
    command_results = rubrik_sonar_csv_result_download_command(client, args=args)
    if empty_response:
        assert command_results.readable_output == MESSAGES["NO_RESPONSE"]
    else:
        assert command_results.raw_response == result_data.get('raw_response')
        assert command_results.outputs == remove_empty_elements(result_data.get('outputs'))
        assert command_results.readable_output == expected_hr
@pytest.mark.parametrize("args, error", [
    ({"download_id": ""},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('download_id')),
    ({"download_id": "a"}, "\"a\" is not a valid number")
])
def test_sonar_csv_result_download_when_invalid_arguments_are_provided(client, args, error):
    """Verify that rubrik_sonar_csv_result_download_command raises on invalid arguments."""
    from RubrikPolaris import rubrik_sonar_csv_result_download_command
    with pytest.raises(ValueError) as err:
        rubrik_sonar_csv_result_download_command(client, args=args)
    assert str(err.value) == error
def test_gps_vm_snapshot_create_when_object_id_is_not_provided(client):
    """Verify rubrik_gps_vm_snapshot_create raises when object_id is blank."""
    from RubrikPolaris import rubrik_gps_vm_snapshot_create
    with pytest.raises(ValueError) as err:
        rubrik_gps_vm_snapshot_create(client, args={"object_id": "", "sla_domain_id": ""})
    assert str(err.value) == ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('object_id')
def test_gps_vm_snapshot_create_when_empty_response(client, requests_mock):
    """Verify rubrik_gps_vm_snapshot_create when the API returns an empty response."""
    from RubrikPolaris import rubrik_gps_vm_snapshot_create
    test_dir = os.path.dirname(os.path.realpath(__file__))
    empty_response = util_load_json(os.path.join(test_dir, 'test_data/gps_vm_snapshot_create_empty_response.json'))
    requests_mock.post(BASE_URL_GRAPHQL, json=empty_response)
    command_results = rubrik_gps_vm_snapshot_create(client, {"object_id": "dummy-object-id", "sla_domain_id": ""})
    assert command_results.readable_output == MESSAGES["NO_RESPONSE"]
    assert command_results.outputs is None
def test_gps_vm_snapshot_create_when_success_response(client, requests_mock):
    """Verify rubrik_gps_vm_snapshot_create with a populated, successful response."""
    from RubrikPolaris import rubrik_gps_vm_snapshot_create
    test_dir = os.path.dirname(os.path.realpath(__file__))
    raw_response = util_load_json(os.path.join(test_dir, 'test_data/gps_vm_snapshot_create_success_response.json'))
    with open(os.path.join(test_dir, "test_data/gps_vm_snapshot_create_success_hr.md")) as hr_file:
        expected_hr = hr_file.read()
    expected_outputs = util_load_json(os.path.join(test_dir, 'test_data/gps_vm_snapshot_create_success_outputs.json'))
    requests_mock.post(BASE_URL_GRAPHQL, json=raw_response)
    command_results = rubrik_gps_vm_snapshot_create(client, {"object_id": "dummy-object-id", "sla_domain_id": ""})
    assert command_results.raw_response == raw_response
    assert command_results.readable_output == expected_hr
    assert command_results.outputs == expected_outputs
@pytest.mark.parametrize("empty_response", [True, False])
def test_gps_snapshot_file_download_success(client, requests_mock, empty_response):
    """Verify rubrik_gps_snapshot_files_download_command for empty and populated responses."""
    from RubrikPolaris import rubrik_gps_snapshot_files_download_command
    test_dir = os.path.dirname(os.path.realpath(__file__))
    download_data = util_load_json(os.path.join(test_dir, "test_data/gps_snapshot_file_download_response.json"))
    with open(os.path.join(test_dir, "test_data/gps_snapshot_file_download_hr.md")) as hr_file:
        expected_hr = hr_file.read()
    args = {"snapshot_id": 1, "file_path": "a"}
    response_key = 'empty_response' if empty_response else 'raw_response'
    requests_mock.post(BASE_URL_GRAPHQL, json=download_data.get(response_key))
    command_results = rubrik_gps_snapshot_files_download_command(client, args=args)
    if empty_response:
        assert command_results.readable_output == MESSAGES["NO_RESPONSE"]
    else:
        assert command_results.raw_response == download_data.get('raw_response')
        assert command_results.outputs == remove_empty_elements(download_data.get('outputs'))
        assert command_results.readable_output == expected_hr
@pytest.mark.parametrize("args, error", [
    ({"snapshot_id": "", "file_path": "abc"},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('snapshot_id')),
    ({"file_path": "", "snapshot_id": "abc"},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('file_path')),
])
def test_gps_snapshot_file_download_when_invalid_arguments_are_provided(client, args, error):
    """Verify that rubrik_gps_snapshot_files_download_command raises on invalid arguments."""
    from RubrikPolaris import rubrik_gps_snapshot_files_download_command
    with pytest.raises(ValueError) as err:
        rubrik_gps_snapshot_files_download_command(client, args=args)
    assert str(err.value) == error
@pytest.mark.parametrize("snapshot_id, bool_value, field_name, exception, error",
                         [("", None, None, ValueError,
                           ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format("snapshot_id")),
                          ("dummy_id", 'Abc', "power_on", ValueError,
                           ERROR_MESSAGES['INVALID_BOOLEAN'].format("Abc", "power_on")),
                          ("dummy_id", 'Abc', "keep_mac_addresses", ValueError,
                           ERROR_MESSAGES['INVALID_BOOLEAN'].format("Abc", "keep_mac_addresses")),
                          ("dummy_id", 'Abc', "remove_network_devices", ValueError,
                           ERROR_MESSAGES['INVALID_BOOLEAN'].format("Abc", "remove_network_devices")),
                          ("dummy_id", 'Abc', "disable_network", ValueError,
                           ERROR_MESSAGES['INVALID_BOOLEAN'].format("Abc", "disable_network")),
                          ("dummy_id", 'Abc', "recover_tags", ValueError,
                           ERROR_MESSAGES['INVALID_BOOLEAN'].format("Abc", "recover_tags"))
                          ])
def test_gps_vm_livemount_when_invalid_input(client, requests_mock, snapshot_id, bool_value, field_name, exception,
                                             error):
    """Tests rubrik_gps_vm_livemount when inputs are invalid.

    Fix: the ``client`` fixture was missing from the signature, so the command
    call below raised NameError instead of exercising argument validation.
    Every sibling test in this module takes ``client`` as a fixture.
    """
    from RubrikPolaris import rubrik_gps_vm_livemount
    raw_response = util_load_json(os.path.join(os.path.dirname(__file__),
                                               'test_data/gps_vm_livemount_success_response.json'))
    requests_mock.post(BASE_URL_GRAPHQL, json=raw_response)
    # For the missing-snapshot_id case field_name is None; the resulting "None"
    # key is an extra argument the command does not read.
    args = {
        "snapshot_id": snapshot_id,
        f"{field_name}": bool_value
    }
    with pytest.raises(exception) as e:
        rubrik_gps_vm_livemount(client, args)
    assert str(e.value) == error
def test_gps_vm_livemount_when_empty_response(client, requests_mock):
    """Verify rubrik_gps_vm_livemount when the API returns an empty response."""
    from RubrikPolaris import rubrik_gps_vm_livemount
    test_dir = os.path.dirname(os.path.realpath(__file__))
    raw_response = util_load_json(os.path.join(test_dir, 'test_data/gps_vm_livemount_empty_response.json'))
    requests_mock.post(BASE_URL_GRAPHQL, json=raw_response)
    command_results = rubrik_gps_vm_livemount(client, {"snapshot_id": "dummy_id"})
    assert command_results.readable_output == MESSAGES["NO_RESPONSE"]
    assert command_results.outputs is None
def test_gps_vm_livemount_list_when_success_response(client, requests_mock):
    """Verify rubrik_gps_vm_livemount with a populated, successful response."""
    from RubrikPolaris import rubrik_gps_vm_livemount
    test_dir = os.path.dirname(os.path.realpath(__file__))
    raw_response = util_load_json(os.path.join(test_dir, 'test_data/gps_vm_livemount_success_response.json'))
    with open(os.path.join(test_dir, "test_data/gps_vm_livemount_success_hr.md")) as hr_file:
        expected_hr = hr_file.read()
    expected_outputs = util_load_json(os.path.join(test_dir, 'test_data/gps_vm_livemount_success_outputs.json'))
    requests_mock.post(BASE_URL_GRAPHQL, json=raw_response)
    command_results = rubrik_gps_vm_livemount(client, {"snapshot_id": "dummy_id"})
    assert command_results.raw_response == raw_response
    assert command_results.readable_output == expected_hr
    assert command_results.outputs == expected_outputs
@pytest.mark.parametrize("empty_response", [True, False])
def test_gps_vm_host_list_success(client, requests_mock, empty_response):
    """Verify rubrik_gps_vm_host_list_command for empty and populated responses."""
    from RubrikPolaris import rubrik_gps_vm_host_list_command
    test_dir = os.path.dirname(os.path.realpath(__file__))
    host_list_data = util_load_json(os.path.join(test_dir, "test_data/gps_vm_host_list_response.json"))
    with open(os.path.join(test_dir, "test_data/gps_vm_host_list_hr.md")) as hr_file:
        expected_hr = hr_file.read()
    enum_values = util_load_json(os.path.join(test_dir, enum_values_file_path))
    response_key = 'empty_response' if empty_response else 'raw_response'
    # Two enum lookups precede the actual host-list query.
    requests_mock.post(BASE_URL_GRAPHQL, [
        {'json': enum_values.get('sort_by_enum')},
        {'json': enum_values.get('sort_order_enum')},
        {'json': host_list_data.get(response_key)}
    ])
    command_results = rubrik_gps_vm_host_list_command(client, args={})
    if empty_response:
        assert command_results.readable_output == MESSAGES['NO_RECORDS_FOUND'].format('vm hosts')
    else:
        assert command_results.raw_response == host_list_data.get('raw_response')
        assert command_results.outputs.get(f'{OUTPUT_PREFIX["GPS_VM_HOSTS"]}(val.id == obj.id)') == \
            remove_empty_elements(host_list_data.get('outputs'))
        assert command_results.readable_output == expected_hr
@pytest.mark.parametrize("args, error", [
    ({"limit": "a"}, '"a" is not a valid number')
])
def test_gps_vm_host_list_when_invalid_arguments_are_provided(client, args, error):
    """Verify that rubrik_gps_vm_host_list_command raises on invalid arguments."""
    from RubrikPolaris import rubrik_gps_vm_host_list_command
    with pytest.raises(ValueError) as err:
        rubrik_gps_vm_host_list_command(client, args=args)
    assert str(err.value) == error
@pytest.mark.parametrize("empty_response", [True, False])
def test_gps_vm_datastore_list_success(client, requests_mock, empty_response):
    """Verify rubrik_gps_vm_datastore_list_command for empty and populated responses."""
    from RubrikPolaris import rubrik_gps_vm_datastore_list_command
    test_dir = os.path.dirname(os.path.realpath(__file__))
    datastore_data = util_load_json(os.path.join(test_dir, "test_data/gps_vm_datastore_list_response.json"))
    with open(os.path.join(test_dir, "test_data/gps_vm_datastore_list_hr.md")) as hr_file:
        expected_hr = hr_file.read()
    enum_values = util_load_json(os.path.join(test_dir, enum_values_file_path))
    args = {"host_id": "dummy_id", "name": "vm"}
    response_key = 'empty_response' if empty_response else 'raw_response'
    requests_mock.post(BASE_URL_GRAPHQL, [
        {'json': enum_values.get('sort_by_enum')},
        {'json': enum_values.get('sort_order_enum')},
        {'json': datastore_data.get(response_key)}
    ])
    command_results = rubrik_gps_vm_datastore_list_command(client, args=args)
    if empty_response:
        assert command_results.readable_output == MESSAGES['NO_RECORDS_FOUND'].format('vm datastores')
    else:
        assert command_results.raw_response == datastore_data.get('raw_response')
        # NOTE(review): these assertions reuse the GPS_VM_HOSTS / PAGE_TOKEN_VM_HOSTS
        # prefixes rather than datastore-specific ones — presumably the command shares
        # those output prefixes; confirm against OUTPUT_PREFIX in RubrikPolaris.
        assert command_results.outputs.get(f'{OUTPUT_PREFIX["GPS_VM_HOSTS"]}(val.id == obj.id)') == \
            remove_empty_elements(datastore_data.get('outputs'))
        assert command_results.outputs.get(f'{OUTPUT_PREFIX["PAGE_TOKEN_VM_HOSTS"]}(val.name == obj.name)') == \
            {"Datastore": remove_empty_elements(datastore_data.get('page_token'))}
        assert command_results.readable_output == expected_hr
@pytest.mark.parametrize("args, error", [
    ({"limit": "a"}, '"a" is not a valid number')
])
def test_gps_vm_datastore_list_when_invalid_arguments_are_provided(client, args, error):
    """Verify that rubrik_gps_vm_datastore_list_command raises on invalid arguments."""
    from RubrikPolaris import rubrik_gps_vm_datastore_list_command
    with pytest.raises(ValueError) as err:
        rubrik_gps_vm_datastore_list_command(client, args=args)
    assert str(err.value) == error
@pytest.mark.parametrize("empty_response", [True, False])
def test_cdm_cluster_connection_state_command_success(client, requests_mock, empty_response):
    """Verify cdm_cluster_connection_state_command for missing and found clusters."""
    from RubrikPolaris import cdm_cluster_connection_state_command
    test_dir = os.path.dirname(os.path.realpath(__file__))
    state_data = util_load_json(os.path.join(test_dir, "test_data/cdm_cluster_connection_state_response.json"))
    with open(os.path.join(test_dir, "test_data/cdm_cluster_connection_state_hr.md")) as hr_file:
        expected_hr = hr_file.read()
    args = {"clusterId": "dummy"}
    if empty_response:
        # An empty response means the cluster was not found, so the command raises.
        requests_mock.post(BASE_URL_GRAPHQL, json=state_data.get('empty_response'))
        with pytest.raises(Exception) as err:
            cdm_cluster_connection_state_command(client, args=args)
        assert str(err.value) == "A CDM Cluster with an ID of {} was not found.".format("dummy")
    else:
        requests_mock.post(BASE_URL_GRAPHQL, json=state_data.get('raw_response'))
        command_results = cdm_cluster_connection_state_command(client, args=args)
        assert command_results.raw_response == state_data.get('outputs').get('Cluster').get('ConnectionState')
        assert command_results.outputs == remove_empty_elements(state_data.get('outputs'))
        assert command_results.readable_output == expected_hr
@pytest.mark.parametrize("args, error", [
    ({"clusterId": ""}, ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('clusterId'))
])
def test_cdm_cluster_connection_state_command_when_invalid_arguments_are_provided(client, args, error):
    """Verify that cdm_cluster_connection_state_command raises on invalid arguments."""
    from RubrikPolaris import cdm_cluster_connection_state_command
    with pytest.raises(ValueError) as err:
        cdm_cluster_connection_state_command(client, args=args)
    assert str(err.value) == error
@pytest.mark.parametrize("empty_response", [True, False])
def test_cdm_cluster_location_command_command_success(client, requests_mock, empty_response):
    """Verify cdm_cluster_location_command for missing and found clusters."""
    from RubrikPolaris import cdm_cluster_location_command
    test_dir = os.path.dirname(os.path.realpath(__file__))
    location_data = util_load_json(os.path.join(test_dir, "test_data/cdm_cluster_location_response.json"))
    with open(os.path.join(test_dir, "test_data/cdm_cluster_location_hr.md")) as hr_file:
        expected_hr = hr_file.read()
    args = {"clusterId": "dummy"}
    if empty_response:
        # An empty response means the cluster was not found, so the command raises.
        requests_mock.post(BASE_URL_GRAPHQL, json=location_data.get('empty_response'))
        with pytest.raises(Exception) as err:
            cdm_cluster_location_command(client, args=args)
        assert str(err.value) == "A CDM Cluster with an ID of {} was not found.".format("dummy")
    else:
        requests_mock.post(BASE_URL_GRAPHQL, json=location_data.get('raw_response'))
        command_results = cdm_cluster_location_command(client, args=args)
        assert command_results.raw_response == location_data.get('outputs').get('Cluster').get('Location')
        assert command_results.outputs == remove_empty_elements(location_data.get('outputs'))
        assert command_results.readable_output == expected_hr
def test_cdm_cluster_location_command_when_key_not_present(client, requests_mock):
    """Tests rubrik-cdm-cluster-location command when some of the keys are not present in response."""
    from RubrikPolaris import cdm_cluster_location_command
    data_dir = os.path.dirname(os.path.realpath(__file__))
    mock_response = util_load_json(os.path.join(data_dir, "test_data/cdm_cluster_location_response.json"))
    # A response missing the location keys should produce the generic "no response" message.
    requests_mock.post(BASE_URL_GRAPHQL, json=mock_response.get('empty_location'))
    result = cdm_cluster_location_command(client, args={"clusterId": "dummy"})
    assert result.readable_output == MESSAGES['NO_RESPONSE']
@pytest.mark.parametrize("args, error", [
    ({"clusterId": ""}, ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('clusterId'))
])
def test_cdm_cluster_location_command_when_invalid_arguments_are_provided(client, args, error):
    """Tests invalid arguments for rubrik-cdm-cluster-location."""
    from RubrikPolaris import cdm_cluster_location_command
    # Argument validation must fail before any HTTP request is attempted.
    with pytest.raises(ValueError) as exc_info:
        cdm_cluster_location_command(client, args=args)
    assert str(exc_info.value) == error
@pytest.mark.parametrize("empty_response", [True, False])
def test_radar_analysis_status_command_success(client, requests_mock, empty_response):
    """Tests success for rubrik-radar-analysis-status."""
    from RubrikPolaris import radar_analysis_status_command
    data_dir = os.path.dirname(os.path.realpath(__file__))
    mock_response = util_load_json(os.path.join(data_dir, "test_data/radar_analysis_status_response.json"))
    with open(os.path.join(data_dir, "test_data/radar_analysis_status_hr.md"), 'r') as hr_file:
        expected_hr = hr_file.read()
    args = {"activitySeriesId": "dummy", "clusterId": "dummy"}
    # Select which canned payload the mocked GraphQL endpoint returns.
    payload_key = 'empty_response' if empty_response else 'raw_response'
    requests_mock.post(BASE_URL_GRAPHQL, json=mock_response.get(payload_key))
    response = radar_analysis_status_command(client, args=args)
    if empty_response:
        assert response.readable_output == MESSAGES["NO_RECORDS_FOUND"].format("radar analysis status")
    else:
        assert response.raw_response == mock_response.get('raw_response')
        assert response.outputs == remove_empty_elements(mock_response.get('outputs'))
        assert response.readable_output == expected_hr
@pytest.mark.parametrize("args, error", [
    ({"activitySeriesId": "", "clusterId": ""}, ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('activitySeriesId')),
    ({"activitySeriesId": "", "clusterId": "dummy"},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('activitySeriesId')),
    ({"activitySeriesId": "dummy", "clusterId": ""}, ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('clusterId'))
])
def test_radar_analysis_status_command_when_invalid_arguments_are_provided(client, args, error):
    """Tests invalid arguments for rubrik-radar-analysis-status."""
    from RubrikPolaris import radar_analysis_status_command
    # Argument validation must fail before any HTTP request is attempted.
    with pytest.raises(ValueError) as exc_info:
        radar_analysis_status_command(client, args=args)
    assert str(exc_info.value) == error
@pytest.mark.parametrize("empty_response", [True, False])
def test_event_list_success(client, requests_mock, empty_response):
    """Tests success for rubrik-event-list."""
    from RubrikPolaris import rubrik_event_list_command
    data_dir = os.path.dirname(os.path.realpath(__file__))
    mock_response = util_load_json(os.path.join(data_dir, "test_data/event_list_response.json"))
    with open(os.path.join(data_dir, "test_data/event_list_response_hr.md"), 'r') as hr_file:
        expected_hr = hr_file.read()
    enum_values = util_load_json(os.path.join(data_dir, enum_values_file_path))
    # The first two mocked calls serve the enum values used for argument
    # validation; the third serves the actual command payload.
    payload_key = 'empty_response' if empty_response else 'raw_response'
    requests_mock.post(BASE_URL_GRAPHQL, [
        {'json': enum_values.get('event_sort_by_enum')},
        {'json': enum_values.get('event_sort_order_enum')},
        {'json': mock_response.get(payload_key)}
    ])
    response = rubrik_event_list_command(client, args={})
    if empty_response:
        assert response.readable_output == MESSAGES['NO_RECORDS_FOUND'].format('events')
    else:
        assert response.raw_response == mock_response.get('raw_response')
        assert response.outputs.get(f'{OUTPUT_PREFIX["EVENT"]}(val.id == obj.id)') == \
            remove_empty_elements(mock_response.get('outputs'))
        assert response.outputs.get(f'{OUTPUT_PREFIX["PAGE_TOKEN_EVENT"]}(val.name == obj.name)') == \
            remove_empty_elements(mock_response.get('page_token'))
        assert response.readable_output == expected_hr
@pytest.mark.parametrize("args, error", [
    ({"limit": "a"}, '"a" is not a valid number'),
    ({"start_date": "aaa"}, '"aaa" is not a valid date'),
    ({"end_date": "a111"}, '"a111" is not a valid date'),
    ({"limit": -1}, ERROR_MESSAGES['INVALID_LIMIT'].format(-1))
])
def test_event_list_when_invalid_arguments_are_provided(client, args, error):
    """Tests invalid arguments for rubrik-event-list."""
    from RubrikPolaris import rubrik_event_list_command
    # Argument validation must fail before any HTTP request is attempted.
    with pytest.raises(ValueError) as exc_info:
        rubrik_event_list_command(client, args=args)
    assert str(exc_info.value) == error
def test_sonar_sensitive_hits_success(client, requests_mock):
    """
    Test case scenario for successful execution of rubrik-sonar-sensitive-hits command with a valid response.
    When:
        -calling rubrik-sonar-sensitive-hits command
    Then:
        -Verifies mock response with actual response obtained
    """
    from RubrikPolaris import sonar_sensitive_hits_command
    data_dir = os.path.dirname(os.path.realpath(__file__))
    mock_response = util_load_json(os.path.join(data_dir, "test_data/sonar_sensitive_hits_response.json"))
    with open(os.path.join(data_dir, "test_data/sonar_sensitive_hits_response_hr.md"), 'r') as hr_file:
        expected_hr = hr_file.read()
    # Two mocked calls: first the object listing, then the hit details.
    requests_mock.post(BASE_URL_GRAPHQL, [
        {'json': mock_response.get('raw_response_list')},
        {'json': mock_response.get('raw_response')}
    ])
    response = sonar_sensitive_hits_command(client, args={})
    assert response.raw_response == mock_response.get('raw_response')
    assert response.outputs == remove_empty_elements(mock_response.get('outputs'))
    assert response.readable_output == expected_hr
def test_sonar_sensitive_hits_when_response_is_empty(client, requests_mock):
    """
    Test case scenario for successful execution of rubrik-sonar-sensitive-hits command with an empty response.
    When:
        -calling rubrik-sonar-sensitive-hits command
    Then:
        -Verifies mock response with empty message obtained in HR
    """
    from RubrikPolaris import sonar_sensitive_hits_command
    data_dir = os.path.dirname(os.path.realpath(__file__))
    mock_response = util_load_json(os.path.join(data_dir, "test_data/sonar_sensitive_hits_response.json"))
    # The listing call succeeds but the detail call returns an empty payload.
    requests_mock.post(BASE_URL_GRAPHQL, [
        {'json': mock_response.get('raw_response_list')},
        {'json': mock_response.get('empty_response')}
    ])
    response = sonar_sensitive_hits_command(client, args={})
    assert response.raw_response is None
    assert response.readable_output == MESSAGES['NO_RECORDS_FOUND'].format('sensitive hits')
@pytest.mark.parametrize("args, error", [
    ({"searchTimePeriod": "a"}, '"a" is not a valid number')
])
def test_sonar_sensitive_hits_when_invalid_arguments_are_provided(client, args, error):
    """
    Tests invalid arguments for rubrik-sonar-sensitive-hits.
    Given:
        -args: contains arguments for the command
    When:
        -Invalid value is passed in arguments
    Then:
        -Raises ValueError and asserts error message
    """
    from RubrikPolaris import sonar_sensitive_hits_command
    # Argument validation must fail before any HTTP request is attempted.
    with pytest.raises(ValueError) as exc_info:
        sonar_sensitive_hits_command(client, args=args)
    assert str(exc_info.value) == error
@pytest.mark.parametrize("empty_response", [True, False])
def test_object_list_success(client, requests_mock, empty_response):
    """
    Test case scenario for successful execution of rubrik-polaris-object-list command with a valid and an empty response.
    When:
        -calling rubrik-polaris-object-list command
    Then:
        -Verifies mock response with actual response
    """
    from RubrikPolaris import rubrik_polaris_object_list_command
    data_dir = os.path.dirname(os.path.realpath(__file__))
    mock_response = util_load_json(os.path.join(data_dir, "test_data/object_list_response.json"))
    with open(os.path.join(data_dir, "test_data/object_list_response_hr.md"), 'r') as hr_file:
        expected_hr = hr_file.read()
    enum_values = util_load_json(os.path.join(data_dir, enum_values_file_path))
    # The first two mocked calls serve enum values for argument validation;
    # the third serves the actual command payload.
    payload_key = 'empty_response' if empty_response else 'raw_response'
    requests_mock.post(BASE_URL_GRAPHQL, [
        {'json': enum_values.get('sort_by_enum')},
        {'json': enum_values.get('sort_order_enum')},
        {'json': mock_response.get(payload_key)}
    ])
    response = rubrik_polaris_object_list_command(client, args={"sort_order": "ASC"})
    if empty_response:
        assert response.readable_output == MESSAGES['NO_RECORDS_FOUND'].format('objects')
    else:
        assert response.raw_response == mock_response.get('raw_response')
        assert response.outputs.get(f'{OUTPUT_PREFIX["OBJECT"]}(val.id == obj.id)') == \
            remove_empty_elements(mock_response.get('outputs'))
        assert response.outputs.get(f'{OUTPUT_PREFIX["PAGE_TOKEN_OBJECT"]}(val.name == obj.name)') == \
            remove_empty_elements(mock_response.get('page_token'))
        assert response.readable_output == expected_hr
@pytest.mark.parametrize("args, error", [
    ({"limit": "a"}, '"a" is not a valid number'),
    ({"limit": -1}, ERROR_MESSAGES['INVALID_LIMIT'].format(-1)),
    ({"sort_order": "asc"}, SDK_ERROR_MESSAGES['INVALID_SORT_ORDER'].format('asc'))
])
def test_object_list_when_invalid_arguments_are_provided(client, args, error, requests_mock):
    """
    Test case scenario for invalid arguments for rubrik-polaris-object-list.
    Given:
        -args: contains arguments for the command
    When:
        -Invalid value is passed in arguments
    Then:
        -Raises ValueError and asserts error message
    """
    from RubrikPolaris import rubrik_polaris_object_list_command
    data_dir = os.path.dirname(os.path.realpath(__file__))
    enum_values = util_load_json(os.path.join(data_dir, enum_values_file_path))
    # Enum lookups still need to be mocked; validation fails afterwards.
    requests_mock.post(BASE_URL_GRAPHQL, [
        {'json': enum_values.get('sort_by_enum')},
        {'json': enum_values.get('sort_order_enum')}
    ])
    with pytest.raises(ValueError) as exc_info:
        rubrik_polaris_object_list_command(client, args=args)
    assert str(exc_info.value) == error
@pytest.mark.parametrize("empty_response", [True, False])
def test_polaris_object_snapshot_list_success(client, requests_mock, empty_response):
    """Tests success for rubrik-polaris-object-snapshot-list."""
    from RubrikPolaris import rubrik_polaris_object_snapshot_list_command
    data_dir = os.path.dirname(os.path.realpath(__file__))
    mock_response = util_load_json(os.path.join(data_dir, "test_data/object_snapshot_list_response.json"))
    with open(os.path.join(data_dir, "test_data/object_snapshot_list_response_hr.md"), 'r') as hr_file:
        expected_hr = hr_file.read()
    enum_values = util_load_json(os.path.join(data_dir, enum_values_file_path))
    # First mocked call serves the sort-order enum; the second serves the
    # actual command payload.
    payload_key = 'empty_response' if empty_response else 'raw_response'
    requests_mock.post(BASE_URL_GRAPHQL, [
        {'json': enum_values.get('event_sort_order_enum')},
        {'json': mock_response.get(payload_key)}
    ])
    args = {"object_id": "06515737-388a-57aa-9c8e-54b3f1ee5d8b", "sort_order": "Asc"}
    response = rubrik_polaris_object_snapshot_list_command(client, args=args)
    if empty_response:
        assert response.readable_output == MESSAGES['NO_RECORDS_FOUND'].format('object snapshots')
    else:
        assert response.raw_response == mock_response.get('raw_response')
        assert response.outputs.get(f'{OUTPUT_PREFIX["OBJECT"]}(val.id == obj.id)') == \
            remove_empty_elements(mock_response.get('outputs'))
        assert response.outputs.get(f'{OUTPUT_PREFIX["PAGE_TOKEN_OBJECT"]}(val.name == obj.name)') == \
            remove_empty_elements(mock_response.get('page_token'))
        assert response.readable_output == expected_hr
@pytest.mark.parametrize("args, error", [
    ({"object_id": ""}, ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('object_id')),
    ({"object_id": "1", "limit": "a"}, '"a" is not a valid number'),
    ({"object_id": "1", "start_date": "aaa"}, '"aaa" is not a valid date'),
    ({"object_id": "1", "end_date": "a111"}, '"a111" is not a valid date'),
    ({"object_id": "1", "limit": -1}, ERROR_MESSAGES['INVALID_LIMIT'].format(-1)),
    ({"object_id": "1", "sort_order": "as"}, SDK_ERROR_MESSAGES['INVALID_OBJECT_SNAPSHOT_SORT_ORDER'].format('as'))
])
def test_polaris_object_snapshot_list_when_invalid_arguments_are_provided(client, args, error, requests_mock):
    """Tests invalid arguments for rubrik-polaris-object-snapshot-list."""
    from RubrikPolaris import rubrik_polaris_object_snapshot_list_command
    data_dir = os.path.dirname(os.path.realpath(__file__))
    enum_values = util_load_json(os.path.join(data_dir, enum_values_file_path))
    # The sort-order enum lookup still needs mocking; validation fails afterwards.
    requests_mock.post(BASE_URL_GRAPHQL, [
        {'json': enum_values.get('event_sort_order_enum')}
    ])
    with pytest.raises(ValueError) as exc_info:
        rubrik_polaris_object_snapshot_list_command(client, args=args)
    assert str(exc_info.value) == error
# (args, expected error message) pairs for the rubrik-radar-ioc-scan
# invalid-argument test below. Covers missing required fields, bad
# dates/numbers, snapshot/object count mismatches, bad ioc_type values,
# missing indicators, malformed advance_ioc JSON and bad hash types.
radar_ioc_scan_invalid_args = [
    ({"cluster_id": ""}, ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('cluster_id')),
    ({"cluster_id": "dummy-cluster-id", "object_id": ""},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('object_id')),
    ({"cluster_id": "dummy-cluster-id", "object_id": "dummy-object-id",
      "ioc_type": "INDICATOR_OF_COMPROMISE_TYPE_HASH", "ioc_value": "",
      "start_date": "abc"}, 'Invalid date: "start_date"="abc"'),
    ({"cluster_id": "dummy-cluster-id", "object_id": "dummy-object-id",
      "ioc_type": "INDICATOR_OF_COMPROMISE_TYPE_HASH", "ioc_value": "",
      "end_date": "abc"}, 'Invalid date: "end_date"="abc"'),
    ({"cluster_id": "dummy-cluster-id", "object_id": "dummy-object-id",
      "ioc_type": "INDICATOR_OF_COMPROMISE_TYPE_HASH", "ioc_value": "",
      "max_snapshots_per_object": "abc"}, 'Invalid number: "max_snapshots_per_object"="abc"'),
    ({"cluster_id": "dummy-cluster-id", "object_id": "dummy-object-id-1, dummy-object-id-2",
      "ioc_type": "INDICATOR_OF_COMPROMISE_TYPE_HASH", "ioc_value": "",
      "snapshot_id": "dummy-snapshot-id-1-1, dummy-snapshot-id-1-2"}, ERROR_MESSAGES['LEN_SNAPSHOT_NE_LEN_OBJECT']),
    ({"cluster_id": "dummy-cluster-id", "object_id": "dummy-object-id-1, dummy-object-id-2",
      "ioc_type": "INDICATOR_OF_COMPROMISE_TYPE_HASH", "ioc_value": "",
      "snapshot_id": "dummy-snapshot-id-1-1, dummy-snapshot-id-1-2: dummy-snapshot-id-2-1: dummy-snapshot-id-3-1"},
     ERROR_MESSAGES['LEN_SNAPSHOT_NE_LEN_OBJECT']),
    ({"cluster_id": "dummy-cluster-id", "object_id": "dummy-object-id-1",
      "ioc_type": "INDICATOR_OF_COMPROMISE_TYPE_HASH", "ioc_value": "",
      "snapshot_id": "dummy-snapshot-id-1-1, dummy-snapshot-id-1-2: dummy-snapshot-id-2-1"},
     ERROR_MESSAGES['LEN_SNAPSHOT_NE_LEN_OBJECT']),
    ({"cluster_id": "dummy-cluster-id", "object_id": "dummy-object-id-1",
      "ioc_type": "abc", "ioc_value": ""},
     ERROR_MESSAGES["INVALID_SELECT"].format('abc', 'ioc_type', IOC_TYPE_ENUM)),
    ({"cluster_id": "dummy-cluster-id", "object_id": "dummy-object-id-1",
      "ioc_type": "", "ioc_value": ""},
     ERROR_MESSAGES["NO_INDICATOR_SPECIFIED"]),
    ({"cluster_id": "dummy-cluster-id", "object_id": "dummy-object-id-1",
      "ioc_type": "", "ioc_value": "", "advance_ioc": ""},
     ERROR_MESSAGES["NO_INDICATOR_SPECIFIED"]),
    ({"cluster_id": "dummy-cluster-id", "object_id": "dummy-object-id-1",
      "ioc_type": "", "ioc_value": "", "advance_ioc": "{}"},
     ERROR_MESSAGES["NO_INDICATOR_SPECIFIED"]),
    ({"cluster_id": "dummy-cluster-id", "object_id": "dummy-object-id-1",
      "ioc_type": "", "ioc_value": "", "advance_ioc": "[]"},
     ERROR_MESSAGES["NO_INDICATOR_SPECIFIED"]),
    ({"cluster_id": "dummy-cluster-id", "object_id": "dummy-object-id-1",
      "ioc_type": "", "ioc_value": "", "advance_ioc": "["},
     ERROR_MESSAGES["JSON_DECODE"].format('advance_ioc')),
    ({"cluster_id": "dummy-cluster-id", "object_id": "dummy-object-id-1",
      "ioc_type": "", "ioc_value": "", "advance_ioc": "{\"path_or_filename\": \"\""},
     ERROR_MESSAGES["JSON_DECODE"].format('advance_ioc')),
    ({"cluster_id": "dummy-cluster-id", "object_id": "dummy-object-id-1",
      "ioc_type": "", "ioc_value": "", "advance_ioc": "[\"path_or_filename\": \"\"]"},
     ERROR_MESSAGES["JSON_DECODE"].format('advance_ioc')),
    ({"cluster_id": "dummy-cluster-id", "object_id": "dummy-object-id-1",
      "ioc_type": "", "ioc_value": "", "advance_ioc": "[{\"path_or_filename\": \"\"}]"},
     ERROR_MESSAGES["INVALID_FORMAT"].format('advance_ioc')),
    ({"cluster_id": "dummy-cluster-id", "object_id": "dummy-object-id-1",
      "ioc_type": "INDICATOR_OF_COMPROMISE_TYPE_HASH", "ioc_value": "", "requested_hash_types": "WRONG_HASH_TYPE"},
     SDK_ERROR_MESSAGES["INVALID_REQUESTED_HASH_TYPE"].format(["WRONG_HASH_TYPE"])),
    ({"cluster_id": "dummy-cluster-id", "object_id": "dummy-object-id-1",
      "ioc_type": "INDICATOR_OF_COMPROMISE_TYPE_HASH", "ioc_value": "",
      "requested_hash_types": "WRONG_HASH_TYPE1, WRONG_HASH_TYPE2"},
     SDK_ERROR_MESSAGES["INVALID_REQUESTED_HASH_TYPE"].format(["WRONG_HASH_TYPE1", "WRONG_HASH_TYPE2"])),
]
@pytest.mark.parametrize("args, error", radar_ioc_scan_invalid_args)
def test_radar_ioc_scan_when_invalid_arguments_are_provided(client, requests_mock, args, error):
    """
    Test case scenario for invalid arguments for rubrik-radar-ioc-scan.
    Given:
        -args: contains arguments for the command
    When:
        -Invalid value is passed in arguments
    Then:
        -Raises ValueError and asserts error message
    """
    from RubrikPolaris import rubrik_radar_ioc_scan_command
    # Consistency fix: use the shared enum_values_file_path constant and the
    # BASE_URL_GRAPHQL constant like every other test in this module, instead
    # of hard-coding "test_data/enum_values.json" and rebuilding the URL from
    # BASE_URL + "/graphql".
    enum_values = util_load_json(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                              enum_values_file_path))
    # The hash-type enum lookup still needs mocking; validation fails afterwards.
    requests_mock.post(BASE_URL_GRAPHQL, json=enum_values.get('hash_type_enum'))
    with pytest.raises(ValueError) as e:
        rubrik_radar_ioc_scan_command(client, args=args)
    assert str(e.value) == error
@pytest.mark.parametrize("empty_response", [True, False])
def test_radar_ioc_scan_when_success(client, requests_mock, empty_response):
    """
    Test case scenario for successful execution of rubrik-radar-ioc-scan command with a valid and an empty response.
    When:
        -calling rubrik-radar-ioc-scan command
    Then:
        -Verifies mock response with actual response
    """
    from RubrikPolaris import rubrik_radar_ioc_scan_command
    data_dir = os.path.dirname(os.path.realpath(__file__))
    mock_response = util_load_json(os.path.join(data_dir, "test_data/ioc_scan_response.json"))
    with open(os.path.join(data_dir, "test_data/ioc_scan_response_hr.md"), 'r') as hr_file:
        expected_hr = hr_file.read()
    args = {"cluster_id": "dummy-cluster-id", "object_id": "dummy-object-id-1",
            "ioc_type": "INDICATOR_OF_COMPROMISE_TYPE_HASH", "ioc_value": ""}
    payload_key = 'empty_response' if empty_response else 'raw_response'
    requests_mock.post(BASE_URL_GRAPHQL, json=mock_response.get(payload_key))
    response = rubrik_radar_ioc_scan_command(client, args=args)
    if empty_response:
        assert response.readable_output == MESSAGES['NO_RESPONSE']
    else:
        assert response.raw_response == mock_response.get('raw_response')
        assert response.outputs == mock_response.get('outputs')
        assert response.readable_output == expected_hr
@pytest.mark.parametrize("args, error", [
    ({"cluster_id": ""}, ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format("cluster_id"))
])
def test_radar_ioc_scan_list_when_invalid_arguments_are_provided(client, args, error):
    """
    Test case scenario for invalid arguments for rubrik-radar-ioc-scan-list.
    Given:
        -args: contains arguments for the command
    When:
        -Invalid value is passed in arguments
    Then:
        -Raises ValueError and asserts error message
    """
    from RubrikPolaris import rubrik_radar_ioc_scan_list_command
    # Argument validation must fail before any HTTP request is attempted.
    with pytest.raises(ValueError) as exc_info:
        rubrik_radar_ioc_scan_list_command(client, args=args)
    assert str(exc_info.value) == error
@pytest.mark.parametrize("empty_response", [True, False])
def test_radar_ioc_scan_list_when_success(client, requests_mock, empty_response):
    """
    Test case scenario for successful execution of rubrik-radar-ioc-scan-list command with a valid and an empty response.
    When:
        -calling rubrik-radar-ioc-scan command
    Then:
        -Verifies mock response with actual response
    """
    from RubrikPolaris import rubrik_radar_ioc_scan_list_command
    data_dir = os.path.dirname(os.path.realpath(__file__))
    mock_response = util_load_json(os.path.join(data_dir, "test_data/ioc_scan_list_response.json"))
    with open(os.path.join(data_dir, "test_data/ioc_scan_list_response_hr.md"), 'r') as hr_file:
        expected_hr = hr_file.read()
    payload_key = 'empty_response' if empty_response else 'raw_response'
    requests_mock.post(BASE_URL_GRAPHQL, json=mock_response.get(payload_key))
    response = rubrik_radar_ioc_scan_list_command(client, args={"cluster_id": "dummy-cluster-id"})
    if empty_response:
        assert response.readable_output == MESSAGES['NO_RECORDS_FOUND'].format("ioc scans")
    else:
        assert response.raw_response == mock_response.get('raw_response')
        assert response.outputs == mock_response.get('outputs')
        assert response.readable_output == expected_hr
@pytest.mark.parametrize("args, error", [
    ({"scan_id": "", "cluster_id": "dummy-cluster-id"}, ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('scan_id')),
    ({"scan_id": "dummy-scan-id", "cluster_id": ""}, ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('cluster_id'))
])
def test_radar_ioc_scan_results_when_invalid_arguments_are_provided(client, args, error):
    """
    Test case scenario for invalid arguments for rubrik-radar-ioc-scan-results.
    Given:
        -args: contains arguments for the command
    When:
        -Invalid value is passed in arguments
    Then:
        -Raises ValueError and asserts error message
    """
    from RubrikPolaris import rubrik_radar_ioc_scan_results_command
    # Argument validation must fail before any HTTP request is attempted.
    with pytest.raises(ValueError) as exc_info:
        rubrik_radar_ioc_scan_results_command(client, args=args)
    assert str(exc_info.value) == error
@pytest.mark.parametrize("empty_response", [True, False])
def test_radar_ioc_scan_results_success(client, requests_mock, empty_response):
    """
    Test case scenario for successful execution of rubrik-radar-ioc-scan-results command with a valid and an empty response.
    When:
        -calling rubrik-radar-ioc-scan-results command
    Then:
        -Verifies mock response with actual response
    """
    from RubrikPolaris import rubrik_radar_ioc_scan_results_command
    data_dir = os.path.dirname(os.path.realpath(__file__))
    mock_response = util_load_json(os.path.join(data_dir, "test_data/radar_ioc_scan_results_response.json"))
    with open(os.path.join(data_dir, "test_data/radar_ioc_scan_results_response_hr.md"), 'r') as hr_file:
        expected_hr = hr_file.read()
    payload_key = 'empty_response' if empty_response else 'raw_response'
    requests_mock.post(BASE_URL_GRAPHQL, json=mock_response.get(payload_key))
    response = rubrik_radar_ioc_scan_results_command(
        client, args={"scan_id": "dummy-scan-id", "cluster_id": "dummy-cluster-id"})
    if empty_response:
        assert response.readable_output == MESSAGES['NO_RESPONSE']
    else:
        assert response.raw_response == mock_response.get('raw_response')
        assert response.outputs == mock_response.get('outputs')
        assert response.readable_output == expected_hr
@pytest.mark.parametrize("empty_response", [True, False])
def test_gps_async_result_command_success(client, requests_mock, empty_response):
    """
    Test case scenario for successful execution of rubrik-gps-async-result command with a valid and an empty response.
    When:
        -calling rubrik-gps-async-result command
    Then:
        -Verifies mock response with actual response
    """
    from RubrikPolaris import rubrik_gps_async_result_command
    data_dir = os.path.dirname(os.path.realpath(__file__))
    mock_response = util_load_json(os.path.join(data_dir, "test_data/gps_async_result_response.json"))
    with open(os.path.join(data_dir, "test_data/gps_async_result_response_hr.md"), 'r') as hr_file:
        expected_hr = hr_file.read()
    payload_key = 'empty_response' if empty_response else 'raw_response'
    requests_mock.post(BASE_URL_GRAPHQL, json=mock_response.get(payload_key))
    response = rubrik_gps_async_result_command(client, args={"request_id": "dummy", "cluster_id": "dummy"})
    if empty_response:
        assert response.readable_output == MESSAGES["NO_RESPONSE"]
    else:
        assert response.raw_response == mock_response.get('raw_response')
        assert response.outputs == remove_empty_elements(mock_response.get('outputs'))
        assert response.readable_output == expected_hr
@pytest.mark.parametrize("args, error", [
    ({"request_id": "", "cluster_id": ""}, ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('request_id')),
    ({"request_id": "dummy", "cluster_id": ""},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('cluster_id'))
])
def test_gps_async_result_command_when_invalid_arguments_are_provided(client, args, error):
    """
    Test case scenario for invalid arguments for rubrik-gps-async-result.
    Given:
        -args: contains arguments for the command
    When:
        -Invalid value is passed in arguments
    Then:
        -Raises ValueError and asserts error message
    """
    from RubrikPolaris import rubrik_gps_async_result_command
    # Argument validation must fail before any HTTP request is attempted.
    with pytest.raises(ValueError) as exc_info:
        rubrik_gps_async_result_command(client, args=args)
    assert str(exc_info.value) == error
@pytest.mark.parametrize("empty_response", [True, False])
def test_gps_cluster_list_command_success(client, requests_mock, empty_response):
    """
    Test case scenario for successful execution of rubrik-gps-cluster-list command with a valid and an empty response.
    When:
        -calling rubrik-gps-cluster-list command
    Then:
        -Verifies mock response with actual response
    """
    from RubrikPolaris import rubrik_gps_cluster_list_command
    data_dir = os.path.dirname(os.path.realpath(__file__))
    mock_response = util_load_json(os.path.join(data_dir, "test_data/gps_cluster_list_response.json"))
    with open(os.path.join(data_dir, "test_data/gps_cluster_list_response_hr.md"), 'r') as hr_file:
        expected_hr = hr_file.read()
    enum_values = util_load_json(os.path.join(data_dir, enum_values_file_path))
    # The first two mocked calls serve enum values for argument validation;
    # the third serves the actual command payload.
    payload_key = 'empty_response' if empty_response else 'raw_response'
    requests_mock.post(BASE_URL_GRAPHQL, [
        {'json': enum_values.get('cluster_list_sort_by_enum')},
        {'json': enum_values.get('event_sort_order_enum')},
        {'json': mock_response.get(payload_key)}
    ])
    response = rubrik_gps_cluster_list_command(client, args={})
    if empty_response:
        assert response.readable_output == MESSAGES["NO_RECORDS_FOUND"].format('clusters')
    else:
        assert response.raw_response == mock_response.get('raw_response')
        assert response.outputs.get(f'{OUTPUT_PREFIX["GPS_CLUSTER"]}(val.id == obj.id)') \
            == remove_empty_elements(mock_response.get('outputs'))
        assert response.outputs.get(f"{OUTPUT_PREFIX['PAGE_TOKEN_GPS_CLUSTER']}(val.name == obj.name)") \
            == remove_empty_elements(mock_response.get('page_token'))
        assert response.readable_output == expected_hr
@pytest.mark.parametrize("args, error", [
    ({"limit": "a"}, '"a" is not a valid number'),
    ({"limit": -1}, ERROR_MESSAGES['INVALID_LIMIT'].format(-1)),
    ({"sort_order": "asc"}, SDK_ERROR_MESSAGES['INVALID_OBJECT_SNAPSHOT_SORT_ORDER'].format('asc'))
])
def test_gps_cluster_list_command_when_invalid_arguments_are_provided(client, requests_mock, args, error):
    """
    Test case scenario for invalid arguments for rubrik-gps-cluster-list.
    Given:
        -args: contains arguments for the command
    When:
        -Invalid value is passed in arguments
    Then:
        -Raises ValueError and asserts error message
    """
    from RubrikPolaris import rubrik_gps_cluster_list_command
    data_dir = os.path.dirname(os.path.realpath(__file__))
    enum_values = util_load_json(os.path.join(data_dir, enum_values_file_path))
    # Enum lookups still need to be mocked; validation fails afterwards.
    requests_mock.post(BASE_URL_GRAPHQL, [
        {'json': enum_values.get('cluster_list_sort_by_enum')},
        {'json': enum_values.get('event_sort_order_enum')}
    ])
    with pytest.raises(ValueError) as exc_info:
        rubrik_gps_cluster_list_command(client, args=args)
    assert str(exc_info.value) == error
@pytest.mark.parametrize("args, error", [
    ({"snapshot_id": ""}, ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('snapshot_id')),
    ({"snapshot_id": "dummy-snapshot-id", "cluster_id": ""},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('cluster_id')),
    ({"snapshot_id": "dummy-snapshot-id", "cluster_id": "dummy-cluster-id", "paths_to_recover": ""},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('paths_to_recover')),
    ({"snapshot_id": "dummy-snapshot-id", "cluster_id": "dummy-cluster-id", "paths_to_recover": "/etc,/home",
      "restore_path": ""},
     ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format('restore_path')),
])
def test_gps_vm_recover_files_command_when_invalid_arguments_are_provided(client, args, error):
    """
    Test case scenario for invalid arguments for rubrik-gps-vm-recover-files.
    Given:
        -args: contains arguments for the command
    When:
        -Invalid value is passed in arguments
    Then:
        -Raises ValueError and asserts error message
    """
    from RubrikPolaris import rubrik_gps_vm_recover_files
    # Argument validation must fail before any HTTP request is attempted.
    with pytest.raises(ValueError) as exc_info:
        rubrik_gps_vm_recover_files(client, args=args)
    assert str(exc_info.value) == error
@pytest.mark.parametrize("empty_response", [True, False])
def test_gps_vm_recover_files_command_success(client, requests_mock, empty_response):
    """
    Test case scenario for successful execution of rubrik-gps-vm-recover-files command with a valid and an empty response.
    When:
        -calling rubrik-gps-vm-recover-files command
    Then:
        -Verifies mock response with actual response
    """
    from RubrikPolaris import rubrik_gps_vm_recover_files
    data_dir = os.path.dirname(os.path.realpath(__file__))
    mock_response = util_load_json(os.path.join(data_dir, "test_data/gps_vm_recover_files_response.json"))
    with open(os.path.join(data_dir, "test_data/gps_vm_recover_files_response_hr.md"), 'r') as hr_file:
        expected_hr = hr_file.read()
    args = {"snapshot_id": "dummy-snapshot-id", "cluster_id": "dummy-cluster-id", "paths_to_recover": "/etc,/home",
            "restore_path": "/"}
    payload_key = 'empty_response' if empty_response else 'raw_response'
    requests_mock.post(BASE_URL_GRAPHQL, json=mock_response.get(payload_key))
    response = rubrik_gps_vm_recover_files(client, args=args)
    if empty_response:
        assert response.readable_output == MESSAGES["NO_RESPONSE"]
    else:
        assert response.raw_response == mock_response.get('raw_response')
        assert response.outputs == mock_response.get('outputs')
        assert response.readable_output == expected_hr
| 48.882403
| 124
| 0.687646
| 14,400
| 115,558
| 5.102778
| 0.030903
| 0.028579
| 0.017284
| 0.02074
| 0.923394
| 0.899265
| 0.867937
| 0.830321
| 0.775517
| 0.732458
| 0
| 0.005089
| 0.197304
| 115,558
| 2,363
| 125
| 48.903089
| 0.78708
| 0.075538
| 0
| 0.519976
| 0
| 0.002385
| 0.210104
| 0.085262
| 0
| 0
| 0
| 0
| 0.118664
| 1
| 0.054264
| false
| 0.002385
| 0.057245
| 0
| 0.112701
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8a272cb886fe5e4406e99594008c4beb54cc5151
| 8,729
|
py
|
Python
|
operations/stock/migrations/0002_book_bookallocation_bookreplenishment_historicalbookallocation_historicalbookreplenishment.py
|
kaizer88/emps
|
2669b32c46befcf1a19390fb25013817e6b00980
|
[
"MIT"
] | null | null | null |
operations/stock/migrations/0002_book_bookallocation_bookreplenishment_historicalbookallocation_historicalbookreplenishment.py
|
kaizer88/emps
|
2669b32c46befcf1a19390fb25013817e6b00980
|
[
"MIT"
] | null | null | null |
operations/stock/migrations/0002_book_bookallocation_bookreplenishment_historicalbookallocation_historicalbookreplenishment.py
|
kaizer88/emps
|
2669b32c46befcf1a19390fb25013817e6b00980
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
    """Create Book, BookAllocation, BookReplenishment and their Historical* shadow models.

    NOTE(review): the ForeignKeys without ``on_delete`` and the ``b'...'``
    string defaults indicate an auto-generated Django < 2.0 / Python 2
    migration — the operation bodies are intentionally left untouched.
    The Historical* models carry ``history_id``/``history_date``/``history_type``
    columns recording '+', '~', '-' (Created/Changed/Deleted) events,
    presumably produced by a history-tracking app — confirm before editing.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('offices', '0004_auto_20170614_1633'),
        ('employees', '0004_auto_20170619_1422'),
        ('stock', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('book_type', models.CharField(default=b'Operational', max_length=120, null=True, blank=True)),
                ('written_by', models.CharField(default=b'Operational', max_length=20, null=True, blank=True, choices=[(b'sales', b'Sales Department'), (b'marketing', b'Marketing Departrment')])),
                ('quantity', models.FloatField(default=0, max_length=20, null=True, blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='BookAllocation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('range_from', models.CharField(default=b'Operational', max_length=120, null=True, blank=True)),
                ('range_to', models.CharField(default=b'Operational', max_length=120, null=True, blank=True)),
                ('quantity', models.FloatField(default=0, max_length=20, null=True, blank=True)),
                ('date_allocated', models.DateField(null=True, blank=True)),
                ('accept', models.BooleanField(default=False)),
                ('authorize', models.CharField(default=b'Pending', max_length=20, null=True, blank=True, choices=[(b'Pending', b'Pending'), (b'Aproved', b'Authorize'), (b'Declined', b'Decline')])),
                ('book', models.ForeignKey(related_name='book_bookallocations', to='stock.Book')),
                ('created_by', models.ForeignKey(related_name='user_bookallocations', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
                ('modified_by', models.ForeignKey(related_name='user_modified_bookallocations', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
                ('region', models.ForeignKey(related_name='region_bookallocations', to='offices.Region')),
                ('regional_admin_manager', models.ForeignKey(related_name='ram_bookallocations', to='employees.Employee')),
            ],
        ),
        migrations.CreateModel(
            name='BookReplenishment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('range_from', models.CharField(default=b'Operational', max_length=120, null=True, blank=True)),
                ('range_to', models.CharField(default=b'Operational', max_length=120, null=True, blank=True)),
                ('quantity', models.FloatField(default=0, max_length=20, null=True, blank=True)),
                ('date_ordered', models.DateField(null=True, blank=True)),
                ('date_recieved', models.DateField(null=True, blank=True)),
                ('recieved', models.BooleanField(default=False)),
                ('accept', models.BooleanField(default=False)),
                ('authorize', models.CharField(default=b'Pending', max_length=20, null=True, blank=True, choices=[(b'Pending', b'Pending'), (b'Aproved', b'Authorize'), (b'Declined', b'Decline')])),
                ('book', models.ForeignKey(related_name='book_bookreplenishment', to='stock.Book')),
                ('created_by', models.ForeignKey(related_name='user_bookreplenishment', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
                ('modified_by', models.ForeignKey(related_name='user_modified_bookreplenishment', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
            ],
        ),
        # Shadow table: one row per change event on BookAllocation.
        migrations.CreateModel(
            name='HistoricalBookAllocation',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
                ('range_from', models.CharField(default=b'Operational', max_length=120, null=True, blank=True)),
                ('range_to', models.CharField(default=b'Operational', max_length=120, null=True, blank=True)),
                ('quantity', models.FloatField(default=0, max_length=20, null=True, blank=True)),
                ('date_allocated', models.DateField(null=True, blank=True)),
                ('accept', models.BooleanField(default=False)),
                ('authorize', models.CharField(default=b'Pending', max_length=20, null=True, blank=True, choices=[(b'Pending', b'Pending'), (b'Aproved', b'Authorize'), (b'Declined', b'Decline')])),
                ('history_id', models.AutoField(serialize=False, primary_key=True)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
                ('book', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='stock.Book', null=True)),
                ('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
                ('history_user', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
                ('modified_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
                ('region', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='offices.Region', null=True)),
                ('regional_admin_manager', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='employees.Employee', null=True)),
            ],
            options={
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
                'verbose_name': 'historical book allocation',
            },
        ),
        # Shadow table: one row per change event on BookReplenishment.
        migrations.CreateModel(
            name='HistoricalBookReplenishment',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
                ('range_from', models.CharField(default=b'Operational', max_length=120, null=True, blank=True)),
                ('range_to', models.CharField(default=b'Operational', max_length=120, null=True, blank=True)),
                ('quantity', models.FloatField(default=0, max_length=20, null=True, blank=True)),
                ('date_ordered', models.DateField(null=True, blank=True)),
                ('date_recieved', models.DateField(null=True, blank=True)),
                ('recieved', models.BooleanField(default=False)),
                ('accept', models.BooleanField(default=False)),
                ('authorize', models.CharField(default=b'Pending', max_length=20, null=True, blank=True, choices=[(b'Pending', b'Pending'), (b'Aproved', b'Authorize'), (b'Declined', b'Decline')])),
                ('history_id', models.AutoField(serialize=False, primary_key=True)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
                ('book', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='stock.Book', null=True)),
                ('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
                ('history_user', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
                ('modified_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
                'verbose_name': 'historical book replenishment',
            },
        ),
    ]
| 75.904348
| 197
| 0.632948
| 970
| 8,729
| 5.513402
| 0.120619
| 0.05834
| 0.065632
| 0.079469
| 0.859013
| 0.859013
| 0.859013
| 0.840127
| 0.840127
| 0.833396
| 0
| 0.013143
| 0.206782
| 8,729
| 114
| 198
| 76.570175
| 0.759243
| 0.002406
| 0
| 0.685185
| 0
| 0
| 0.186883
| 0.030669
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.064815
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8a2752b67de64b9f2b973966e6a151a79dbe7234
| 94
|
py
|
Python
|
coringa/ledgers/tests/api/__init__.py
|
joyinsky/coringa
|
79ad781a644dd8b93eeb4acb07e60233bc869a77
|
[
"MIT"
] | null | null | null |
coringa/ledgers/tests/api/__init__.py
|
joyinsky/coringa
|
79ad781a644dd8b93eeb4acb07e60233bc869a77
|
[
"MIT"
] | null | null | null |
coringa/ledgers/tests/api/__init__.py
|
joyinsky/coringa
|
79ad781a644dd8b93eeb4acb07e60233bc869a77
|
[
"MIT"
] | 1
|
2020-03-12T00:15:10.000Z
|
2020-03-12T00:15:10.000Z
|
from .ledger import *
from .account import *
from .payee import *
from .transaction import *
| 15.666667
| 26
| 0.734043
| 12
| 94
| 5.75
| 0.5
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180851
| 94
| 5
| 27
| 18.8
| 0.896104
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8a53884e481f7e8b91318de2c8ba9326854d0d9d
| 8,037
|
py
|
Python
|
source_data/model.py
|
KeePinnnn/social_media_analytic
|
d13580c7dcfc87699bf42c0f870fefccc2f4c78b
|
[
"MIT"
] | 1
|
2019-09-13T13:08:28.000Z
|
2019-09-13T13:08:28.000Z
|
source_data/model.py
|
KeePinnnn/social_media_analytic
|
d13580c7dcfc87699bf42c0f870fefccc2f4c78b
|
[
"MIT"
] | null | null | null |
source_data/model.py
|
KeePinnnn/social_media_analytic
|
d13580c7dcfc87699bf42c0f870fefccc2f4c78b
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
from sklearn.model_selection import KFold
class embedding_model():
    """Binary text classifier built on a TF-Hub sentence-embedding column.

    Reads a CSV (expects at least 'text', 'type' and 'username' columns),
    shuffles it once, and trains/evaluates a ``tf.estimator`` DNN using
    5-fold index splits.
    """

    def __init__(self, file_path: str):
        """Load the CSV at ``file_path`` and shuffle the rows."""
        self.data = pd.read_csv(file_path)
        self.df = pd.DataFrame(self.data)
        # Missing usernames become empty strings instead of NaN.
        self.df['username'] = self.df['username'].fillna('')
        # frac=1 sample == full shuffle; reset the index so folds slice cleanly.
        self.df = self.df.sample(frac=1).reset_index(drop=True)
        self.kfold = KFold(n_splits=5)

    def feature_input(self):
        """Pull the raw text and label arrays out of the dataframe."""
        self.content = self.df['text'].values
        self.type = self.df['type'].values

    def embedding_feature(self):
        """Create the TF-Hub text-embedding feature column (frozen weights)."""
        self.text_embedding = hub.text_embedding_column(
            "content",
            module_spec="https://tfhub.dev/google/nnlm-en-dim128-with-normalization/1",
            trainable=False
        )

    def model_setup(self):
        """Build the DNNEstimator with a binary-classification head."""
        self.binary_label_head = tf.contrib.estimator.binary_classification_head(
            loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE
        )
        self.estimator = tf.estimator.DNNEstimator(
            head=self.binary_label_head,
            hidden_units=[128, 64],
            feature_columns=[self.text_embedding],
            batch_norm=True,
            model_dir="./estimator_cred_score"
        )

    def train_model(self):
        """Train over each KFold split of the data.

        FIX: ``np.str`` was deprecated in NumPy 1.20 and removed in 1.24;
        the builtin ``str`` is the documented replacement for ``astype``.
        """
        for train_index, test_index in self.kfold.split(self.type):
            self.train_content = self.content[train_index].astype(str)
            self.train_type = self.type[train_index].astype(np.int32)
            self.test_content = self.content[test_index].astype(str)
            self.test_type = self.type[test_index].astype(np.int32)
            features = {
                "content": self.train_content,
            }
            labels = self.train_type
            train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
                features,
                labels,
                shuffle=False,
                batch_size=64,
                num_epochs=10
            )
            print("start training")
            self.estimator.train(input_fn=train_input_fn)

    def restore_saved_model(self):
        """Rebuild the estimator warm-started from the saved checkpoint dir."""
        print("start restoring model")
        self.estimator = tf.estimator.DNNClassifier(
            hidden_units=[128, 64],
            feature_columns=[self.text_embedding],
            warm_start_from="./estimator_cred_score"
        )

    def test_model(self):
        """Evaluate on the last fold's held-out slice and return the metrics dict."""
        eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn({
            "content": self.test_content,
        },
            self.test_type,
            shuffle=False
        )
        print("start predicting")
        return self.estimator.evaluate(input_fn=eval_input_fn)

    def predict_model(self, content: object):
        """Return a prediction generator for ``content`` (array-like of strings)."""
        eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn({
            "content": content,
        },
            shuffle=False
        )
        print("start predicting")
        return self.estimator.predict(input_fn=eval_input_fn)
class dnn_model():
    """DNN binary classifier combining a text embedding with user features.

    Reads a CSV (expects 'text', 'type', 'user_credibility' and 'verified'
    columns), shuffles it once, and trains/evaluates a ``tf.estimator``
    DNNClassifier using 5-fold index splits.
    """

    def __init__(self, file_path: str):
        """Load the CSV at ``file_path`` and shuffle the rows."""
        self.data = pd.read_csv(file_path)
        self.df = pd.DataFrame(self.data)
        # frac=1 sample == full shuffle; reset the index so folds slice cleanly.
        self.df = self.df.sample(frac=1).reset_index(drop=True)
        self.kfold = KFold(n_splits=5)

    def feature_input(self):
        """Extract text, label, credibility and verified arrays from the dataframe."""
        self.content = self.df['text'].values
        self.type = self.df['type'].values
        self.user_cred = self.df['user_credibility'].values
        self.user_verf = self.df['verified'].values

    def embedding_feature(self):
        """Build the TF-Hub embedding column plus the two numeric feature columns."""
        self.text_embedding = hub.text_embedding_column(
            "content",
            module_spec="https://tfhub.dev/google/nnlm-en-dim128-with-normalization/1",
            trainable=False
        )
        self.user_cred_feature = tf.feature_column.numeric_column("user_credibility")
        self.user_verf_feature = tf.feature_column.numeric_column("user_verified")

    def model_setup(self):
        """Build the two-class DNNClassifier over all three feature columns.

        (Earlier experiments with DNNEstimator / DNNLinearCombinedClassifier /
        LinearClassifier were removed as dead commented-out code.)
        """
        self.estimator = tf.estimator.DNNClassifier(
            n_classes=2,
            hidden_units=[128, 64],
            feature_columns=[self.text_embedding, self.user_cred_feature, self.user_verf_feature],
            batch_norm=True,
            model_dir="./estimator_new"
        )

    def restore_saved_model(self):
        """Rebuild the estimator warm-started from a saved checkpoint.

        NOTE(review): warm-starts from "./estimator_dnn" while model_setup
        writes to "./estimator_new" — confirm which checkpoint is intended.
        """
        print("start restoring model")
        self.estimator = tf.estimator.DNNClassifier(
            hidden_units=[128, 64],
            feature_columns=[self.text_embedding, self.user_cred_feature, self.user_verf_feature],
            warm_start_from="./estimator_dnn"
        )

    def train_model(self):
        """Train over each KFold split of the data.

        FIX: ``np.str`` and ``np.float`` were deprecated in NumPy 1.20 and
        removed in 1.24; the builtins ``str``/``float`` are the documented
        replacements for ``astype``.
        """
        for train_index, test_index in self.kfold.split(self.type):
            self.train_content = self.content[train_index].astype(str)
            self.train_verf = self.user_verf[train_index].astype(float)
            self.train_cred = self.user_cred[train_index].astype(float)
            self.train_type = self.type[train_index].astype(np.int32)
            self.test_content = self.content[test_index].astype(str)
            self.test_verf = self.user_verf[test_index].astype(float)
            self.test_cred = self.user_cred[test_index].astype(float)
            self.test_type = self.type[test_index].astype(np.int32)
            features = {
                "content": self.train_content,
                "user_credibility": self.train_cred,
                "user_verified": self.train_verf
            }
            labels = self.train_type
            train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
                features,
                labels,
                shuffle=False,
                batch_size=64,
                num_epochs=10
            )
            print("start training")
            self.estimator.train(input_fn=train_input_fn)

    def test_model(self):
        """Evaluate on the last fold's held-out slice and print the metrics."""
        eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn({
            "content": self.test_content,
            "user_credibility": self.test_cred,
            "user_verified": self.test_verf
        },
            self.test_type,
            shuffle=False
        )
        print("start predicting")
        print(self.estimator.evaluate(input_fn=eval_input_fn))

    def predict_model(self, content: list, verified: list, credibility: list):
        """Return a prediction generator for the given parallel input lists."""
        eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn({
            "content": np.array(content),
            "user_credibility": np.array(credibility),
            "user_verified": np.array(verified)
        },
            shuffle=False
        )
        print("start predicting")
        return self.estimator.predict(input_fn=eval_input_fn)
| 37.381395
| 104
| 0.591639
| 911
| 8,037
| 4.953897
| 0.148189
| 0.037226
| 0.034567
| 0.037226
| 0.811655
| 0.793264
| 0.773543
| 0.723909
| 0.70308
| 0.70308
| 0
| 0.012355
| 0.305089
| 8,037
| 215
| 105
| 37.381395
| 0.795703
| 0.113848
| 0
| 0.63871
| 0
| 0
| 0.07831
| 0.006197
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103226
| false
| 0
| 0.032258
| 0
| 0.167742
| 0.058065
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8a72a6e55bde5adc34769fc24d044a99404a4e62
| 133
|
py
|
Python
|
pyrevolve/genotype/cppnneat/body/config.py
|
braj29/robo_swimmers
|
b3c3fa91976884095eb6b5e67844167598ec573d
|
[
"Apache-1.1"
] | null | null | null |
pyrevolve/genotype/cppnneat/body/config.py
|
braj29/robo_swimmers
|
b3c3fa91976884095eb6b5e67844167598ec573d
|
[
"Apache-1.1"
] | null | null | null |
pyrevolve/genotype/cppnneat/body/config.py
|
braj29/robo_swimmers
|
b3c3fa91976884095eb6b5e67844167598ec573d
|
[
"Apache-1.1"
] | null | null | null |
from dataclasses import dataclass
from ..config import Config as CppnneatConfig
@dataclass
class Config(CppnneatConfig):
    """CPPN-NEAT body-genotype config; adds nothing beyond the parent CppnneatConfig."""
    pass
| 14.777778
| 45
| 0.796992
| 15
| 133
| 7.066667
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 133
| 8
| 46
| 16.625
| 0.946429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
8a8951a0ca869e31f330e9b18326fad35334678c
| 37
|
py
|
Python
|
covid_api/api/api_v1/endpoints/__init__.py
|
NASA-IMPACT/covid-api
|
62fa23918861438090367a85c1dca5300871b9c7
|
[
"MIT"
] | 14
|
2020-05-27T14:22:37.000Z
|
2022-03-31T04:44:25.000Z
|
covid_api/api/api_v1/endpoints/__init__.py
|
NASA-IMPACT/covid-api
|
62fa23918861438090367a85c1dca5300871b9c7
|
[
"MIT"
] | 47
|
2020-05-27T02:43:08.000Z
|
2021-11-30T17:50:45.000Z
|
covid_api/api/api_v1/endpoints/__init__.py
|
NASA-IMPACT/covid-api
|
62fa23918861438090367a85c1dca5300871b9c7
|
[
"MIT"
] | 3
|
2021-05-29T14:15:28.000Z
|
2021-07-01T01:20:09.000Z
|
"""covid_api.api.api_v1.endpoints"""
| 18.5
| 36
| 0.72973
| 6
| 37
| 4.166667
| 0.666667
| 0.48
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 0.027027
| 37
| 1
| 37
| 37
| 0.666667
| 0.810811
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8ad5686552302cf29dbfcda1aeee2ebf347ce280
| 22
|
py
|
Python
|
InClass/Day23/08.py
|
walkingtyphoon/Python-workspace
|
e872bce82b2bac3dd5d809f8576345ccc1c6afb7
|
[
"Apache-2.0"
] | null | null | null |
InClass/Day23/08.py
|
walkingtyphoon/Python-workspace
|
e872bce82b2bac3dd5d809f8576345ccc1c6afb7
|
[
"Apache-2.0"
] | null | null | null |
InClass/Day23/08.py
|
walkingtyphoon/Python-workspace
|
e872bce82b2bac3dd5d809f8576345ccc1c6afb7
|
[
"Apache-2.0"
] | null | null | null |
# "1" == 1 already evaluates to the bool False (a str never equals an int),
# so the extra bool() wrapper is redundant; output is identical: False
print("1" == 1)
| 11
| 21
| 0.5
| 4
| 22
| 2.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 0.136364
| 22
| 1
| 22
| 22
| 0.473684
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
0a030ea33f796dc1230712cfdaa3c6ec1b5b0d48
| 119
|
py
|
Python
|
booking/admin.py
|
Sisky/Magik-API
|
df0e4267c98d88a6211a061e4de3325e29df1e93
|
[
"MIT"
] | null | null | null |
booking/admin.py
|
Sisky/Magik-API
|
df0e4267c98d88a6211a061e4de3325e29df1e93
|
[
"MIT"
] | null | null | null |
booking/admin.py
|
Sisky/Magik-API
|
df0e4267c98d88a6211a061e4de3325e29df1e93
|
[
"MIT"
] | 1
|
2018-12-07T08:32:18.000Z
|
2018-12-07T08:32:18.000Z
|
from django.contrib import admin
import booking.models as booking_models
# Expose the Booking model in the Django admin site (default ModelAdmin).
admin.site.register(booking_models.Booking)
| 19.833333
| 43
| 0.848739
| 17
| 119
| 5.823529
| 0.588235
| 0.393939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092437
| 119
| 5
| 44
| 23.8
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0a12645b8a7bbb957ed170663c6c18b6e0501e16
| 1,573
|
py
|
Python
|
localtileserver/examples.py
|
RichardScottOZ/localtileserver
|
a0e63d1853c8d3410ba9d4ca51c993eae15a0fe6
|
[
"MIT"
] | null | null | null |
localtileserver/examples.py
|
RichardScottOZ/localtileserver
|
a0e63d1853c8d3410ba9d4ca51c993eae15a0fe6
|
[
"MIT"
] | null | null | null |
localtileserver/examples.py
|
RichardScottOZ/localtileserver
|
a0e63d1853c8d3410ba9d4ca51c993eae15a0fe6
|
[
"MIT"
] | null | null | null |
from typing import Union
from localtileserver.client import TileClient
from localtileserver.tileserver import get_data_path, get_pine_gulch_url, get_sf_bay_url
def get_blue_marble(port: Union[int, str] = "default", debug: bool = False):
    """Create a TileClient serving the bundled Blue Marble WMS dataset."""
    source = get_data_path("frmt_wms_bluemarble_s3_tms.xml")
    return TileClient(source, port=port, debug=debug)
def get_virtual_earth(port: Union[int, str] = "default", debug: bool = False):
    """Create a TileClient serving the bundled Virtual Earth WMS dataset."""
    source = get_data_path("frmt_wms_virtualearth.xml")
    return TileClient(source, port=port, debug=debug)
def get_arcgis(port: Union[int, str] = "default", debug: bool = False):
    """Create a TileClient serving the bundled ArcGIS MapServer WMS dataset."""
    source = get_data_path("frmt_wms_arcgis_mapserver_tms.xml")
    return TileClient(source, port=port, debug=debug)
def get_elevation(port: Union[int, str] = "default", debug: bool = False):
    """Create a TileClient serving the AWS elevation tiles dataset."""
    source = get_data_path("aws_elevation_tiles_prod.xml")
    return TileClient(source, port=port, debug=debug)
def get_bahamas(port: Union[int, str] = "default", debug: bool = False):
    """Create a TileClient serving the bundled Bahamas RGB GeoTIFF."""
    source = get_data_path("bahamas_rgb.tif")
    return TileClient(source, port=port, debug=debug)
def get_pine_gulch(port: Union[int, str] = "default", debug: bool = False):
    """Create a TileClient serving the remote Pine Gulch dataset URL."""
    source = get_pine_gulch_url()
    return TileClient(source, port=port, debug=debug)
def get_landsat(port: Union[int, str] = "default", debug: bool = False):
    """Create a TileClient serving the bundled Landsat GeoTIFF."""
    source = get_data_path("landsat.tif")
    return TileClient(source, port=port, debug=debug)
def get_san_francisco(port: Union[int, str] = "default", debug: bool = False):
    """Create a TileClient serving the remote San Francisco Bay dataset URL."""
    source = get_sf_bay_url()
    return TileClient(source, port=port, debug=debug)
| 34.955556
| 88
| 0.727273
| 232
| 1,573
| 4.706897
| 0.206897
| 0.057692
| 0.087912
| 0.10989
| 0.748169
| 0.722527
| 0.722527
| 0.722527
| 0.682234
| 0.641941
| 0
| 0.000745
| 0.146853
| 1,573
| 44
| 89
| 35.75
| 0.812966
| 0
| 0
| 0.296296
| 0
| 0
| 0.125874
| 0.073744
| 0
| 0
| 0
| 0
| 0
| 1
| 0.296296
| false
| 0
| 0.111111
| 0
| 0.703704
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
0a59a52eec61a03db94afacc6dcbd2cfcfb167af
| 189
|
py
|
Python
|
notebooks/platform/xarray/nb/backends.py
|
tomwhite/gwas-analysis
|
5b219607b8311722f16f7df8a8aad09ba69dc448
|
[
"Apache-2.0"
] | 19
|
2020-03-18T01:06:58.000Z
|
2022-02-06T19:59:30.000Z
|
notebooks/platform/xarray/nb/backends.py
|
tomwhite/gwas-analysis
|
5b219607b8311722f16f7df8a8aad09ba69dc448
|
[
"Apache-2.0"
] | 39
|
2020-01-20T19:50:19.000Z
|
2021-01-07T19:01:48.000Z
|
notebooks/platform/xarray/nb/backends.py
|
tomwhite/gwas-analysis
|
5b219607b8311722f16f7df8a8aad09ba69dc448
|
[
"Apache-2.0"
] | 5
|
2020-03-13T20:47:24.000Z
|
2022-01-13T09:43:35.000Z
|
from lib import api
# Select execution backends for the lib.api routines used in these notebooks:
# numba for axis-interval stats, dask for the LD matrix and the
# maximal-independent-set graph computation.
api.config.set('stats.axis_intervals.backend', 'numba')
api.config.set('stats.ld_matrix.backend', 'dask')
api.config.set('graph.maximal_independent_set.backend', 'dask')
| 47.25
| 63
| 0.777778
| 29
| 189
| 4.931034
| 0.586207
| 0.188811
| 0.251748
| 0.237762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 189
| 4
| 63
| 47.25
| 0.794444
| 0
| 0
| 0
| 0
| 0
| 0.531579
| 0.463158
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a5ec1f756eb7edb9d1476a7e02790a974b0a706
| 131
|
py
|
Python
|
lib/module.py
|
hoefkensj/pimp
|
7a98496ee5fe64fc3256375d38937b6698f47848
|
[
"Unlicense"
] | null | null | null |
lib/module.py
|
hoefkensj/pimp
|
7a98496ee5fe64fc3256375d38937b6698f47848
|
[
"Unlicense"
] | null | null | null |
lib/module.py
|
hoefkensj/pimp
|
7a98496ee5fe64fc3256375d38937b6698f47848
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
"""
Lib: Module
Functions Related for creation,detection and manipulation of python modules
"""
import os
| 14.555556
| 76
| 0.748092
| 18
| 131
| 5.444444
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152672
| 131
| 8
| 77
| 16.375
| 0.882883
| 0.885496
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6a5f624ea3f57c08faa60594dc7bab5b635909dd
| 11,148
|
py
|
Python
|
tests/test_questions.py
|
abadojack/StackOverflow-lite
|
ce709190428097e2b7356c1407276829d5c68402
|
[
"MIT"
] | 2
|
2019-01-30T18:48:14.000Z
|
2020-05-14T08:17:55.000Z
|
tests/test_questions.py
|
abadojack/StackOverflow-lite
|
ce709190428097e2b7356c1407276829d5c68402
|
[
"MIT"
] | 5
|
2018-08-24T05:10:55.000Z
|
2018-08-31T00:59:09.000Z
|
tests/test_questions.py
|
abadojack/StackOverflow-lite
|
ce709190428097e2b7356c1407276829d5c68402
|
[
"MIT"
] | 2
|
2019-01-07T17:23:02.000Z
|
2019-01-30T18:48:17.000Z
|
import json
from project.models.models import Question, Answer
from project.users.views import auth_decode
from . import BaseTest
class TestQuestions(BaseTest):
    def test_get_questions(self):
        """GET /api/v1/questions with a valid token returns 200 and a question list."""
        res = self.client().get('/api/v1/questions', content_type='application/json', headers=dict(token=self.login()))
        resp_data = json.loads(res.data.decode())
        assert res.status_code == 200
        self.assertTrue(resp_data['questions'])
    def test_get_questions_no_token(self):
        """An invalid token on GET /api/v1/questions yields 401 with 'Invalid token'."""
        res = self.client().get('/api/v1/questions', content_type='application/json', headers=dict(token='1'))
        resp_data = json.loads(res.data.decode())
        assert res.status_code == 401
        self.assertTrue(resp_data['response'] == 'Invalid token')
    def test_get_question_success(self):
        """Fetching a single question by an id taken from the list returns 200 and its body."""
        res = self.client().get('/api/v1/questions', headers=dict(token=self.login()))
        question_id = json.loads(res.data.decode())['questions'][0]['question_id']
        resp = self.client().get('/api/v1/questions/%s' % question_id, headers=dict(token=self.login()))
        assert resp.status_code == 200
        resp_data = json.loads(resp.data)
        self.assertTrue(resp_data["question"])
    def test_get_question_invalid_token(self):
        """Fetching a question with an empty token is rejected with 401."""
        res = self.client().get('/api/v1/questions/1', headers=dict(token=''))
        assert res.status_code == 401
    def test_get_question_not_found(self):
        """Fetching a nonexistent question id returns 404."""
        resp = self.client().get('/api/v1/questions/30', headers=dict(token=self.login()))
        assert resp.status_code == 404
    def test_add_question_empty_title(self):
        """Posting a question with an empty title is a 400 bad request."""
        resp = self.client().post('/api/v1/questions',
                                  headers=dict(token=self.login()),
                                  data=json.dumps({'body': 'Is this the real life?', 'title': ''}))
        assert resp.status_code == 400
    def test_add_question_empty_body(self):
        """Posting a question with an empty body is a 400 bad request."""
        resp = self.client().post('/api/v1/questions',
                                  headers=dict(token=self.login()),
                                  data=json.dumps({'body': '', 'title': 'life'}))
        assert resp.status_code == 400
    def test_add_question_same_title(self):
        """Posting a question whose title already exists returns 409 conflict."""
        question = Question('unique title', 'question 10 body', 1)
        question.insert_question()
        resp = self.client().post('/api/v1/questions',
                                  headers=dict(token=self.login()),
                                  data=json.dumps({'body': 'body is here', 'title': 'unique title'}))
        assert resp.status_code == 409
    def test_add_question_user_empty_token(self):
        """Posting a question with an empty token is rejected with 401."""
        resp = self.client().post('/api/v1/questions',
                                  headers=dict(token=''),
                                  data=json.dumps({'body': 'Is this the real life?', 'title': 'question'}))
        assert resp.status_code == 401
    def test_add_answer(self):
        """Posting an answer to an existing question returns 201 and a success message."""
        question = Question('question add answer', 'question 10 body', 1)
        question.insert_question()
        resp = self.client().post('/api/v1/questions/' + question.question_id.__str__() + '/answers', data=json.dumps(
            dict(title='test title', body='some body of quiz')),
                                  content_type='application/json',
                                  headers=dict(token=self.login()))
        assert resp.status_code == 201
        resp_data = json.loads(resp.data.decode())
        self.assertEqual(resp_data['response'], 'answer posted successfully')
    def test_add_answer_duplicate(self):
        """Posting a duplicate answer body to the same question returns 409."""
        question = Question('question add answer another one', 'question 10 body', 1)
        question.insert_question()
        Answer('duplicate', 1, question.question_id).insert_answer()
        resp = self.client().post('/api/v1/questions/' + question.question_id.__str__() + '/answers', data=json.dumps(
            dict(body='duplicate')),
                                  content_type='application/json',
                                  headers=dict(token=self.login()))
        assert resp.status_code == 409
    def test_add_answer_invalid_token(self):
        """Posting an answer with an empty token yields 401 with 'Invalid token'."""
        res = self.client().get('/api/v1/questions', headers=dict(token=self.login()))
        question_id = json.loads(res.data.decode())['questions'][0]['question_id']
        resp = self.client().post('/api/v1/questions/' + question_id + '/answers', data=json.dumps(
            dict(title='test title', body='some body of quiz')),
                                  content_type='application/json',
                                  headers=dict(token=''))
        assert resp.status_code == 401
        resp_data = json.loads(resp.data.decode())
        self.assertEqual(resp_data['response'], 'Invalid token')
    def test_add_answer_question_not_found(self):
        """Answering a nonexistent question returns 404."""
        resp = self.client().post('/api/v1/questions/1/answers', data=json.dumps(
            dict(title='test title 1 not found', body='some body of quiz')),
                                  content_type='application/json',
                                  headers=dict(token=self.login()))
        assert resp.status_code == 404
    def test_delete_question(self):
        """Deleting an existing question (with an answer) returns 200 and a success message."""
        question = Question('question delete title', 'question 10 body', 1)
        question.insert_question()
        Answer('answer delete title', 1, question.question_id).insert_answer()
        resp = self.client().delete('api/v1/questions/' + question.question_id.__str__(),
                                    content_type='application/json',
                                    headers=dict(token=self.login()))
        assert resp.status_code == 200
        resp_data = json.loads(resp.data.decode())
        self.assertEqual(resp_data['response'], 'question deleted successfully')
def test_delete_question_invalid_token(self):
    """Deleting with an empty token yields 401."""
    resp = self.client().delete(
        'api/v1/questions/1',
        content_type='application/json',
        headers={'token': ''},
    )
    self.assertEqual(resp.status_code, 401)
def test_delete_question_not_found(self):
    """Deleting a question id that does not exist returns 404."""
    resp = self.client().delete(
        'api/v1/questions/1',
        content_type='application/json',
        headers={'token': self.login()},
    )
    self.assertEqual(resp.status_code, 404)
def test_get_popular_question(self):
    """A question with several answers is surfaced by the popular endpoint."""
    question = Question('question 10 title', 'question 10 body', 1)
    question.insert_question()
    # Attach multiple answers so this question outranks the others.
    for answer_title in ('answer 10 title', 'answer 101 title', 'answer 102 title', 'answer 103 title'):
        Answer(answer_title, 1, question.question_id).insert_answer()
    res = self.client().get(
        'api/v1/questions/popular',
        content_type='application/json',
        headers={'token': self.login()},
    )
    self.assertEqual(res.status_code, 200)
    payload = json.loads(res.data.decode())
    self.assertTrue(payload['question'])
def test_get_popular_question_invalid_token(self):
    """The popular endpoint rejects an empty token with 401."""
    res = self.client().get(
        'api/v1/questions/popular',
        content_type='application/json',
        headers={'token': ''},
    )
    self.assertEqual(res.status_code, 401)
def test_update_answer_body(self):
    """The answer's author can replace the answer body via PUT."""
    question = Question('update answer', 'question 10 body', 1)
    question.insert_question()
    token = self.login()
    user_id = auth_decode(token)
    # The answer must belong to the logged-in user for the update to apply.
    answer = Answer('update answer', user_id, question.question_id)
    answer.insert_answer()
    url = '/api/v1/questions/' + str(question.question_id) + '/answers/' + str(answer.answer_id)
    resp = self.client().put(
        url,
        data=json.dumps({'body': 'newly updated answer'}),
        content_type='application/json',
        headers={'token': token},
    )
    self.assertEqual(resp.status_code, 200)
    payload = json.loads(resp.data.decode())
    self.assertEqual(payload['response'], 'answer updated successfully')
def test_update_answer_body_invalid_json(self):
    """A payload missing the 'body' key is rejected with 400 / 'Invalid json'."""
    question = Question('update answer 1', 'question 10 body', 1)
    question.insert_question()
    token = self.login()
    user_id = auth_decode(token)
    answer = Answer('update answer 1', user_id, question.question_id)
    answer.insert_answer()
    url = '/api/v1/questions/' + str(question.question_id) + '/answers/' + str(answer.answer_id)
    # 'bod' is a deliberate typo: the endpoint requires a 'body' key.
    resp = self.client().put(
        url,
        data=json.dumps({'bod': 'newly updated answer'}),
        content_type='application/json',
        headers={'token': token},
    )
    self.assertEqual(resp.status_code, 400)
    payload = json.loads(resp.data.decode())
    self.assertEqual(payload['response'], 'Invalid json')
def test_update_answer_body_invalid_token(self):
    """Updating an answer with an empty token yields 401 / 'Invalid token'."""
    question = Question('update answer 2', 'question 10 body', 1)
    question.insert_question()
    token = self.login()
    user_id = auth_decode(token)
    answer = Answer('update answer 2', user_id, question.question_id)
    answer.insert_answer()
    url = '/api/v1/questions/' + str(question.question_id) + '/answers/' + str(answer.answer_id)
    resp = self.client().put(
        url,
        data=json.dumps({'bod': 'newly updated answer'}),
        content_type='application/json',
        headers={'token': ''},
    )
    self.assertEqual(resp.status_code, 401)
    payload = json.loads(resp.data.decode())
    self.assertEqual(payload['response'], 'Invalid token')
def test_update_answer_body_mark_as_preferred(self):
    """A PUT without a body from the question owner marks the answer preferred."""
    token = self.login()
    user_id = auth_decode(token)
    # The question belongs to the logged-in user; the answer belongs to user 1.
    question = Question('update answer preferred', 'question 10 body', user_id)
    question.insert_question()
    answer = Answer('update answer preferred', 1, question.question_id)
    answer.insert_answer()
    url = '/api/v1/questions/' + str(question.question_id) + '/answers/' + str(answer.answer_id)
    resp = self.client().put(url, content_type='application/json', headers={'token': token})
    self.assertEqual(resp.status_code, 200)
    payload = json.loads(resp.data.decode())
    self.assertEqual(payload['response'], 'preferred answer marked successfully')
def test_endpoint_not_found(self):
    """GET on an unknown route returns 404."""
    resp = self.client().get('/api/v1/questions/answers/',
                             content_type='application/json',
                             headers=dict(token=''))
    # BUG FIX: the original wrote `resp.status_code = 404` — an assignment,
    # not an assertion — so this test could never fail. Compare instead.
    assert resp.status_code == 404
def test_endpoint_method_not_allowed(self):
    """PATCH on this route is not supported and returns 405.

    Renamed from ``test_endpoint_not_found``: the duplicate method name
    shadowed the preceding test, so only one of the two ever ran.
    """
    resp = self.client().patch('/api/v1/questions/answers/',
                               content_type='application/json',
                               headers=dict(token=''))
    # BUG FIX: the original assigned to resp.status_code instead of asserting.
    assert resp.status_code == 405
| 44.414343
| 119
| 0.58746
| 1,257
| 11,148
| 5.005569
| 0.08035
| 0.038144
| 0.057851
| 0.070248
| 0.881596
| 0.833757
| 0.804673
| 0.769867
| 0.725842
| 0.665925
| 0
| 0.019728
| 0.281575
| 11,148
| 250
| 120
| 44.592
| 0.765888
| 0
| 0
| 0.589474
| 0
| 0
| 0.168371
| 0.011392
| 0
| 0
| 0
| 0
| 0.173684
| 1
| 0.126316
| false
| 0
| 0.021053
| 0
| 0.152632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6a6c7d0be3229cf563caf30ddb1174c5a93f05c7
| 112,455
|
py
|
Python
|
src/openprocurement/tender/cfaselectionua/tests/lot_blanks.py
|
scrubele/prozorro-testing
|
42b93ea2f25d8cc40e66c596f582c7c05e2a9d76
|
[
"Apache-2.0"
] | null | null | null |
src/openprocurement/tender/cfaselectionua/tests/lot_blanks.py
|
scrubele/prozorro-testing
|
42b93ea2f25d8cc40e66c596f582c7c05e2a9d76
|
[
"Apache-2.0"
] | 2
|
2021-03-25T23:27:04.000Z
|
2022-03-21T22:18:15.000Z
|
src/openprocurement/tender/cfaselectionua/tests/lot_blanks.py
|
scrubele/prozorro-testing
|
42b93ea2f25d8cc40e66c596f582c7c05e2a9d76
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from copy import deepcopy
from datetime import timedelta
from uuid import uuid4
from email.header import Header
import unittest
from openprocurement.api.utils import get_now
from openprocurement.tender.cfaselectionua.tests.base import test_organization, test_agreement
# Tender Lot Resource Test
def create_tender_lot_invalid(self):
    """Creating a lot must fail with the right error for each invalid request shape."""
    # Tender contains one lot to begin with
    response = self.app.get("/tenders/{}".format(self.tender_id))
    self.assertEqual(len(response.json["data"]["lots"]), 1)
    # Unknown tender id -> 404 pointing at url/tender_id
    response = self.app.post_json(
        "/tenders/some_id/lots", {"data": {"title": "lot title", "description": "lot description"}}, status=404
    )
    self.assertEqual(response.status, "404 Not Found")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"], [{u"description": u"Not Found", u"location": u"url", u"name": u"tender_id"}]
    )
    request_path = "/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token)
    # Non-JSON content type -> 415
    response = self.app.post(request_path, "data", status=415)
    self.assertEqual(response.status, "415 Unsupported Media Type")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": u"Content-Type header should be one of ['application/json']",
                u"location": u"header",
                u"name": u"Content-Type",
            }
        ],
    )
    # Body that is not valid JSON -> 422
    response = self.app.post(request_path, "data", content_type="application/json", status=422)
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [{u"description": u"No JSON object could be decoded", u"location": u"body", u"name": u"data"}],
    )
    # JSON body that is not an object with a "data" key -> 422
    response = self.app.post_json(request_path, "data", status=422)
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"], [{u"description": u"Data not available", u"location": u"body", u"name": u"data"}]
    )
    # Wrong top-level key -> same "Data not available" error
    response = self.app.post_json(request_path, {"not_data": {}}, status=422)
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"], [{u"description": u"Data not available", u"location": u"body", u"name": u"data"}]
    )
    # Empty data -> required-field validation on title
    response = self.app.post_json(request_path, {"data": {}}, status=422)
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [{u"description": [u"This field is required."], u"location": u"body", u"name": u"title"}],
    )
    # Unknown field -> rogue-field error
    response = self.app.post_json(request_path, {"data": {"invalid_field": "invalid_value"}}, status=422)
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"], [{u"description": u"Rogue field", u"location": u"body", u"name": u"invalid_field"}]
    )
    # Scalar where a Value mapping is expected -> type error on value
    response = self.app.post_json(request_path, {"data": {"value": "invalid_value"}}, status=422)
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [u"Please use a mapping for this field or Value instance instead of unicode."],
                u"location": u"body",
                u"name": u"value",
            }
        ],
    )
    # minimalStep greater than the lot value -> cross-field validation error
    response = self.app.post_json(
        request_path,
        {
            "data": {
                "title": "lot title",
                "description": "lot description",
                "value": {"amount": "100.0"},
                "minimalStep": {"amount": "500.0"},
            }
        },
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [{u"description": [u"value should be less than value of lot"], u"location": u"body", u"name": u"minimalStep"}],
    )
    # Item referencing a non-existent lot id -> relatedLot validation error
    response = self.app.patch_json(
        "/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
        {"data": {"items": [{"relatedLot": "0" * 32}]}},
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [{u"relatedLot": [u"relatedLot should be one of lots"]}],
                u"location": u"body",
                u"name": u"items",
            }
        ],
    )
def create_tender_lot(self):
    """Happy-path lot creation plus guarantee aggregation, duplicate-id and status rules."""
    response = self.app.post_json(
        "/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": self.test_lots_data[0]}
    )
    self.assertEqual(response.status, "201 Created")
    self.assertEqual(response.content_type, "application/json")
    lot = response.json["data"]
    self.assertEqual(lot["title"], "lot title")
    self.assertEqual(lot["description"], "lot description")
    self.assertIn("id", lot)
    self.assertIn(lot["id"], response.headers["Location"])
    # No guarantee was supplied, so neither the lot nor the tender shows one
    self.assertNotIn("guarantee", lot)
    response = self.app.get("/tenders/{}".format(self.tender_id))
    self.assertNotIn("guarantee", response.json["data"])
    # A lot with a guarantee propagates the guarantee up to the tender level
    lot2 = deepcopy(self.test_lots_data[0])
    lot2["guarantee"] = {"amount": 100500, "currency": "USD"}
    response = self.app.post_json(
        "/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": lot2}
    )
    self.assertEqual(response.status, "201 Created")
    data = response.json["data"]
    self.assertIn("guarantee", data)
    self.assertEqual(data["guarantee"]["amount"], 100500)
    self.assertEqual(data["guarantee"]["currency"], "USD")
    response = self.app.get("/tenders/{}".format(self.tender_id))
    self.assertIn("guarantee", response.json["data"])
    self.assertEqual(response.json["data"]["guarantee"]["amount"], 100500)
    self.assertEqual(response.json["data"]["guarantee"]["currency"], "USD")
    # The first lot (created without a guarantee) still has none
    self.assertNotIn("guarantee", response.json["data"]["lots"][0])
    # A guarantee currency differing from the tender's (USD) is rejected
    lot3 = deepcopy(self.test_lots_data[0])
    lot3["guarantee"] = {"amount": 500, "currency": "UAH"}
    response = self.app.post_json(
        "/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": lot3}, status=422
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [u"lot guarantee currency should be identical to tender guarantee currency"],
                u"location": u"body",
                u"name": u"lots",
            }
        ],
    )
    # Omitting the currency fails the same identity check
    # (presumably it defaults to UAH — confirm against the model defaults)
    lot3["guarantee"] = {"amount": 500}
    response = self.app.post_json(
        "/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": lot3}, status=422
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [u"lot guarantee currency should be identical to tender guarantee currency"],
                u"location": u"body",
                u"name": u"lots",
            }
        ],
    )
    # Matching currency is accepted; tender guarantee is the sum over lots
    lot3["guarantee"] = {"amount": 20, "currency": "USD"}
    response = self.app.post_json(
        "/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": lot3}
    )
    self.assertEqual(response.status, "201 Created")
    data = response.json["data"]
    self.assertIn("guarantee", data)
    self.assertEqual(data["guarantee"]["amount"], 20)
    self.assertEqual(data["guarantee"]["currency"], "USD")
    response = self.app.get("/tenders/{}".format(self.tender_id))
    self.assertIn("guarantee", response.json["data"])
    self.assertEqual(response.json["data"]["guarantee"]["amount"], 100500 + 20)
    self.assertEqual(response.json["data"]["guarantee"]["currency"], "USD")
    # Changing the tender guarantee currency cascades to every lot guarantee
    response = self.app.patch_json(
        "/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
        {"data": {"guarantee": {"currency": "EUR"}}},
    )
    self.assertEqual(response.json["data"]["guarantee"]["amount"], 100500 + 20)
    self.assertEqual(response.json["data"]["guarantee"]["currency"], "EUR")
    self.assertNotIn("guarantee", response.json["data"]["lots"][0])
    self.assertEqual(response.json["data"]["lots"][1]["guarantee"]["amount"], 100500)
    self.assertEqual(response.json["data"]["lots"][1]["guarantee"]["currency"], "EUR")
    self.assertEqual(response.json["data"]["lots"][2]["guarantee"]["amount"], 20)
    self.assertEqual(response.json["data"]["lots"][2]["guarantee"]["currency"], "EUR")
    # Re-posting the first created lot (same id) violates id uniqueness
    response = self.app.post_json(
        "/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": lot}, status=422
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [{u"description": [u"Lot id should be uniq for all lots"], u"location": u"body", u"name": u"lots"}],
    )
    # Lot creation is forbidden once the tender enters the restricted status
    self.set_status("{}".format(self.forbidden_lot_actions_status))
    response = self.app.post_json(
        "/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token),
        {"data": self.test_lots_data[0]},
        status=403,
    )
    self.assertEqual(response.status, "403 Forbidden")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(
        response.json["errors"][0]["description"],
        "Can't add lot in current ({}) tender status".format(self.forbidden_lot_actions_status),
    )
def patch_tender_lot(self):
    """Patch lot fields during active.enquiries and verify tender-level recalculation."""
    response = self.app.get("/tenders/{}".format(self.tender_id))
    lot = response.json["data"]["lots"][0]
    tender = response.json["data"]
    # active.enquiries period
    # Raise minimalStep through the lot endpoint
    new_lot_minimal_step = lot["minimalStep"]
    new_lot_minimal_step["amount"] = new_lot_minimal_step["amount"] + 30
    response = self.app.patch_json(
        "/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
        {"data": {"minimalStep": new_lot_minimal_step}},
    )
    self.assertEqual((response.status, response.content_type), ("200 OK", "application/json"))
    self.assertEqual(response.json["data"]["minimalStep"]["amount"], new_lot_minimal_step["amount"])
    # Lower minimalStep through the tender endpoint; tender minimalStep mirrors the lot's
    new_lot_minimal_step["amount"] = 20
    response = self.app.patch_json(
        "/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
        {"data": {"lots": [{"minimalStep": new_lot_minimal_step}]}},
    )
    self.assertEqual((response.status, response.content_type), ("200 OK", "application/json"))
    self.assertEqual(response.json["data"]["lots"][0]["minimalStep"]["amount"], new_lot_minimal_step["amount"])
    self.assertEqual(response.json["data"]["minimalStep"]["amount"], new_lot_minimal_step["amount"])
    # Title patch is visible both in the patch response and on a subsequent GET
    response = self.app.patch_json(
        "/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
        {"data": {"title": "new title"}},
    )
    self.assertEqual((response.status, response.content_type), ("200 OK", "application/json"))
    self.assertEqual(response.json["data"]["title"], "new title")
    response = self.app.get("/tenders/{}/lots/{}".format(self.tender_id, lot["id"]))
    self.assertEqual((response.status, response.content_type), ("200 OK", "application/json"))
    self.assertEqual(response.json["data"]["title"], "new title")
    # Setting a guarantee amount defaults the currency to UAH
    response = self.app.patch_json(
        "/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
        {"data": {"guarantee": {"amount": 12}}},
    )
    self.assertEqual((response.status, response.content_type), ("200 OK", "application/json"))
    self.assertIn("guarantee", response.json["data"])
    self.assertEqual(response.json["data"]["guarantee"]["amount"], 12)
    self.assertEqual(response.json["data"]["guarantee"]["currency"], "UAH")
    response = self.app.patch_json(
        "/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
        {"data": {"guarantee": {"currency": "USD"}}},
    )
    self.assertEqual((response.status, response.content_type), ("200 OK", "application/json"))
    # Patching value with unchanged data returns an empty (null) body
    # NOTE(review): looks like a no-op patch intentionally yields no "data" — confirm
    lot_data = {u"value": {u"currency": u"UAH", u"amount": 200.0, u"valueAddedTaxIncluded": True}, u"id": lot["id"]}
    response = self.app.patch_json(
        "/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token), {"data": lot_data}
    )
    self.assertEqual((response.status, response.content_type), ("200 OK", "application/json"))
    self.assertEqual(response.json, None)
    # Changing item quantity recalculates the lot value
    items = deepcopy(tender["items"])
    items[0]["quantity"] += 1
    items[0]["description"] = "new description"
    response = self.app.patch_json(
        "/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token), {"data": {"items": items}}
    )
    self.assertEqual((response.status, response.content_type), ("200 OK", "application/json"))
    self.assertEqual(response.json["data"]["items"][0]["quantity"], tender["items"][0]["quantity"] + 1)
    self.assertEqual(response.json["data"]["items"][0]["description"], items[0]["description"])
    # lots[0] value amount is recalculated
    self.assertNotEqual(tender["lots"][0]["value"]["amount"], response.json["data"]["lots"][0]["value"]["amount"])
    # patch minimalStep: one below the tender value is accepted
    new_lot_minimal_step = response.json["data"]["value"]
    new_lot_minimal_step["amount"] = new_lot_minimal_step["amount"] - 1
    response = self.app.patch_json(
        "/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
        {"data": {"minimalStep": new_lot_minimal_step}},
    )
    self.assertEqual((response.status, response.content_type), ("200 OK", "application/json"))
    self.assertEqual(response.json["data"]["minimalStep"]["amount"], new_lot_minimal_step["amount"])
    response = self.app.get("/tenders/{}".format(self.tender_id))
    self.assertEqual(response.json["data"]["minimalStep"]["amount"], new_lot_minimal_step["amount"])
    # Shrinking the items would push the lot value below minimalStep -> 422
    items[0]["quantity"] -= 1
    response = self.app.patch_json(
        "/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token), {"data": {"items": items}}, status=422
    )
    self.assertEqual((response.status, response.content_type), ("422 Unprocessable Entity", "application/json"))
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [{u"minimalStep": [u"value should be less than value of lot"]}],
                u"location": u"body",
                u"name": u"lots",
            }
        ],
    )
    # Halving minimalStep makes the same items patch valid again
    new_lot_minimal_step["amount"] = new_lot_minimal_step["amount"] / 2
    response = self.app.patch_json(
        "/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
        {"data": {"minimalStep": new_lot_minimal_step}},
    )
    self.assertEqual((response.status, response.content_type), ("200 OK", "application/json"))
    self.assertEqual(response.json["data"]["minimalStep"]["amount"], new_lot_minimal_step["amount"])
    response = self.app.patch_json(
        "/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token), {"data": {"items": items}}
    )
    self.assertEqual((response.status, response.content_type), ("200 OK", "application/json"))
    self.assertEqual(response.json["data"]["status"], "active.enquiries")
    self.assertEqual(response.json["data"]["lots"][0]["minimalStep"]["amount"], new_lot_minimal_step["amount"])
def patch_tender_lot_invalid(self):
    """Lot patches with invalid data, unknown ids, or in a forbidden status are rejected."""
    resp = self.app.get("/tenders/{}".format(self.tender_id))
    lot = resp.json["data"]["lots"][0]
    lot_url = "/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token)
    # active.enquiries period: a minimalStep above the lot value is invalid
    oversized_step = lot["value"]
    oversized_step["amount"] = oversized_step["amount"] + 30
    resp = self.app.patch_json(lot_url, {"data": {"minimalStep": oversized_step}}, status=422)
    self.assertEqual((resp.status, resp.content_type), ("422 Unprocessable Entity", "application/json"))
    self.assertEqual(resp.json["status"], "error")
    self.assertEqual(
        resp.json["errors"],
        [{u"description": [u"value should be less than value of lot"], u"location": u"body", u"name": u"minimalStep"}],
    )
    # Unknown lot id -> 404 on lot_id
    resp = self.app.patch_json(
        "/tenders/{}/lots/some_id?acc_token={}".format(self.tender_id, self.tender_token),
        {"data": {"title": "other title"}},
        status=404,
    )
    self.assertEqual(resp.status, "404 Not Found")
    self.assertEqual(resp.content_type, "application/json")
    self.assertEqual(resp.json["status"], "error")
    self.assertEqual(resp.json["errors"], [{u"description": u"Not Found", u"location": u"url", u"name": u"lot_id"}])
    # Unknown tender id -> 404 on tender_id
    resp = self.app.patch_json("/tenders/some_id/lots/some_id", {"data": {"title": "other title"}}, status=404)
    self.assertEqual(resp.status, "404 Not Found")
    self.assertEqual(resp.content_type, "application/json")
    self.assertEqual(resp.json["status"], "error")
    self.assertEqual(
        resp.json["errors"], [{u"description": u"Not Found", u"location": u"url", u"name": u"tender_id"}]
    )
    # Patching is forbidden once the tender reaches the restricted status
    self.set_status("{}".format(self.forbidden_lot_actions_status))
    resp = self.app.patch_json(lot_url, {"data": {"title": "other title"}}, status=403)
    self.assertEqual(resp.status, "403 Forbidden")
    self.assertEqual(resp.content_type, "application/json")
    self.assertEqual(
        resp.json["errors"][0]["description"],
        "Can't update lot in current ({}) tender status".format(self.forbidden_lot_actions_status),
    )
def patch_tender_currency(self):
    """Currency flows from tender to lots; direct lot currency patches are ignored."""
    # fetch the existing lot
    response = self.app.get("/tenders/{}".format(self.tender_id))
    lot = response.json["data"]["lots"][0]
    self.assertEqual(lot["value"]["currency"], "UAH")
    # updating tender currency without a matching minimalStep currency change is rejected
    response = self.app.patch_json(
        "/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
        {"data": {"value": {"currency": "GBP"}}},
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [u"currency should be identical to currency of value of tender"],
                u"location": u"body",
                u"name": u"minimalStep",
            }
        ],
    )
    # update tender currency together with minimalStep currency
    response = self.app.patch_json(
        "/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
        {"data": {"value": {"currency": "GBP"}, "minimalStep": {"currency": "GBP"}}},
    )
    self.assertEqual(response.status, "200 OK")
    # lot currency is updated too
    response = self.app.get("/tenders/{}/lots/{}".format(self.tender_id, lot["id"]))
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    lot = response.json["data"]
    self.assertEqual(lot["value"]["currency"], "GBP")
    # try to update lot currency directly
    response = self.app.patch_json(
        "/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
        {"data": {"value": {"currency": "USD"}}},
    )
    self.assertEqual(response.status, "200 OK")
    # but the value stays unchanged
    response = self.app.get("/tenders/{}/lots/{}".format(self.tender_id, lot["id"]))
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    lot = response.json["data"]
    self.assertEqual(lot["value"]["currency"], "GBP")
    # try to update minimalStep currency directly
    response = self.app.patch_json(
        "/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
        {"data": {"minimalStep": {"currency": "USD"}}},
    )
    self.assertEqual(response.status, "200 OK")
    # but the value stays unchanged
    response = self.app.get("/tenders/{}/lots/{}".format(self.tender_id, lot["id"]))
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    lot = response.json["data"]
    self.assertEqual(lot["minimalStep"]["currency"], "GBP")
    # try to update lot minimalStep currency and lot value currency in a single request
    response = self.app.patch_json(
        "/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
        {"data": {"value": {"currency": "USD"}, "minimalStep": {"currency": "USD"}}},
    )
    self.assertEqual(response.status, "200 OK")
    # but the values stay unchanged
    response = self.app.get("/tenders/{}/lots/{}".format(self.tender_id, lot["id"]))
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    lot = response.json["data"]
    self.assertEqual(lot["value"]["currency"], "GBP")
    self.assertEqual(lot["minimalStep"]["currency"], "GBP")
def patch_tender_vat(self):
    """VAT flag flows from tender to lots; direct lot VAT patches are ignored."""
    # set tender VAT
    response = self.app.patch_json(
        "/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
        {"data": {"value": {"valueAddedTaxIncluded": True}}},
    )
    self.assertEqual(response.status, "200 OK")
    # fetch the existing lot; it inherits the tender VAT flag
    response = self.app.get("/tenders/{}".format(self.tender_id))
    lot = response.json["data"]["lots"][0]
    self.assertTrue(lot["value"]["valueAddedTaxIncluded"])
    # update tender VAT (value and minimalStep together)
    response = self.app.patch_json(
        "/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
        {"data": {"value": {"valueAddedTaxIncluded": False}, "minimalStep": {"valueAddedTaxIncluded": False}}},
    )
    self.assertEqual(response.status, "200 OK")
    # lot VAT is updated too
    response = self.app.get("/tenders/{}/lots/{}".format(self.tender_id, lot["id"]))
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    lot = response.json["data"]
    self.assertFalse(lot["value"]["valueAddedTaxIncluded"])
    # try to update lot VAT directly
    response = self.app.patch_json(
        "/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
        {"data": {"value": {"valueAddedTaxIncluded": True}}},
    )
    self.assertEqual(response.status, "200 OK")
    # but the value stays unchanged
    response = self.app.get("/tenders/{}/lots/{}".format(self.tender_id, lot["id"]))
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    lot = response.json["data"]
    self.assertFalse(lot["value"]["valueAddedTaxIncluded"])
    # try to update minimalStep VAT directly
    response = self.app.patch_json(
        "/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
        {"data": {"minimalStep": {"valueAddedTaxIncluded": True}}},
    )
    self.assertEqual(response.status, "200 OK")
    # but the value stays unchanged
    response = self.app.get("/tenders/{}/lots/{}".format(self.tender_id, lot["id"]))
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    lot = response.json["data"]
    self.assertFalse(lot["minimalStep"]["valueAddedTaxIncluded"])
    # try to update minimalStep VAT and value VAT in a single request
    response = self.app.patch_json(
        "/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
        {"data": {"value": {"valueAddedTaxIncluded": True}, "minimalStep": {"valueAddedTaxIncluded": True}}},
    )
    self.assertEqual(response.status, "200 OK")
    # but the values stay unchanged
    response = self.app.get("/tenders/{}/lots/{}".format(self.tender_id, lot["id"]))
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    lot = response.json["data"]
    self.assertFalse(lot["value"]["valueAddedTaxIncluded"])
    self.assertEqual(lot["minimalStep"]["valueAddedTaxIncluded"], lot["value"]["valueAddedTaxIncluded"])
def get_tender_lot(self):
    """Fetch a single lot by id and verify its visible fields per tender status."""
    resp = self.app.get("/tenders/{}".format(self.tender_id))
    first_lot = resp.json["data"]["lots"][0]
    lot_url = "/tenders/{}/lots/{}".format(self.tender_id, first_lot["id"])
    resp = self.app.get(lot_url)
    self.assertEqual(resp.status, "200 OK")
    self.assertEqual(resp.content_type, "application/json")
    expected_fields = {u"id", u"date", u"title", u"description", u"minimalStep", u"status", u"value"}
    self.assertEqual(set(resp.json["data"]), expected_fields)
    # After qualification begins an auctionPeriod field appears; drop it before comparing
    self.set_status("active.qualification")
    resp = self.app.get(lot_url)
    self.assertEqual(resp.status, "200 OK")
    self.assertEqual(resp.content_type, "application/json")
    lot_view = resp.json["data"]
    lot_view.pop("auctionPeriod")
    self.assertEqual(lot_view, first_lot)
    # Unknown lot id -> 404 on lot_id
    resp = self.app.get("/tenders/{}/lots/some_id".format(self.tender_id), status=404)
    self.assertEqual(resp.status, "404 Not Found")
    self.assertEqual(resp.content_type, "application/json")
    self.assertEqual(resp.json["status"], "error")
    self.assertEqual(resp.json["errors"], [{u"description": u"Not Found", u"location": u"url", u"name": u"lot_id"}])
    # Unknown tender id -> 404 on tender_id
    resp = self.app.get("/tenders/some_id/lots/some_id", status=404)
    self.assertEqual(resp.status, "404 Not Found")
    self.assertEqual(resp.content_type, "application/json")
    self.assertEqual(resp.json["status"], "error")
    self.assertEqual(
        resp.json["errors"], [{u"description": u"Not Found", u"location": u"url", u"name": u"tender_id"}]
    )
def get_tender_lots(self):
    """List tender lots and verify exposed fields, plus 404 for a bad tender id."""
    resp = self.app.get("/tenders/{}".format(self.tender_id))
    first_lot = resp.json["data"]["lots"][0]
    listing_url = "/tenders/{}/lots".format(self.tender_id)
    resp = self.app.get(listing_url)
    self.assertEqual(resp.status, "200 OK")
    self.assertEqual(resp.content_type, "application/json")
    expected_fields = {u"id", u"date", u"title", u"description", u"status", u"value", u"minimalStep"}
    self.assertEqual(set(resp.json["data"][0]), expected_fields)
    # auctionPeriod shows up after qualification begins; strip it before comparing
    self.set_status("active.qualification")
    resp = self.app.get(listing_url)
    self.assertEqual(resp.status, "200 OK")
    self.assertEqual(resp.content_type, "application/json")
    lot_view = resp.json["data"][0]
    lot_view.pop("auctionPeriod")
    self.assertEqual(lot_view, first_lot)
    # Unknown tender id -> 404 on tender_id
    resp = self.app.get("/tenders/some_id/lots", status=404)
    self.assertEqual(resp.status, "404 Not Found")
    self.assertEqual(resp.content_type, "application/json")
    self.assertEqual(resp.json["status"], "error")
    self.assertEqual(
        resp.json["errors"], [{u"description": u"Not Found", u"location": u"url", u"name": u"tender_id"}]
    )
def delete_tender_lot(self):
    """Delete lots in various states: success, 404s, relatedLot conflict, forbidden tender status."""
    # Create a throwaway lot that can be deleted cleanly.
    response = self.app.post_json(
        "/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": self.test_lots_data[0]}
    )
    self.assertEqual(response.status, "201 Created")
    self.assertEqual(response.content_type, "application/json")
    lot = response.json["data"]
    # Deleting an existing, unreferenced lot succeeds and echoes the lot back.
    response = self.app.delete("/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token))
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["data"], lot)
    # Unknown lot id -> structured 404 naming lot_id.
    response = self.app.delete(
        "/tenders/{}/lots/some_id?acc_token={}".format(self.tender_id, self.tender_token), status=404
    )
    self.assertEqual(response.status, "404 Not Found")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(response.json["errors"], [{u"description": u"Not Found", u"location": u"url", u"name": u"lot_id"}])
    # Unknown tender id -> structured 404 naming tender_id.
    response = self.app.delete("/tenders/some_id/lots/some_id", status=404)
    self.assertEqual(response.status, "404 Not Found")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"], [{u"description": u"Not Found", u"location": u"url", u"name": u"tender_id"}]
    )
    # Re-create a lot and reference it from a tender item ...
    response = self.app.post_json(
        "/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": self.test_lots_data[0]}
    )
    self.assertEqual(response.status, "201 Created")
    self.assertEqual(response.content_type, "application/json")
    lot = response.json["data"]
    response = self.app.patch_json(
        "/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
        {"data": {"items": [{"relatedLot": lot["id"]}]}},
    )
    self.assertEqual(response.status, "200 OK")
    # ... so deleting it would orphan the item's relatedLot -> 422.
    response = self.app.delete(
        "/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token), status=422
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [{u"relatedLot": [u"relatedLot should be one of lots"]}],
                u"location": u"body",
                u"name": u"items",
            }
        ],
    )
    # In a tender status that forbids lot actions, deletion is rejected with 403.
    self.set_status("{}".format(self.forbidden_lot_actions_status))
    response = self.app.delete(
        "/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token), status=403
    )
    self.assertEqual(response.status, "403 Forbidden")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(
        response.json["errors"][0]["description"],
        "Can't delete lot in current ({}) tender status".format(self.forbidden_lot_actions_status),
    )
def tender_lot_guarantee(self):
    """Tender-level guarantee is aggregated from lot guarantees (sum of amounts, one currency)."""
    data = deepcopy(self.initial_data)
    data["guarantee"] = {"amount": 100, "currency": "USD"}
    response = self.app.post_json("/tenders", {"data": data})
    tender = response.json["data"]
    self.tender_id = tender["id"]
    tender_token = response.json["access"]["token"]
    self.assertEqual(response.status, "201 Created")
    self.assertIn("guarantee", response.json["data"])
    self.assertEqual(response.json["data"]["guarantee"]["amount"], 100)
    self.assertEqual(response.json["data"]["guarantee"]["currency"], "USD")
    # switch to active.enquiries
    self.set_status("active.enquiries")
    # First lot carries its own guarantee in the tender's current currency.
    lot = deepcopy(self.test_lots_data[0])
    lot["guarantee"] = {"amount": 20, "currency": "USD"}
    response = self.app.post_json("/tenders/{}/lots?acc_token={}".format(tender["id"], tender_token), {"data": lot})
    self.assertEqual(response.status, "201 Created")
    self.assertIn("guarantee", response.json["data"])
    self.assertEqual(response.json["data"]["guarantee"]["amount"], 20)
    self.assertEqual(response.json["data"]["guarantee"]["currency"], "USD")
    # Changing the tender guarantee currency cascades to the existing lot.
    response = self.app.patch_json(
        "/tenders/{}?acc_token={}".format(tender["id"], tender_token), {"data": {"guarantee": {"currency": "GBP"}}}
    )
    self.assertEqual(response.status, "200 OK")
    self.assertIn("guarantee", response.json["data"])
    self.assertEqual(response.json["data"]["guarantee"]["amount"], 20)
    self.assertEqual(response.json["data"]["guarantee"]["currency"], "GBP")
    # Second lot with matching currency; the tender amount becomes the sum over lots.
    lot["guarantee"] = {"amount": 20, "currency": "GBP"}
    response = self.app.post_json("/tenders/{}/lots?acc_token={}".format(tender["id"], tender_token), {"data": lot})
    self.assertEqual(response.status, "201 Created")
    lot_id = response.json["data"]["id"]
    self.assertEqual(response.json["data"]["guarantee"]["amount"], 20)
    self.assertEqual(response.json["data"]["guarantee"]["currency"], "GBP")
    response = self.app.get("/tenders/{}".format(tender["id"]))
    self.assertEqual(response.json["data"]["guarantee"]["amount"], 20 + 20)
    self.assertEqual(response.json["data"]["guarantee"]["currency"], "GBP")
    # Third lot, still GBP, adds to the aggregate.
    lot2 = deepcopy(self.test_lots_data[0])
    lot2["guarantee"] = {"amount": 30, "currency": "GBP"}
    response = self.app.post_json("/tenders/{}/lots?acc_token={}".format(tender["id"], tender_token), {"data": lot2})
    self.assertEqual(response.status, "201 Created")
    lot2_id = response.json["data"]["id"]
    self.assertEqual(response.json["data"]["guarantee"]["amount"], 30)
    self.assertEqual(response.json["data"]["guarantee"]["currency"], "GBP")
    # A lot whose guarantee currency differs from the tender's is rejected with 422.
    lot2["guarantee"] = {"amount": 40, "currency": "USD"}
    response = self.app.post_json(
        "/tenders/{}/lots?acc_token={}".format(tender["id"], tender_token), {"data": lot2}, status=422
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [u"lot guarantee currency should be identical to tender guarantee currency"],
                u"location": u"body",
                u"name": u"lots",
            }
        ],
    )
    response = self.app.get("/tenders/{}".format(tender["id"]))
    self.assertIn("guarantee", response.json["data"])
    self.assertEqual(response.json["data"]["guarantee"]["amount"], 20 + 20 + 30)
    self.assertEqual(response.json["data"]["guarantee"]["currency"], "GBP")
    # Patching the tender-level amount directly has no effect; the lot sum stays.
    response = self.app.patch_json(
        "/tenders/{}?acc_token={}".format(tender["id"], tender_token), {"data": {"guarantee": {"amount": 55}}}
    )
    self.assertEqual(response.json["data"]["guarantee"]["amount"], 20 + 20 + 30)
    self.assertEqual(response.json["data"]["guarantee"]["currency"], "GBP")
    # Updating one lot's guarantee is reflected in the aggregate.
    response = self.app.patch_json(
        "/tenders/{}/lots/{}?acc_token={}".format(tender["id"], lot2_id, tender_token),
        {"data": {"guarantee": {"amount": 35, "currency": "GBP"}}},
    )
    self.assertEqual(response.json["data"]["guarantee"]["amount"], 35)
    self.assertEqual(response.json["data"]["guarantee"]["currency"], "GBP")
    response = self.app.get("/tenders/{}".format(tender["id"]))
    self.assertIn("guarantee", response.json["data"])
    self.assertEqual(response.json["data"]["guarantee"]["amount"], 20 + 20 + 35)
    self.assertEqual(response.json["data"]["guarantee"]["currency"], "GBP")
    # Zero both tracked lots; only the first lot's 20 remains in the total.
    for l_id in (lot_id, lot2_id):
        response = self.app.patch_json(
            "/tenders/{}/lots/{}?acc_token={}".format(tender["id"], l_id, tender_token),
            {"data": {"guarantee": {"amount": 0, "currency": "GBP"}}},
        )
        self.assertEqual(response.json["data"]["guarantee"]["amount"], 0)
        self.assertEqual(response.json["data"]["guarantee"]["currency"], "GBP")
    response = self.app.get("/tenders/{}".format(tender["id"]))
    self.assertIn("guarantee", response.json["data"])
    self.assertEqual(response.json["data"]["guarantee"]["amount"], 20)
    self.assertEqual(response.json["data"]["guarantee"]["currency"], "GBP")
    # Deleting those lots keeps the remaining guarantee intact.
    for l_id in (lot_id, lot2_id):
        response = self.app.delete("/tenders/{}/lots/{}?acc_token={}".format(tender["id"], l_id, tender_token))
        self.assertEqual(response.status, "200 OK")
    response = self.app.get("/tenders/{}".format(tender["id"]))
    self.assertIn("guarantee", response.json["data"])
    self.assertEqual(response.json["data"]["guarantee"]["amount"], 20)
    self.assertEqual(response.json["data"]["guarantee"]["currency"], "GBP")
# Tender Lot Feature Resource Test
def tender_value(self):
    """Check that tender value and minimalStep are aggregated from the lots.

    The tender-level ``value.amount`` must equal the sum of all lot values and
    ``minimalStep.amount`` the minimum over all lots' minimal steps.
    """
    request_path = "/tenders/{}".format(self.tender_id)
    response = self.app.get(request_path)
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    # Generator expressions instead of throwaway list comprehensions inside sum()/min().
    self.assertEqual(
        response.json["data"]["value"]["amount"], sum(i["value"]["amount"] for i in self.initial_lots)
    )
    self.assertEqual(
        response.json["data"]["minimalStep"]["amount"], min(i["minimalStep"]["amount"] for i in self.initial_lots)
    )
def tender_features_invalid(self):
    """Patch tender features with invalid values, then finish with a valid feature set."""
    request_path = "/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token)
    data = self.initial_data.copy()
    item = data["items"][0].copy()
    item["id"] = "1"
    data["items"] = [item]
    # A lot-scoped feature whose first enum value exceeds the allowed maximum.
    data["features"] = [
        {
            "featureOf": "lot",
            "relatedItem": self.initial_lots[0]["id"],
            "title": u"Потужність всмоктування",
            "enum": [
                {"value": self.invalid_feature_value, "title": u"До 1000 Вт"},
                {"value": 0.15, "title": u"Більше 1000 Вт"},
            ],
        }
    ]
    response = self.app.patch_json(request_path, {"data": data}, status=422)
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [
                    {u"enum": [{u"value": [u"Float value should be less than {}.".format(self.max_feature_value)]}]}
                ],
                u"location": u"body",
                u"name": u"features",
            }
        ],
    )
    # Two features on the same lot whose maximum values together exceed the cap.
    data["features"][0]["enum"][0]["value"] = 0.1
    data["features"].append(data["features"][0].copy())
    data["features"][1]["enum"][0]["value"] = self.sum_of_max_value_of_all_features
    response = self.app.patch_json(request_path, {"data": data}, status=422)
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [
                    u"Sum of max value of all features for lot should be less then or equal to {0:.0%}".format(
                        self.sum_of_max_value_of_all_features
                    )
                ],
                u"location": u"body",
                u"name": u"features",
            }
        ],
    )
    # With values back in range (and features spread over both lots) the patch succeeds.
    data["features"][1]["enum"][0]["value"] = 0.1
    data["features"].append(data["features"][0].copy())
    data["features"][2]["relatedItem"] = self.initial_lots[1]["id"]
    data["features"].append(data["features"][2].copy())
    response = self.app.patch_json(request_path, {"data": data})
    self.assertEqual(response.status, "200 OK")
def tender_lot_document(self):
    """Attach a tender document to a lot, covering relatedItem validation errors first."""
    # Upload a plain tender document (non-ASCII filename goes through an email Header).
    response = self.app.post(
        "/tenders/{}/documents?acc_token={}".format(self.tender_id, self.tender_token),
        upload_files=[("file", str(Header(u"укр.doc", "utf-8")), "content")],
    )
    self.assertEqual(response.status, "201 Created")
    self.assertEqual(response.content_type, "application/json")
    doc_id = response.json["data"]["id"]
    # dateModified = response.json["data"]['dateModified']
    self.assertIn(doc_id, response.headers["Location"])
    self.assertEqual(u"укр.doc", response.json["data"]["title"])
    self.assertNotIn("documentType", response.json["data"])
    # documentOf=lot without relatedItem -> 422, relatedItem is required.
    response = self.app.patch_json(
        "/tenders/{}/documents/{}?acc_token={}".format(self.tender_id, doc_id, self.tender_token),
        {"data": {"documentOf": "lot"}},
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [{u"description": [u"This field is required."], u"location": u"body", u"name": u"relatedItem"}],
    )
    # relatedItem must reference one of the tender's lots.
    response = self.app.patch_json(
        "/tenders/{}/documents/{}?acc_token={}".format(self.tender_id, doc_id, self.tender_token),
        {"data": {"documentOf": "lot", "relatedItem": "0" * 32}},
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [{u"description": [u"relatedItem should be one of lots"], u"location": u"body", u"name": u"relatedItem"}],
    )
    # get tender for lot id
    response = self.app.get("/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token), status=200)
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    tender = response.json["data"]
    # add document with lot_id
    lot_id = tender["lots"][0]["id"]
    response = self.app.patch_json(
        "/tenders/{}/documents/{}?acc_token={}".format(self.tender_id, doc_id, self.tender_token),
        {"data": {"documentOf": "lot", "relatedItem": lot_id}},
        status=200,
    )
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["data"]["relatedItem"], lot_id)
# Tender Lot Bid Resource Test
def create_tender_bid_invalid(self):
    """Reject bids on a multi-lot tender whose lotValues are missing or inconsistent."""
    request_path = "/tenders/{}/bids".format(self.tender_id)
    # Bid without lotValues: lot tenders require per-lot values.
    response = self.app.post_json(request_path, {"data": {"tenderers": [test_organization]}}, status=422)
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [{u"description": [u"This field is required."], u"location": u"body", u"name": u"lotValues"}],
    )
    # lotValues entry without relatedLot.
    response = self.app.post_json(
        request_path,
        {"data": {"tenderers": [test_organization], "lotValues": [{"value": {"amount": 500}}]}},
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [{u"relatedLot": [u"This field is required."]}],
                u"location": u"body",
                u"name": u"lotValues",
            }
        ],
    )
    # relatedLot pointing at a non-existent lot id.
    response = self.app.post_json(
        request_path,
        {"data": {"tenderers": [test_organization], "lotValues": [{"value": {"amount": 500}, "relatedLot": "0" * 32}]}},
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [{u"relatedLot": [u"relatedLot should be one of lots"]}],
                u"location": u"body",
                u"name": u"lotValues",
            }
        ],
    )
    # Bid amount exceeding the lot value.
    response = self.app.post_json(
        request_path,
        {
            "data": {
                "tenderers": [test_organization],
                "lotValues": [{"value": {"amount": 5000000}, "relatedLot": self.initial_lots[0]["id"]}],
            }
        },
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [{u"value": [u"value of bid should be less than value of lot"]}],
                u"location": u"body",
                u"name": u"lotValues",
            }
        ],
    )
    # valueAddedTaxIncluded flag differing from the lot's value.
    response = self.app.post_json(
        request_path,
        {
            "data": {
                "tenderers": [test_organization],
                "lotValues": [
                    {"value": {"amount": 500, "valueAddedTaxIncluded": False}, "relatedLot": self.initial_lots[0]["id"]}
                ],
            }
        },
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [
                    {
                        u"value": [
                            u"valueAddedTaxIncluded of bid should be identical to valueAddedTaxIncluded of value of lot"
                        ]
                    }
                ],
                u"location": u"body",
                u"name": u"lotValues",
            }
        ],
    )
    # Currency differing from the lot's value.
    response = self.app.post_json(
        request_path,
        {
            "data": {
                "tenderers": [test_organization],
                "lotValues": [{"value": {"amount": 500, "currency": "USD"}, "relatedLot": self.initial_lots[0]["id"]}],
            }
        },
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [{u"value": [u"currency of bid should be identical to currency of value of lot"]}],
                u"location": u"body",
                u"name": u"lotValues",
            }
        ],
    )
    # A bid-level value must not be posted alongside lotValues.
    response = self.app.post_json(
        request_path,
        {
            "data": {
                "tenderers": [test_organization],
                "value": {"amount": 500},
                "lotValues": [{"value": {"amount": 500}, "relatedLot": self.initial_lots[0]["id"]}],
            }
        },
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [{u"description": [u"value should be posted for each lot of bid"], u"location": u"body", u"name": u"value"}],
    )
    # tenderers passed as a single object, not a list — surfaces an int() conversion error.
    response = self.app.post_json(
        request_path,
        {
            "data": {
                "tenderers": test_organization,
                "lotValues": [{"value": {"amount": 500}, "relatedLot": self.initial_lots[0]["id"]}],
            }
        },
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertIn(u"invalid literal for int() with base 10", response.json["errors"][0]["description"])
    # Two lotValues referencing the same lot -> duplicated proposals are rejected.
    response = self.app.post_json(
        request_path,
        {
            "data": {
                "tenderers": [test_organization],
                "lotValues": [
                    {"value": {"amount": 500}, "relatedLot": self.initial_lots[0]["id"]},
                    {"value": {"amount": 500}, "relatedLot": self.initial_lots[0]["id"]},
                ],
            }
        },
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [{u"description": [u"bids don't allow duplicated proposals"], u"location": u"body", u"name": u"lotValues"}],
    )
def patch_tender_bid(self):
    """Patch a lot-aware bid and verify when the lotValues date is (not) refreshed."""
    self.set_status("active.tendering")
    related_lot = self.initial_lots[0]["id"]
    create_resp = self.app.post_json(
        "/tenders/{}/bids".format(self.tender_id),
        {"data": {"tenderers": [test_organization], "lotValues": [{"value": {"amount": 500}, "relatedLot": related_lot}]}},
    )
    self.assertEqual(create_resp.status, "201 Created")
    self.assertEqual(create_resp.content_type, "application/json")
    created_bid = create_resp.json["data"]
    bid_token = create_resp.json["access"]["token"]
    initial_lot_value = created_bid["lotValues"][0]
    bid_url = "/tenders/{}/bids/{}?acc_token={}".format(self.tender_id, created_bid["id"], bid_token)

    # Changing only tenderer details must not touch the lotValues date.
    patch_resp = self.app.patch_json(
        bid_url,
        {"data": {"tenderers": [{"name": u"Державне управління управлінням справами"}]}},
    )
    self.assertEqual(patch_resp.status, "200 OK")
    self.assertEqual(patch_resp.content_type, "application/json")
    self.assertEqual(patch_resp.json["data"]["lotValues"][0]["date"], initial_lot_value["date"])
    self.assertNotEqual(patch_resp.json["data"]["tenderers"][0]["name"], created_bid["tenderers"][0]["name"])

    # Re-submitting the identical value keeps the date; restoring the tenderer restores the name.
    patch_resp = self.app.patch_json(
        bid_url,
        {"data": {"lotValues": [{"value": {"amount": 500}, "relatedLot": related_lot}], "tenderers": [test_organization]}},
    )
    self.assertEqual(patch_resp.status, "200 OK")
    self.assertEqual(patch_resp.content_type, "application/json")
    self.assertEqual(patch_resp.json["data"]["lotValues"][0]["date"], initial_lot_value["date"])
    self.assertEqual(patch_resp.json["data"]["tenderers"][0]["name"], created_bid["tenderers"][0]["name"])

    # An actual value change refreshes the lotValues date.
    patch_resp = self.app.patch_json(
        bid_url,
        {"data": {"lotValues": [{"value": {"amount": 400}, "relatedLot": related_lot}]}},
    )
    self.assertEqual(patch_resp.status, "200 OK")
    self.assertEqual(patch_resp.content_type, "application/json")
    self.assertEqual(patch_resp.json["data"]["lotValues"][0]["value"]["amount"], 400)
    self.assertNotEqual(patch_resp.json["data"]["lotValues"][0]["date"], initial_lot_value["date"])
# Tender Lot Feature Bid Resource Test
def create_tender_bid_invalid_feature(self):
    """Reject bids on a featured lot tender: bad lotValues and bad feature parameters."""
    request_path = "/tenders/{}/bids".format(self.tender_id)
    # Bid with neither lotValues nor parameters: both are reported as required.
    response = self.app.post_json(request_path, {"data": {"tenderers": [test_organization]}}, status=422)
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {u"description": [u"All features parameters is required."], u"location": u"body", u"name": u"parameters"},
            {u"description": [u"This field is required."], u"location": u"body", u"name": u"lotValues"},
        ],
    )
    # lotValues entry without relatedLot.
    response = self.app.post_json(
        request_path,
        {"data": {"tenderers": [test_organization], "lotValues": [{"value": {"amount": 500}}]}},
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [{u"relatedLot": [u"This field is required."]}],
                u"location": u"body",
                u"name": u"lotValues",
            }
        ],
    )
    # relatedLot pointing at a non-existent lot id.
    response = self.app.post_json(
        request_path,
        {"data": {"tenderers": [test_organization], "lotValues": [{"value": {"amount": 500}, "relatedLot": "0" * 32}]}},
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [{u"relatedLot": [u"relatedLot should be one of lots"]}],
                u"location": u"body",
                u"name": u"lotValues",
            }
        ],
    )
    # Bid amount exceeding the lot value.
    response = self.app.post_json(
        request_path,
        {
            "data": {
                "tenderers": [test_organization],
                "lotValues": [{"value": {"amount": 5000000}, "relatedLot": self.lot_id}],
            }
        },
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [{u"value": [u"value of bid should be less than value of lot"]}],
                u"location": u"body",
                u"name": u"lotValues",
            }
        ],
    )
    # valueAddedTaxIncluded flag differing from the lot's value.
    response = self.app.post_json(
        request_path,
        {
            "data": {
                "tenderers": [test_organization],
                "lotValues": [{"value": {"amount": 500, "valueAddedTaxIncluded": False}, "relatedLot": self.lot_id}],
            }
        },
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [
                    {
                        u"value": [
                            u"valueAddedTaxIncluded of bid should be identical to valueAddedTaxIncluded of value of lot"
                        ]
                    }
                ],
                u"location": u"body",
                u"name": u"lotValues",
            }
        ],
    )
    # Currency differing from the lot's value.
    response = self.app.post_json(
        request_path,
        {
            "data": {
                "tenderers": [test_organization],
                "lotValues": [{"value": {"amount": 500, "currency": "USD"}, "relatedLot": self.lot_id}],
            }
        },
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [{u"value": [u"currency of bid should be identical to currency of value of lot"]}],
                u"location": u"body",
                u"name": u"lotValues",
            }
        ],
    )
    # tenderers passed as a single object, not a list — surfaces an int() conversion error.
    response = self.app.post_json(
        request_path,
        {
            "data": {
                "tenderers": test_organization,
                "lotValues": [{"value": {"amount": 500}, "relatedLot": self.lot_id}],
            }
        },
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertIn(u"invalid literal for int() with base 10", response.json["errors"][0]["description"])
    # Valid lotValues but no parameters at all: every feature needs a parameter.
    response = self.app.post_json(
        request_path,
        {
            "data": {
                "tenderers": [test_organization],
                "lotValues": [{"value": {"amount": 500}, "relatedLot": self.lot_id}],
            }
        },
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [{u"description": [u"All features parameters is required."], u"location": u"body", u"name": u"parameters"}],
    )
    # Only one of the feature parameters supplied: still rejected.
    response = self.app.post_json(
        request_path,
        {
            "data": {
                "tenderers": [test_organization],
                "lotValues": [{"value": {"amount": 500}, "relatedLot": self.lot_id}],
                "parameters": [{"code": "code_item", "value": 0.01}],
            }
        },
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [{u"description": [u"All features parameters is required."], u"location": u"body", u"name": u"parameters"}],
    )
    # Parameter code not declared by any feature.
    response = self.app.post_json(
        request_path,
        {
            "data": {
                "tenderers": [test_organization],
                "lotValues": [{"value": {"amount": 500}, "relatedLot": self.lot_id}],
                "parameters": [{"code": "code_invalid", "value": 0.01}],
            }
        },
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [{u"code": [u"code should be one of feature code."]}],
                u"location": u"body",
                u"name": u"parameters",
            }
        ],
    )
    # Parameter value not among the feature's enum values (code_tenderer value 0).
    response = self.app.post_json(
        request_path,
        {
            "data": {
                "tenderers": [test_organization],
                "lotValues": [{"value": {"amount": 500}, "relatedLot": self.lot_id}],
                "parameters": [
                    {"code": "code_item", "value": 0.01},
                    {"code": "code_tenderer", "value": 0},
                    {"code": "code_lot", "value": 0.01},
                ],
            }
        },
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [
            {
                u"description": [{u"value": [u"value should be one of feature value."]}],
                u"location": u"body",
                u"name": u"parameters",
            }
        ],
    )
def create_tender_bid_feature(self):
    """Create a feature-parameterised bid, then verify bidding closes with the tender."""
    request_path = "/tenders/{}/bids".format(self.tender_id)
    # The identical payload is posted twice: once successfully, once after completion.
    bid_payload = {
        "data": {
            "tenderers": [test_organization],
            "lotValues": [{"value": {"amount": 500}, "relatedLot": self.lot_id}],
            "parameters": [
                {"code": "code_item", "value": 0.01},
                {"code": "code_tenderer", "value": 0.01},
                {"code": "code_lot", "value": 0.01},
            ],
        }
    }
    creation = self.app.post_json(request_path, bid_payload)
    self.assertEqual(creation.status, "201 Created")
    self.assertEqual(creation.content_type, "application/json")
    created = creation.json["data"]
    self.assertEqual(created["tenderers"][0]["name"], test_organization["name"])
    self.assertIn("id", created)
    self.assertIn(created["id"], creation.headers["Location"])
    # Once the tender is complete, new bids are rejected.
    self.set_status("complete")
    forbidden = self.app.post_json(request_path, bid_payload, status=403)
    self.assertEqual(forbidden.status, "403 Forbidden")
    self.assertEqual(forbidden.content_type, "application/json")
    self.assertEqual(forbidden.json["errors"][0]["description"], "Can't add bid in current (complete) tender status")
# Tender Lot Process Test
def proc_1lot_0bid(self):
    """A one-lot tender that receives no bids ends up unsuccessful after tendering."""
    self.app.authorization = ("Basic", ("broker", ""))
    # create tender: give every lot a fresh id so items can reference them
    lots = []
    for i in self.initial_lots:
        lot = deepcopy(i)
        lot["id"] = uuid4().hex
        lots.append(lot)
    self.initial_data["lots"] = self.initial_lots = lots
    data = deepcopy(self.initial_data)
    data["agreements"] = [test_agreement]
    data["agreements"][0]["id"] = "1" * 32
    # Attach items to lots on the payload actually being posted. The original
    # iterated self.initial_data["items"], which mutated the shared fixture and
    # left the posted items without relatedLot; siblings proc_1lot_1bid/2bid
    # consistently use data["items"].
    for i, item in enumerate(data["items"]):
        item["relatedLot"] = lots[i % len(lots)]["id"]
    response = self.app.post_json("/tenders", {"data": data})
    tender_id = self.tender_id = response.json["data"]["id"]
    owner_token = response.json["access"]["token"]
    # switch to active.enquiries
    self.set_status("active.enquiries")
    lot_id = self.initial_lots[0]["id"]
    # add relatedLot for item
    response = self.app.patch_json(
        "/tenders/{}?acc_token={}".format(tender_id, owner_token), {"data": {"items": [{"relatedLot": lot_id}]}}
    )
    self.assertEqual(response.status, "200 OK")
    # switch to active.tendering with a scheduled auction start for the lot
    response = self.set_status(
        "active.tendering",
        {
            "lots": [
                {
                    "auctionPeriod": {
                        "startDate": (get_now() + timedelta(days=self.days_till_auction_starts)).isoformat()
                    }
                }
            ]
        },
    )
    self.assertIn("auctionPeriod", response.json["data"]["lots"][0])
    # no bids arrive: clear the auction start and roll the tender state back
    response = self.set_status(
        "active.auction", {"lots": [{"auctionPeriod": {"startDate": None}}], "status": "active.tendering"}
    )
    # the chronograph recheck switches both the lot and the tender to unsuccessful
    self.app.authorization = ("Basic", ("chronograph", ""))
    response = self.app.patch_json("/tenders/{}".format(tender_id), {"data": {"id": tender_id}})
    self.assertEqual(response.json["data"]["lots"][0]["status"], "unsuccessful")
    self.assertEqual(response.json["data"]["status"], "unsuccessful")
def proc_1lot_1bid(self):
    """Happy path for a one-lot tender with a single bid: award, contract, complete."""
    self.app.authorization = ("Basic", ("broker", ""))
    # create tender: give every lot a fresh id so items can reference them
    lots = []
    data = deepcopy(self.initial_data)
    for i in self.initial_lots:
        lot = deepcopy(i)
        lot["id"] = uuid4().hex
        lots.append(lot)
    data["lots"] = self.initial_lots = lots
    for i, item in enumerate(data["items"]):
        item["relatedLot"] = lots[i % len(lots)]["id"]
    response = self.app.post_json("/tenders", {"data": data})
    tender_id = self.tender_id = response.json["data"]["id"]
    owner_token = response.json["access"]["token"]
    # switch to active.enquiries
    self.set_status("active.enquiries")
    lot_id = self.initial_lots[0]["id"]
    # add relatedLot for item
    response = self.app.patch_json(
        "/tenders/{}?acc_token={}".format(tender_id, owner_token), {"data": {"items": [{"relatedLot": lot_id}]}}
    )
    self.assertEqual(response.status, "200 OK")
    # switch to active.tendering with a scheduled auction start for the lot
    response = self.set_status(
        "active.tendering",
        {
            "lots": [
                {
                    "auctionPeriod": {
                        "startDate": (get_now() + timedelta(days=self.days_till_auction_starts)).isoformat()
                    }
                }
            ]
        },
    )
    self.assertIn("auctionPeriod", response.json["data"]["lots"][0])
    # create bid
    self.app.authorization = ("Basic", ("broker", ""))
    response = self.app.post_json(
        "/tenders/{}/bids".format(tender_id),
        {"data": {"tenderers": [test_organization], "lotValues": [{"value": {"amount": 500}, "relatedLot": lot_id}]}},
    )
    # switch to active.qualification: end the tendering period, then let the chronograph recheck
    response = self.set_status("active.tendering", start_end="end")
    self.app.authorization = ("Basic", ("chronograph", ""))
    response = self.app.patch_json("/tenders/{}".format(tender_id), {"data": {"id": tender_id}})
    # get awards
    self.app.authorization = ("Basic", ("broker", ""))
    response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
    # get pending award
    award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending"][0]
    # set award as active
    self.app.patch_json(
        "/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token), {"data": {"status": "active"}}
    )
    # get contract id
    response = self.app.get("/tenders/{}".format(tender_id))
    contract_id = response.json["data"]["contracts"][-1]["id"]
    # after the standstill period ends
    self.set_status("active.awarded", start_end="end")
    # time travel: read and immediately re-save the tender document
    tender = self.db.get(tender_id)
    self.db.save(tender)
    # sign contract
    self.app.authorization = ("Basic", ("broker", ""))
    self.app.patch_json(
        "/tenders/{}/contracts/{}?acc_token={}".format(tender_id, contract_id, owner_token),
        {"data": {"status": "active", "value": {"valueAddedTaxIncluded": False}}},
    )
    # check status: both the lot and the whole tender end up complete
    self.app.authorization = ("Basic", ("broker", ""))
    response = self.app.get("/tenders/{}".format(tender_id))
    self.assertEqual(response.json["data"]["lots"][0]["status"], "complete")
    self.assertEqual(response.json["data"]["status"], "complete")
    def proc_1lot_2bid(self):
        """1 lot / 2 bids: the auction runs, the winner is awarded and the
        contract signed; both the lot and the tender must end "complete"."""
        self.app.authorization = ("Basic", ("broker", ""))
        # create tender (lots get fresh ids; items are tied to lots round-robin)
        lots = []
        data = deepcopy(self.initial_data)
        for i in self.initial_lots:
            lot = deepcopy(i)
            lot["id"] = uuid4().hex
            lots.append(lot)
        data["lots"] = self.initial_lots = lots
        for i, item in enumerate(data["items"]):
            item["relatedLot"] = lots[i % len(lots)]["id"]
        response = self.app.post_json("/tenders", {"data": data})
        tender_id = self.tender_id = response.json["data"]["id"]
        owner_token = response.json["access"]["token"]
        # switch to active.enquiries
        self.set_status("active.enquiries")
        # add lot
        lot_id = self.initial_lots[0]["id"]
        # add relatedLot for item
        response = self.app.patch_json(
            "/tenders/{}?acc_token={}".format(tender_id, owner_token), {"data": {"items": [{"relatedLot": lot_id}]}}
        )
        self.assertEqual(response.status, "200 OK")
        # switch to active.tendering
        response = self.set_status(
            "active.tendering",
            {
                "lots": [
                    {
                        "auctionPeriod": {
                            "startDate": (get_now() + timedelta(days=self.days_till_auction_starts)).isoformat()
                        }
                    }
                ]
            },
        )
        self.assertIn("auctionPeriod", response.json["data"]["lots"][0])
        # create bid
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.post_json(
            "/tenders/{}/bids".format(tender_id),
            {"data": {"tenderers": [test_organization], "lotValues": [{"value": {"amount": 500}, "relatedLot": lot_id}]}},
        )
        bid_id = response.json["data"]["id"]
        bid_token = response.json["access"]["token"]
        # create second bid
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.post_json(
            "/tenders/{}/bids".format(tender_id),
            {"data": {"tenderers": [test_organization], "lotValues": [{"value": {"amount": 500}, "relatedLot": lot_id}]}},
        )
        # switch to active.auction
        self.set_status("active.auction")
        # get auction info
        self.app.authorization = ("Basic", ("auction", ""))
        response = self.app.get("/tenders/{}/auction".format(tender_id))
        auction_bids_data = response.json["data"]["bids"]
        # posting auction urls (one auctionUrl per lot, one participationUrl per bid lotValue)
        response = self.app.patch_json(
            "/tenders/{}/auction/{}".format(tender_id, lot_id),
            {
                "data": {
                    "lots": [
                        {"id": i["id"], "auctionUrl": "https://tender.auction.url"} for i in response.json["data"]["lots"]
                    ],
                    "bids": [
                        {
                            "id": i["id"],
                            "lotValues": [
                                {
                                    "relatedLot": j["relatedLot"],
                                    "participationUrl": "https://tender.auction.url/for_bid/{}".format(i["id"]),
                                }
                                for j in i["lotValues"]
                            ],
                        }
                        for i in auction_bids_data
                    ],
                }
            },
        )
        # view bid participationUrl
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.get("/tenders/{}/bids/{}?acc_token={}".format(tender_id, bid_id, bid_token))
        self.assertEqual(
            response.json["data"]["lotValues"][0]["participationUrl"],
            "https://tender.auction.url/for_bid/{}".format(bid_id),
        )
        # posting auction results
        self.app.authorization = ("Basic", ("auction", ""))
        response = self.app.post_json(
            "/tenders/{}/auction/{}".format(tender_id, lot_id), {"data": {"bids": auction_bids_data}}
        )
        # get awards
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
        # get pending award
        award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending"][0]
        # set award as active
        self.app.patch_json(
            "/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token), {"data": {"status": "active"}}
        )
        # get contract id
        response = self.app.get("/tenders/{}".format(tender_id))
        contract_id = response.json["data"]["contracts"][-1]["id"]
        # after stand still period
        self.set_status("active.awarded", start_end="end")
        # time travel: re-save the tender unchanged
        # NOTE(review): no fields are modified between get and save — presumably
        # this forces a revision/reindex so the shifted periods take effect; confirm.
        tender = self.db.get(tender_id)
        self.db.save(tender)
        # sign contract
        self.app.authorization = ("Basic", ("broker", ""))
        self.app.patch_json(
            "/tenders/{}/contracts/{}?acc_token={}".format(tender_id, contract_id, owner_token),
            {"data": {"status": "active", "value": {"valueAddedTaxIncluded": False}}},
        )
        # check status
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.get("/tenders/{}".format(tender_id))
        self.assertEqual(response.json["data"]["lots"][0]["status"], "complete")
        self.assertEqual(response.json["data"]["status"], "complete")
def proc_2lot_0bid(self):
self.app.authorization = ("Basic", ("broker", ""))
# create tender
response = self.app.post_json("/tenders", {"data": self.initial_data})
tender_id = self.tender_id = response.json["data"]["id"]
owner_token = response.json["access"]["token"]
# switch to active.enquiries
self.set_status("active.enquiries")
lots = []
for lot in 2 * self.test_lots_data:
# add lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
)
self.assertEqual(response.status, "201 Created")
lots.append(response.json["data"]["id"])
# add item
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token),
{"data": {"items": [self.initial_data["items"][0] for i in lots]}},
)
# add relatedLot for item
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token),
{"data": {"items": [{"relatedLot": i} for i in lots]}},
)
self.assertEqual(response.status, "200 OK")
# switch to active.tendering
response = self.set_status(
"active.tendering",
{
"lots": [
{
"auctionPeriod": {
"startDate": (get_now() + timedelta(days=self.days_till_auction_starts)).isoformat()
}
}
for i in lots
]
},
)
self.assertTrue(all(["auctionPeriod" in i for i in response.json["data"]["lots"]]))
# switch to unsuccessful
response = self.set_status(
"active.auction", {"lots": [{"auctionPeriod": {"startDate": None}} for i in lots], "status": "active.tendering"}
)
self.app.authorization = ("Basic", ("chronograph", ""))
response = self.app.patch_json("/tenders/{}".format(tender_id), {"data": {"id": tender_id}})
self.assertTrue(all([i["status"] == "unsuccessful" for i in response.json["data"]["lots"]]))
self.assertEqual(response.json["data"]["status"], "unsuccessful")
def proc_2lot_2can(self):
self.app.authorization = ("Basic", ("broker", ""))
# create tender
response = self.app.post_json("/tenders", {"data": self.initial_data})
tender_id = self.tender_id = response.json["data"]["id"]
owner_token = response.json["access"]["token"]
# switch to active.enquiries
self.set_status("active.enquiries")
lots = []
for lot in 2 * self.test_lots_data:
# add lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
)
self.assertEqual(response.status, "201 Created")
lots.append(response.json["data"]["id"])
# add item
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token),
{"data": {"items": [self.initial_data["items"][0] for i in lots]}},
)
# add relatedLot for item
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token),
{"data": {"items": [{"relatedLot": i} for i in lots]}},
)
self.assertEqual(response.status, "200 OK")
# switch to active.tendering
response = self.set_status(
"active.tendering",
{
"lots": [
{
"auctionPeriod": {
"startDate": (get_now() + timedelta(days=self.days_till_auction_starts)).isoformat()
}
}
for i in lots
]
},
)
self.assertTrue(all(["auctionPeriod" in i for i in response.json["data"]["lots"]]))
# cancel every lot
for lot_id in lots:
response = self.app.post_json(
"/tenders/{}/cancellations?acc_token={}".format(tender_id, owner_token),
{
"data": {
"reason": "cancellation reason",
"status": "active",
"cancellationOf": "lot",
"relatedLot": lot_id,
}
},
)
response = self.app.get("/tenders/{}".format(tender_id))
self.assertTrue(all([i["status"] == "cancelled" for i in response.json["data"]["lots"]]))
self.assertEqual(response.json["data"]["status"], "cancelled")
    def proc_2lot_2bid_0com_1can_before_auction(self):
        """2 lots / 2 bids, lot 1 cancelled before the auction: lot 2's only
        pending award is rejected, so the lots end ["cancelled",
        "unsuccessful"] and the tender ends "unsuccessful"."""
        self.app.authorization = ("Basic", ("broker", ""))
        # create tender
        response = self.app.post_json("/tenders", {"data": self.initial_data})
        tender_id = self.tender_id = response.json["data"]["id"]
        owner_token = response.json["access"]["token"]
        # switch to active.enquiries
        self.set_status("active.enquiries")
        lots = []
        for lot in 2 * self.test_lots_data:
            # add lot (always the first template; the loop var only sets the count)
            response = self.app.post_json(
                "/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
            )
            self.assertEqual(response.status, "201 Created")
            lots.append(response.json["data"]["id"])
        # add item
        response = self.app.patch_json(
            "/tenders/{}?acc_token={}".format(tender_id, owner_token),
            {"data": {"items": [self.initial_data["items"][0] for i in lots]}},
        )
        # add relatedLot for item
        response = self.app.patch_json(
            "/tenders/{}?acc_token={}".format(tender_id, owner_token),
            {"data": {"items": [{"relatedLot": i} for i in lots]}},
        )
        self.assertEqual(response.status, "200 OK")
        # switch to active.tendering
        response = self.set_status(
            "active.tendering",
            {
                "lots": [
                    {
                        "auctionPeriod": {
                            "startDate": (get_now() + timedelta(days=self.days_till_auction_starts)).isoformat()
                        }
                    }
                    for i in lots
                ]
            },
        )
        # create bid (covers both lots)
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.post_json(
            "/tenders/{}/bids".format(tender_id),
            {
                "data": {
                    "tenderers": [test_organization],
                    "lotValues": [{"value": {"amount": 500}, "relatedLot": lot_id} for lot_id in lots],
                }
            },
        )
        # for first lot
        lot_id = lots[0]
        # create bid #2 for 1 lot
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.post_json(
            "/tenders/{}/bids".format(tender_id),
            {"data": {"tenderers": [test_organization], "lotValues": [{"value": {"amount": 500}, "relatedLot": lot_id}]}},
        )
        # cancel lot
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.post_json(
            "/tenders/{}/cancellations?acc_token={}".format(tender_id, owner_token),
            {"data": {"reason": "cancellation reason", "status": "active", "cancellationOf": "lot", "relatedLot": lot_id}},
        )
        # switch to active.qualification
        response = self.set_status("active.auction", {"status": "active.tendering"})
        self.app.authorization = ("Basic", ("chronograph", ""))
        response = self.app.patch_json("/tenders/{}".format(tender_id), {"data": {"id": tender_id}})
        self.assertEqual(response.json["data"]["status"], "active.qualification")
        # for second lot
        lot_id = lots[1]
        # get awards
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
        # get pending award
        award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
        # set award as unsuccessful
        self.app.patch_json(
            "/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token),
            {"data": {"status": "unsuccessful"}},
        )
        # after stand still period
        self.set_status("active.awarded", start_end="end")
        # time travel: re-save the tender unchanged (NOTE(review): presumably
        # forces a revision so the shifted periods take effect; confirm)
        tender = self.db.get(tender_id)
        self.db.save(tender)
        # check tender status
        self.app.authorization = ("Basic", ("chronograph", ""))
        response = self.app.patch_json("/tenders/{}".format(tender_id), {"data": {"id": tender_id}})
        # check status
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.get("/tenders/{}".format(tender_id))
        self.assertEqual([i["status"] for i in response.json["data"]["lots"]], [u"cancelled", u"unsuccessful"])
        self.assertEqual(response.json["data"]["status"], "unsuccessful")
    def proc_2lot_1bid_0com_1can(self):
        """2 lots / 1 bid: lot 1 is cancelled after qualification starts and
        lot 2's pending award is rejected, so the lots end ["cancelled",
        "unsuccessful"] and the tender ends "unsuccessful"."""
        self.app.authorization = ("Basic", ("broker", ""))
        # create tender
        response = self.app.post_json("/tenders", {"data": self.initial_data})
        tender_id = self.tender_id = response.json["data"]["id"]
        owner_token = response.json["access"]["token"]
        # switch to active.enquiries
        self.set_status("active.enquiries")
        lots = []
        for lot in 2 * self.test_lots_data:
            # add lot (always the first template; the loop var only sets the count)
            response = self.app.post_json(
                "/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
            )
            self.assertEqual(response.status, "201 Created")
            lots.append(response.json["data"]["id"])
        # add item
        response = self.app.patch_json(
            "/tenders/{}?acc_token={}".format(tender_id, owner_token),
            {"data": {"items": [self.initial_data["items"][0] for i in lots]}},
        )
        # add relatedLot for item
        response = self.app.patch_json(
            "/tenders/{}?acc_token={}".format(tender_id, owner_token),
            {"data": {"items": [{"relatedLot": i} for i in lots]}},
        )
        self.assertEqual(response.status, "200 OK")
        # switch to active.tendering
        response = self.set_status(
            "active.tendering",
            {
                "lots": [
                    {
                        "auctionPeriod": {
                            "startDate": (get_now() + timedelta(days=self.days_till_auction_starts)).isoformat()
                        }
                    }
                    for i in lots
                ]
            },
        )
        # create bid (covers both lots)
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.post_json(
            "/tenders/{}/bids".format(tender_id),
            {
                "data": {
                    "tenderers": [test_organization],
                    "lotValues": [{"value": {"amount": 500}, "relatedLot": lot_id} for lot_id in lots],
                }
            },
        )
        # switch to active.qualification
        response = self.set_status(
            "active.auction", {"lots": [{"auctionPeriod": {"startDate": None}} for i in lots], "status": "active.tendering"}
        )
        self.app.authorization = ("Basic", ("chronograph", ""))
        response = self.app.patch_json("/tenders/{}".format(tender_id), {"data": {"id": tender_id}})
        # for first lot
        lot_id = lots[0]
        # cancel lot
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.post_json(
            "/tenders/{}/cancellations?acc_token={}".format(tender_id, owner_token),
            {"data": {"reason": "cancellation reason", "status": "active", "cancellationOf": "lot", "relatedLot": lot_id}},
        )
        # for second lot
        lot_id = lots[1]
        # get awards
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
        # get pending award
        award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
        # set award as unsuccessful
        self.app.patch_json(
            "/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token),
            {"data": {"status": "unsuccessful"}},
        )
        # after stand still period
        self.set_status("active.awarded", start_end="end")
        # time travel: re-save the tender unchanged (NOTE(review): presumably
        # forces a revision so the shifted periods take effect; confirm)
        tender = self.db.get(tender_id)
        self.db.save(tender)
        # check tender status
        self.app.authorization = ("Basic", ("chronograph", ""))
        response = self.app.patch_json("/tenders/{}".format(tender_id), {"data": {"id": tender_id}})
        # check status
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.get("/tenders/{}".format(tender_id))
        self.assertEqual([i["status"] for i in response.json["data"]["lots"]], [u"cancelled", u"unsuccessful"])
        self.assertEqual(response.json["data"]["status"], "unsuccessful")
    def proc_2lot_1bid_2com_1win(self):
        """2 lots / 1 bid spanning both lots: for each lot in turn the pending
        award is activated and its contract signed, so every lot and the
        tender end "complete"."""
        self.app.authorization = ("Basic", ("broker", ""))
        # create tender
        response = self.app.post_json("/tenders", {"data": self.initial_data})
        tender_id = self.tender_id = response.json["data"]["id"]
        owner_token = response.json["access"]["token"]
        # switch to active.enquiries
        self.set_status("active.enquiries")
        lots = []
        for lot in 2 * self.test_lots_data:
            # add lot (always the first template; the loop var only sets the count)
            response = self.app.post_json(
                "/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
            )
            self.assertEqual(response.status, "201 Created")
            lots.append(response.json["data"]["id"])
        # add item
        response = self.app.patch_json(
            "/tenders/{}?acc_token={}".format(tender_id, owner_token),
            {"data": {"items": [self.initial_data["items"][0] for i in lots]}},
        )
        # add relatedLot for item
        response = self.app.patch_json(
            "/tenders/{}?acc_token={}".format(tender_id, owner_token),
            {"data": {"items": [{"relatedLot": i} for i in lots]}},
        )
        self.assertEqual(response.status, "200 OK")
        # switch to active.tendering
        response = self.set_status(
            "active.tendering",
            {
                "lots": [
                    {
                        "auctionPeriod": {
                            "startDate": (get_now() + timedelta(days=self.days_till_auction_starts)).isoformat()
                        }
                    }
                    for i in lots
                ]
            },
        )
        # create bid (covers both lots)
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.post_json(
            "/tenders/{}/bids".format(tender_id),
            {
                "data": {
                    "tenderers": [test_organization],
                    "lotValues": [{"value": {"amount": 500}, "relatedLot": lot_id} for lot_id in lots],
                }
            },
        )
        # switch to active.qualification
        response = self.set_status(
            "active.auction", {"lots": [{"auctionPeriod": {"startDate": None}} for i in lots], "status": "active.tendering"}
        )
        self.app.authorization = ("Basic", ("chronograph", ""))
        response = self.app.patch_json("/tenders/{}".format(tender_id), {"data": {"id": tender_id}})
        for lot_id in lots:
            # get awards
            self.app.authorization = ("Basic", ("broker", ""))
            response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
            # get pending award
            award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
            # set award as active
            self.app.patch_json(
                "/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token),
                {"data": {"status": "active"}},
            )
            # get contract id
            response = self.app.get("/tenders/{}".format(tender_id))
            contract_id = response.json["data"]["contracts"][-1]["id"]
            # after stand still period
            self.set_status("active.awarded", start_end="end")
            # time travel: re-save the tender unchanged (NOTE(review): confirm why needed)
            tender = self.db.get(tender_id)
            self.db.save(tender)
            # sign contract
            self.app.authorization = ("Basic", ("broker", ""))
            self.app.patch_json(
                "/tenders/{}/contracts/{}?acc_token={}".format(tender_id, contract_id, owner_token),
                {"data": {"status": "active", "value": {"valueAddedTaxIncluded": False}}},
            )
        # check status
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.get("/tenders/{}".format(tender_id))
        self.assertTrue(all([i["status"] == "complete" for i in response.json["data"]["lots"]]))
        self.assertEqual(response.json["data"]["status"], "complete")
    def proc_2lot_1bid_0com_0win(self):
        """2 lots / 1 bid / no winners: each lot's single pending award is
        rejected, so every lot and the tender itself end "unsuccessful"."""
        self.app.authorization = ("Basic", ("broker", ""))
        # create tender
        response = self.app.post_json("/tenders", {"data": self.initial_data})
        tender_id = self.tender_id = response.json["data"]["id"]
        owner_token = response.json["access"]["token"]
        # switch to active.enquiries
        self.set_status("active.enquiries")
        lots = []
        for lot in 2 * self.test_lots_data:
            # add lot (always the first template; the loop var only sets the count)
            response = self.app.post_json(
                "/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
            )
            self.assertEqual(response.status, "201 Created")
            lots.append(response.json["data"]["id"])
        # add item
        response = self.app.patch_json(
            "/tenders/{}?acc_token={}".format(tender_id, owner_token),
            {"data": {"items": [self.initial_data["items"][0] for i in lots]}},
        )
        # add relatedLot for item
        response = self.app.patch_json(
            "/tenders/{}?acc_token={}".format(tender_id, owner_token),
            {"data": {"items": [{"relatedLot": i} for i in lots]}},
        )
        self.assertEqual(response.status, "200 OK")
        # switch to active.tendering
        response = self.set_status(
            "active.tendering",
            {
                "lots": [
                    {
                        "auctionPeriod": {
                            "startDate": (get_now() + timedelta(days=self.days_till_auction_starts)).isoformat()
                        }
                    }
                    for i in lots
                ]
            },
        )
        # create bid (covers both lots)
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.post_json(
            "/tenders/{}/bids".format(tender_id),
            {
                "data": {
                    "tenderers": [test_organization],
                    "lotValues": [{"value": {"amount": 500}, "relatedLot": lot_id} for lot_id in lots],
                }
            },
        )
        # switch to active.qualification
        response = self.set_status(
            "active.auction", {"lots": [{"auctionPeriod": {"startDate": None}} for i in lots], "status": "active.tendering"}
        )
        self.app.authorization = ("Basic", ("chronograph", ""))
        response = self.app.patch_json("/tenders/{}".format(tender_id), {"data": {"id": tender_id}})
        # for every lot
        for lot_id in lots:
            # get awards
            self.app.authorization = ("Basic", ("broker", ""))
            response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
            # get pending award
            award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
            # set award as unsuccessful
            self.app.patch_json(
                "/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token),
                {"data": {"status": "unsuccessful"}},
            )
            # after stand still period
            self.set_status("active.awarded", start_end="end")
            # time travel: re-save the tender unchanged (NOTE(review): confirm why needed)
            tender = self.db.get(tender_id)
            self.db.save(tender)
        # check tender status
        self.set_status("active.awarded", start_end="end")
        self.app.authorization = ("Basic", ("chronograph", ""))
        response = self.app.patch_json("/tenders/{}".format(tender_id), {"data": {"id": tender_id}})
        # check status
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.get("/tenders/{}".format(tender_id))
        self.assertTrue(all([i["status"] == "unsuccessful" for i in response.json["data"]["lots"]]))
        self.assertEqual(response.json["data"]["status"], "unsuccessful")
    def proc_2lot_1bid_1com_1win(self):
        """2 lots / 1 bid covering both: lot 1's award is activated and its
        contract signed ("complete"); lot 2's award is rejected
        ("unsuccessful"); the tender still ends "complete"."""
        self.app.authorization = ("Basic", ("broker", ""))
        # create tender
        response = self.app.post_json("/tenders", {"data": self.initial_data})
        tender_id = self.tender_id = response.json["data"]["id"]
        owner_token = response.json["access"]["token"]
        # switch to active.enquiries
        self.set_status("active.enquiries")
        lots = []
        for lot in 2 * self.test_lots_data:
            # add lot (always the first template; the loop var only sets the count)
            response = self.app.post_json(
                "/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
            )
            self.assertEqual(response.status, "201 Created")
            lots.append(response.json["data"]["id"])
        # add item
        response = self.app.patch_json(
            "/tenders/{}?acc_token={}".format(tender_id, owner_token),
            {"data": {"items": [self.initial_data["items"][0] for i in lots]}},
        )
        # add relatedLot for item
        response = self.app.patch_json(
            "/tenders/{}?acc_token={}".format(tender_id, owner_token),
            {"data": {"items": [{"relatedLot": i} for i in lots]}},
        )
        self.assertEqual(response.status, "200 OK")
        # switch to active.tendering
        response = self.set_status(
            "active.tendering",
            {
                "lots": [
                    {
                        "auctionPeriod": {
                            "startDate": (get_now() + timedelta(days=self.days_till_auction_starts)).isoformat()
                        }
                    }
                    for i in lots
                ]
            },
        )
        # create bid (covers both lots)
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.post_json(
            "/tenders/{}/bids".format(tender_id),
            {
                "data": {
                    "tenderers": [test_organization],
                    "lotValues": [{"value": {"amount": 500}, "relatedLot": lot_id} for lot_id in lots],
                }
            },
        )
        # switch to active.qualification
        response = self.set_status(
            "active.auction", {"lots": [{"auctionPeriod": {"startDate": None}} for i in lots], "status": "active.tendering"}
        )
        self.app.authorization = ("Basic", ("chronograph", ""))
        response = self.app.patch_json("/tenders/{}".format(tender_id), {"data": {"id": tender_id}})
        # for first lot
        lot_id = lots[0]
        # get awards
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
        # get pending award
        award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
        # set award as active
        self.app.patch_json(
            "/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token), {"data": {"status": "active"}}
        )
        # get contract id
        response = self.app.get("/tenders/{}".format(tender_id))
        contract_id = response.json["data"]["contracts"][-1]["id"]
        # after stand still period
        self.set_status("active.awarded", start_end="end")
        # time travel: re-save the tender unchanged (NOTE(review): confirm why needed)
        tender = self.db.get(tender_id)
        self.db.save(tender)
        # sign contract
        self.app.authorization = ("Basic", ("broker", ""))
        self.app.patch_json(
            "/tenders/{}/contracts/{}?acc_token={}".format(tender_id, contract_id, owner_token),
            {"data": {"status": "active", "value": {"valueAddedTaxIncluded": False}}},
        )
        # for second lot
        lot_id = lots[1]
        # get awards
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
        # get pending award
        award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
        # set award as unsuccessful
        self.app.patch_json(
            "/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token),
            {"data": {"status": "unsuccessful"}},
        )
        # after stand still period
        self.set_status("active.awarded", start_end="end")
        # time travel
        tender = self.db.get(tender_id)
        self.db.save(tender)
        # check tender status
        self.app.authorization = ("Basic", ("chronograph", ""))
        response = self.app.patch_json("/tenders/{}".format(tender_id), {"data": {"id": tender_id}})
        # check status
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.get("/tenders/{}".format(tender_id))
        self.assertEqual([i["status"] for i in response.json["data"]["lots"]], [u"complete", u"unsuccessful"])
        self.assertEqual(response.json["data"]["status"], "complete")
    def proc_2lot_2bid_2com_2win(self):
        """2 lots / 2 bids on every lot: auctions run per lot; lot 1's award
        is activated and signed; lot 2's first award is rejected, the
        follow-up award activated and signed; everything ends "complete"."""
        self.app.authorization = ("Basic", ("broker", ""))
        # create tender
        response = self.app.post_json("/tenders", {"data": self.initial_data})
        tender_id = self.tender_id = response.json["data"]["id"]
        owner_token = response.json["access"]["token"]
        # switch to active.enquiries
        self.set_status("active.enquiries")
        lots = []
        for lot in 2 * self.test_lots_data:
            # add lot (always the first template; the loop var only sets the count)
            response = self.app.post_json(
                "/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
            )
            self.assertEqual(response.status, "201 Created")
            lots.append(response.json["data"]["id"])
        self.initial_lots = lots
        # add item
        response = self.app.patch_json(
            "/tenders/{}?acc_token={}".format(tender_id, owner_token),
            {"data": {"items": [self.initial_data["items"][0] for i in lots]}},
        )
        # add relatedLot for item
        response = self.app.patch_json(
            "/tenders/{}?acc_token={}".format(tender_id, owner_token),
            {"data": {"items": [{"relatedLot": i} for i in lots]}},
        )
        self.assertEqual(response.status, "200 OK")
        # switch to active.tendering
        response = self.set_status(
            "active.tendering",
            {
                "lots": [
                    {
                        "auctionPeriod": {
                            "startDate": (get_now() + timedelta(days=self.days_till_auction_starts)).isoformat()
                        }
                    }
                    for i in lots
                ]
            },
        )
        # create bid (covers both lots)
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.post_json(
            "/tenders/{}/bids".format(tender_id),
            {
                "data": {
                    "tenderers": [test_organization],
                    "lotValues": [{"value": {"amount": 500}, "relatedLot": lot_id} for lot_id in lots],
                }
            },
        )
        # create second bid
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.post_json(
            "/tenders/{}/bids".format(tender_id),
            {
                "data": {
                    "tenderers": [test_organization],
                    "lotValues": [{"value": {"amount": 500}, "relatedLot": lot_id} for lot_id in lots],
                }
            },
        )
        # switch to active.auction
        self.set_status("active.auction")
        # get auction info
        self.app.authorization = ("Basic", ("auction", ""))
        response = self.app.get("/tenders/{}/auction".format(tender_id))
        auction_bids_data = response.json["data"]["bids"]
        for lot_id in lots:
            # posting auction urls (one auctionUrl per lot, one participationUrl per bid lotValue)
            response = self.app.patch_json(
                "/tenders/{}/auction/{}".format(tender_id, lot_id),
                {
                    "data": {
                        "lots": [
                            {"id": i["id"], "auctionUrl": "https://tender.auction.url"}
                            for i in response.json["data"]["lots"]
                        ],
                        "bids": [
                            {
                                "id": i["id"],
                                "lotValues": [
                                    {
                                        "relatedLot": j["relatedLot"],
                                        "participationUrl": "https://tender.auction.url/for_bid/{}".format(i["id"]),
                                    }
                                    for j in i["lotValues"]
                                ],
                            }
                            for i in auction_bids_data
                        ],
                    }
                },
            )
            # posting auction results
            self.app.authorization = ("Basic", ("auction", ""))
            response = self.app.post_json(
                "/tenders/{}/auction/{}".format(tender_id, lot_id), {"data": {"bids": auction_bids_data}}
            )
        # for first lot
        lot_id = lots[0]
        # get awards
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
        # get pending award
        award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
        # set award as active
        self.app.patch_json(
            "/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token), {"data": {"status": "active"}}
        )
        # get contract id
        response = self.app.get("/tenders/{}".format(tender_id))
        contract_id = response.json["data"]["contracts"][-1]["id"]
        # after stand still period
        self.set_status("active.awarded", start_end="end")
        # time travel: re-save the tender unchanged (NOTE(review): confirm why needed)
        tender = self.db.get(tender_id)
        self.db.save(tender)
        # sign contract
        self.app.authorization = ("Basic", ("broker", ""))
        self.app.patch_json(
            "/tenders/{}/contracts/{}?acc_token={}".format(tender_id, contract_id, owner_token),
            {"data": {"status": "active", "value": {"valueAddedTaxIncluded": False}}},
        )
        # for second lot
        lot_id = lots[1]
        # get awards
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
        # get pending award
        award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
        # set award as unsuccessful (a new pending award is then expected for this lot)
        self.app.patch_json(
            "/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token),
            {"data": {"status": "unsuccessful"}},
        )
        # get awards
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
        # get pending award
        award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
        # set award as active
        self.app.patch_json(
            "/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token), {"data": {"status": "active"}}
        )
        # get contract id
        response = self.app.get("/tenders/{}".format(tender_id))
        contract_id = response.json["data"]["contracts"][-1]["id"]
        # after stand still period
        self.set_status("active.awarded", start_end="end")
        # time travel
        tender = self.db.get(tender_id)
        self.db.save(tender)
        # sign contract
        self.app.authorization = ("Basic", ("broker", ""))
        self.app.patch_json(
            "/tenders/{}/contracts/{}?acc_token={}".format(tender_id, contract_id, owner_token),
            {"data": {"status": "active", "value": {"valueAddedTaxIncluded": False}}},
        )
        # check status
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.get("/tenders/{}".format(tender_id))
        self.assertTrue(all([i["status"] == "complete" for i in response.json["data"]["lots"]]))
        self.assertEqual(response.json["data"]["status"], "complete")
    def proc_2lot_1feature_2bid_2com_2win(self):
        """2 lots with an item feature, one bid per lot (the first carries a
        feature parameter): both awards are activated, both contracts signed,
        and every lot plus the tender end "complete"."""
        self.app.authorization = ("Basic", ("broker", ""))
        # create tender
        response = self.app.post_json("/tenders", {"data": self.initial_data})
        tender_id = self.tender_id = response.json["data"]["id"]
        owner_token = response.json["access"]["token"]
        # switch to active.enquiries
        self.set_status("active.enquiries")
        lots = []
        for lot in 2 * self.test_lots_data:
            # add lot (always the first template; the loop var only sets the count)
            response = self.app.post_json(
                "/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
            )
            self.assertEqual(response.status, "201 Created")
            lots.append(response.json["data"]["id"])
        self.initial_lots = lots
        # add item
        response = self.app.patch_json(
            "/tenders/{}?acc_token={}".format(tender_id, owner_token),
            {"data": {"items": [self.initial_data["items"][0] for i in lots]}},
        )
        # add relatedLot for item
        response = self.app.patch_json(
            "/tenders/{}?acc_token={}".format(tender_id, owner_token),
            {"data": {"items": [{"relatedLot": i} for i in lots]}},
        )
        # add features (relatedItem comes from the previous patch response)
        response = self.app.patch_json(
            "/tenders/{}?acc_token={}".format(tender_id, owner_token),
            {
                "data": {
                    "features": [
                        {
                            "code": "code_item",
                            "featureOf": "item",
                            "relatedItem": response.json["data"]["items"][0]["id"],
                            "title": u"item feature",
                            "enum": [{"value": 0.1, "title": u"good"}, {"value": 0.2, "title": u"best"}],
                        }
                    ]
                }
            },
        )
        self.assertEqual(response.status, "200 OK")
        # switch to active.tendering
        response = self.set_status(
            "active.tendering",
            {
                "lots": [
                    {
                        "auctionPeriod": {
                            "startDate": (get_now() + timedelta(days=self.days_till_auction_starts)).isoformat()
                        }
                    }
                    for i in lots
                ]
            },
        )
        # create bid (first lot only, with a feature parameter)
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.post_json(
            "/tenders/{}/bids".format(tender_id),
            {
                "data": {
                    "tenderers": [test_organization],
                    "lotValues": [{"value": {"amount": 500}, "relatedLot": lots[0]}],
                    "parameters": [{"code": "code_item", "value": 0.2}],
                }
            },
        )
        # create second bid (second lot, no parameters)
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.post_json(
            "/tenders/{}/bids".format(tender_id),
            {"data": {"tenderers": [test_organization], "lotValues": [{"value": {"amount": 500}, "relatedLot": lots[1]}]}},
        )
        # switch to active.qualification
        response = self.set_status("active.auction", {"status": "active.tendering"})
        self.app.authorization = ("Basic", ("chronograph", ""))
        response = self.app.patch_json("/tenders/{}".format(tender_id), {"data": {"id": tender_id}})
        # for first lot
        lot_id = lots[0]
        # get awards
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
        # get pending award
        award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
        # set award as active
        self.app.patch_json(
            "/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token), {"data": {"status": "active"}}
        )
        # get contract id
        response = self.app.get("/tenders/{}".format(tender_id))
        contract_id = response.json["data"]["contracts"][-1]["id"]
        # after stand still period
        self.set_status("active.awarded", start_end="end")
        # time travel: re-save the tender unchanged (NOTE(review): confirm why needed)
        tender = self.db.get(tender_id)
        self.db.save(tender)
        # sign contract
        self.app.authorization = ("Basic", ("broker", ""))
        self.app.patch_json(
            "/tenders/{}/contracts/{}?acc_token={}".format(tender_id, contract_id, owner_token),
            {"data": {"status": "active", "value": {"valueAddedTaxIncluded": False}}},
        )
        # for second lot
        lot_id = lots[1]
        # get awards
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
        # get pending award
        award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
        # set award as active
        self.app.patch_json(
            "/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token), {"data": {"status": "active"}}
        )
        # get contract id
        response = self.app.get("/tenders/{}".format(tender_id))
        contract_id = response.json["data"]["contracts"][-1]["id"]
        # after stand still period
        self.set_status("active.awarded", start_end="end")
        # time travel
        tender = self.db.get(tender_id)
        self.db.save(tender)
        # sign contract
        self.app.authorization = ("Basic", ("broker", ""))
        self.app.patch_json(
            "/tenders/{}/contracts/{}?acc_token={}".format(tender_id, contract_id, owner_token),
            {"data": {"status": "active", "value": {"valueAddedTaxIncluded": False}}},
        )
        # check status
        self.app.authorization = ("Basic", ("broker", ""))
        response = self.app.get("/tenders/{}".format(tender_id))
        self.assertTrue(all([i["status"] == "complete" for i in response.json["data"]["lots"]]))
        self.assertEqual(response.json["data"]["status"], "complete")
def proc_2lot_2diff_bids_check_auction(self):
    """End-to-end flow for a two-lot tender where the lots receive a different
    number of bids.

    One bid covers both lots and a second bid covers only the first lot.
    After switching to active.auction, the first lot (2 bids) must expose both
    'startDate' and 'shouldStartAfter' in its auctionPeriod, while the second
    lot (1 bid) must expose 'startDate' but no 'shouldStartAfter'.

    NOTE(review): statement order matters — each request mutates server-side
    tender state that the following requests depend on.
    """
    self.app.authorization = ("Basic", ("broker", ""))
    # create tender
    response = self.app.post_json("/tenders", {"data": self.initial_data})
    tender_id = self.tender_id = response.json["data"]["id"]
    owner_token = response.json["access"]["token"]
    # switch to active.enquiries
    self.set_status("active.enquiries")
    lots = []
    # add two lots (the loop body always posts test_lots_data[0]; the loop
    # variable only controls the repetition count)
    for lot in 2 * self.test_lots_data:
        # add lot
        response = self.app.post_json(
            "/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
        )
        self.assertEqual(response.status, "201 Created")
        lots.append(response.json["data"]["id"])
    self.initial_lots = lots
    # add item
    response = self.app.patch_json(
        "/tenders/{}?acc_token={}".format(tender_id, owner_token),
        {"data": {"items": [self.initial_data["items"][0] for i in lots]}},
    )
    # add relatedLot for item
    response = self.app.patch_json(
        "/tenders/{}?acc_token={}".format(tender_id, owner_token),
        {"data": {"items": [{"relatedLot": i} for i in lots]}},
    )
    self.assertEqual(response.status, "200 OK")
    # switch to active.tendering
    response = self.set_status(
        "active.tendering",
        {
            "lots": [
                {
                    "auctionPeriod": {
                        "startDate": (get_now() + timedelta(days=self.days_till_auction_starts)).isoformat()
                    }
                }
                for i in lots
            ]
        },
    )
    # create bid (for 2 lots)
    self.app.authorization = ("Basic", ("broker", ""))
    response = self.app.post_json(
        "/tenders/{}/bids".format(tender_id),
        {
            "data": {
                "tenderers": [test_organization],
                "lotValues": [{"value": {"amount": 500}, "relatedLot": lot_id} for lot_id in lots],
            }
        },
    )
    # create second bid (only for 1 lot)
    self.app.authorization = ("Basic", ("broker", ""))
    response = self.app.post_json(
        "/tenders/{}/bids".format(tender_id),
        {"data": {"tenderers": [test_organization], "lotValues": [{"value": {"amount": 500}, "relatedLot": lots[0]}]}},
    )
    # switch to active.auction
    self.set_status("active.auction")
    # check lots auction period
    # first lot (with 2 bids) should have 'start date' and 'should start after' field
    response = self.app.get("/tenders/{}".format(tender_id))
    self.assertIn("auctionPeriod", response.json["data"]["lots"][0])
    self.assertIn("startDate", response.json["data"]["lots"][0]["auctionPeriod"])
    self.assertIn("shouldStartAfter", response.json["data"]["lots"][0]["auctionPeriod"])
    # second lot (with only 1 bid) should have 'start date' and no 'should start after' field
    self.assertIn("auctionPeriod", response.json["data"]["lots"][1])
    self.assertIn("startDate", response.json["data"]["lots"][1]["auctionPeriod"])
    self.assertNotIn("shouldStartAfter", response.json["data"]["lots"][1]["auctionPeriod"])
def patch_lot_guarantee_on_active_enquiries(self):
    """Patch a lot's guarantee while the tender is in active.enquiries and
    verify the new guarantee is stored on the tender."""
    tender_response = self.app.get("/tenders/{}".format(self.tender_id))
    self.assertEqual(tender_response.status, "200 OK")
    first_lot_id = tender_response.json["data"]["lots"][0]["id"]

    # Patch the first lot with a new guarantee.
    new_guarantee = {"amount": 100500, "currency": "USD"}
    patch_url = "/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, first_lot_id, self.tender_token)
    patch_response = self.app.patch_json(patch_url, {"data": {"guarantee": new_guarantee}})

    # The guarantee must be echoed back on the tender data.
    self.assertEqual(patch_response.status, "200 OK")
    self.assertIn("guarantee", patch_response.json["data"])
    self.assertEqual(patch_response.json["data"]["guarantee"], new_guarantee)
| 41.328556
| 120
| 0.592868
| 12,648
| 112,455
| 5.151091
| 0.025221
| 0.094166
| 0.136268
| 0.073353
| 0.947107
| 0.934184
| 0.923931
| 0.911483
| 0.893033
| 0.882826
| 0
| 0.013142
| 0.225939
| 112,455
| 2,720
| 121
| 41.34375
| 0.735315
| 0.045316
| 0
| 0.661978
| 0
| 0.00044
| 0.241221
| 0.044704
| 0
| 0
| 0
| 0
| 0.201758
| 1
| 0.013626
| false
| 0
| 0.003077
| 0
| 0.016703
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6a91ae5385fa32fcdbe85decd2dd101b5347806a
| 186
|
py
|
Python
|
maui/backend/serial/__init__.py
|
cstatz/maui
|
db99986e93699ee20c5cffdd5b4ee446f8607c5d
|
[
"BSD-3-Clause"
] | null | null | null |
maui/backend/serial/__init__.py
|
cstatz/maui
|
db99986e93699ee20c5cffdd5b4ee446f8607c5d
|
[
"BSD-3-Clause"
] | null | null | null |
maui/backend/serial/__init__.py
|
cstatz/maui
|
db99986e93699ee20c5cffdd5b4ee446f8607c5d
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
__author__ = 'christoph.statz <at> tu-dresden.de'
| 20.666667
| 49
| 0.774194
| 24
| 186
| 5.25
| 0.708333
| 0.238095
| 0.380952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006211
| 0.134409
| 186
| 8
| 50
| 23.25
| 0.776398
| 0.112903
| 0
| 0
| 0
| 0
| 0.209877
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0.25
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6a93e2b56c945e5413a72bc46a1fe705e3e3b070
| 330
|
py
|
Python
|
transportation_problem/__init__.py
|
cdfmlr/TransportationProblem
|
9a57d36d46f5714f77e14acf126804d1a9a6be77
|
[
"MIT"
] | 4
|
2020-11-27T07:13:08.000Z
|
2021-11-11T08:10:18.000Z
|
transportation_problem/__init__.py
|
cdfmlr/TransportationProblem
|
9a57d36d46f5714f77e14acf126804d1a9a6be77
|
[
"MIT"
] | null | null | null |
transportation_problem/__init__.py
|
cdfmlr/TransportationProblem
|
9a57d36d46f5714f77e14acf126804d1a9a6be77
|
[
"MIT"
] | 2
|
2020-12-08T00:58:42.000Z
|
2021-11-15T07:27:19.000Z
|
from .problem import TransportationProblem
from .closed_loop_method import ClosedLoopMethod
from .initer import TransportationIniter, MinimumElementIniter, NorthwestCornerIniter, VogelIniter
from .checker import TransportationChecker, PotentialChecker
from .optimizer import TransportationOptimizer, ClosedLoopAdjustmentOptimizer
| 55
| 98
| 0.893939
| 27
| 330
| 10.851852
| 0.703704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075758
| 330
| 5
| 99
| 66
| 0.960656
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6ac21816a83151aa8120412e2ad0bc0f21717d43
| 65
|
py
|
Python
|
wouso/interface/apps/qproposal/models.py
|
AlexandruGhergut/wouso
|
f26244ff58ae626808ae8c58ccc93d21f9f2666f
|
[
"Apache-2.0"
] | 117
|
2015-01-02T18:07:33.000Z
|
2021-01-06T22:36:25.000Z
|
wouso/interface/apps/qproposal/models.py
|
AlexandruGhergut/wouso
|
f26244ff58ae626808ae8c58ccc93d21f9f2666f
|
[
"Apache-2.0"
] | 229
|
2015-01-12T07:07:58.000Z
|
2019-10-12T08:27:01.000Z
|
wouso/interface/apps/qproposal/models.py
|
AlexandruGhergut/wouso
|
f26244ff58ae626808ae8c58ccc93d21f9f2666f
|
[
"Apache-2.0"
] | 96
|
2015-01-07T05:26:09.000Z
|
2020-06-25T07:28:51.000Z
|
from wouso.core.common import App
class Qproposal(App):
    """App registration stub for the question-proposal (qproposal) interface
    application; all behavior is inherited from the App base class."""
    pass
| 16.25
| 33
| 0.753846
| 10
| 65
| 4.9
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169231
| 65
| 4
| 34
| 16.25
| 0.907407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
6ac23fef369b838b7e8222a622803e5eb9a8bce3
| 12,009
|
py
|
Python
|
tests/test_focal_loss.py
|
function2-llx/MONAI
|
4cddaa830b61b88ec78e089bb5f21e05bb1a78f4
|
[
"Apache-2.0"
] | null | null | null |
tests/test_focal_loss.py
|
function2-llx/MONAI
|
4cddaa830b61b88ec78e089bb5f21e05bb1a78f4
|
[
"Apache-2.0"
] | null | null | null |
tests/test_focal_loss.py
|
function2-llx/MONAI
|
4cddaa830b61b88ec78e089bb5f21e05bb1a78f4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
import torch.nn as nn
import torch.nn.functional as F
from monai.losses import FocalLoss
from monai.networks import one_hot
from tests.utils import test_script_save
class TestFocalLoss(unittest.TestCase):
    """Unit tests for monai.losses.FocalLoss.

    The consistency tests rely on the identity that focal loss with gamma=0
    equals binary cross entropy with logits; the segmentation tests check
    that a near-perfect prediction yields a loss close to zero.
    """

    def test_consistency_with_cross_entropy_2d(self):
        """For gamma=0 the focal loss reduces to the cross entropy loss"""
        focal_loss = FocalLoss(to_onehot_y=False, gamma=0.0, reduction="mean", weight=1.0)
        ce = nn.BCEWithLogitsLoss(reduction="mean")
        max_error = 0
        class_num = 10
        batch_size = 128
        for _ in range(100):
            # Create a random tensor of shape (batch_size, class_num, 8, 4)
            x = torch.rand(batch_size, class_num, 8, 4, requires_grad=True)
            # Create a random batch of classes
            l = torch.randint(low=0, high=2, size=(batch_size, class_num, 8, 4)).float()
            if torch.cuda.is_available():
                x = x.cuda()
                l = l.cuda()
            output0 = focal_loss(x, l)
            output1 = ce(x, l)
            a = float(output0.cpu().detach())
            b = float(output1.cpu().detach())
            # track the worst-case difference over all random draws
            if abs(a - b) > max_error:
                max_error = abs(a - b)
        self.assertAlmostEqual(max_error, 0.0, places=3)

    def test_consistency_with_cross_entropy_2d_no_reduction(self):
        """For gamma=0 the focal loss reduces to the cross entropy loss"""
        import numpy as np

        focal_loss = FocalLoss(to_onehot_y=False, gamma=0.0, reduction="none", weight=1.0)
        ce = nn.BCEWithLogitsLoss(reduction="none")
        max_error = 0
        class_num = 10
        batch_size = 128
        for _ in range(100):
            # Create a random tensor of shape (batch_size, class_num, 8, 4)
            x = torch.rand(batch_size, class_num, 8, 4, requires_grad=True)
            # Create a random batch of classes
            l = torch.randint(low=0, high=2, size=(batch_size, class_num, 8, 4)).float()
            if torch.cuda.is_available():
                x = x.cuda()
                l = l.cuda()
            output0 = focal_loss(x, l)
            output1 = ce(x, l)
            # with reduction="none" the outputs are per-element, so compare elementwise
            a = output0.cpu().detach().numpy()
            b = output1.cpu().detach().numpy()
            error = np.abs(a - b)
            max_error = np.maximum(error, max_error)
        assert np.allclose(max_error, 0)

    def test_consistency_with_cross_entropy_2d_onehot_label(self):
        """For gamma=0 the focal loss reduces to the cross entropy loss"""
        focal_loss = FocalLoss(to_onehot_y=True, gamma=0.0, reduction="mean")
        ce = nn.BCEWithLogitsLoss(reduction="mean")
        max_error = 0
        class_num = 10
        batch_size = 128
        for _ in range(100):
            # Create a random tensor of shape (batch_size, class_num, 8, 4)
            x = torch.rand(batch_size, class_num, 8, 4, requires_grad=True)
            # Create a random batch of classes
            l = torch.randint(low=0, high=class_num, size=(batch_size, 1, 8, 4))
            if torch.cuda.is_available():
                x = x.cuda()
                l = l.cuda()
            output0 = focal_loss(x, l)
            # the loss converts l to one-hot internally; do it explicitly for BCE
            output1 = ce(x, one_hot(l, num_classes=class_num))
            a = float(output0.cpu().detach())
            b = float(output1.cpu().detach())
            if abs(a - b) > max_error:
                max_error = abs(a - b)
        self.assertAlmostEqual(max_error, 0.0, places=3)

    def test_consistency_with_cross_entropy_classification(self):
        """for gamma=0 the focal loss reduces to the cross entropy loss"""
        focal_loss = FocalLoss(to_onehot_y=True, gamma=0.0, reduction="mean")
        ce = nn.BCEWithLogitsLoss(reduction="mean")
        max_error = 0
        class_num = 10
        batch_size = 128
        for _ in range(100):
            # Create a random scores tensor of shape (batch_size, class_num)
            x = torch.rand(batch_size, class_num, requires_grad=True)
            # Create a random batch of classes
            l = torch.randint(low=0, high=class_num, size=(batch_size, 1))
            l = l.long()
            if torch.cuda.is_available():
                x = x.cuda()
                l = l.cuda()
            output0 = focal_loss(x, l)
            output1 = ce(x, one_hot(l, num_classes=class_num))
            a = float(output0.cpu().detach())
            b = float(output1.cpu().detach())
            if abs(a - b) > max_error:
                max_error = abs(a - b)
        self.assertAlmostEqual(max_error, 0.0, places=3)

    def test_consistency_with_cross_entropy_classification_01(self):
        """With gamma=0.1 the focal loss must differ from cross entropy."""
        # for gamma=0.1 the focal loss differs from the cross entropy loss
        focal_loss = FocalLoss(to_onehot_y=True, gamma=0.1, reduction="mean")
        ce = nn.BCEWithLogitsLoss(reduction="mean")
        max_error = 0
        class_num = 10
        batch_size = 128
        for _ in range(100):
            # Create a random scores tensor of shape (batch_size, class_num)
            x = torch.rand(batch_size, class_num, requires_grad=True)
            # Create a random batch of classes
            l = torch.randint(low=0, high=class_num, size=(batch_size, 1))
            l = l.long()
            if torch.cuda.is_available():
                x = x.cuda()
                l = l.cuda()
            output0 = focal_loss(x, l)
            output1 = ce(x, one_hot(l, num_classes=class_num))
            a = float(output0.cpu().detach())
            b = float(output1.cpu().detach())
            if abs(a - b) > max_error:
                max_error = abs(a - b)
        # here the two losses must NOT coincide
        self.assertNotAlmostEqual(max_error, 0.0, places=3)

    def test_bin_seg_2d(self):
        """A near-perfect binary 2d segmentation prediction has near-zero loss."""
        # define 2d examples
        target = torch.tensor([[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]])
        # add another dimension corresponding to the batch (batch size = 1 here)
        target = target.unsqueeze(0)  # shape (1, H, W)
        # large logit margin (+/-50) makes the prediction effectively certain
        pred_very_good = 100 * F.one_hot(target, num_classes=2).permute(0, 3, 1, 2).float() - 50.0
        # initialize the mean dice loss
        loss = FocalLoss(to_onehot_y=True)
        # focal loss for pred_very_good should be close to 0
        target = target.unsqueeze(1)  # shape (1, 1, H, W)
        focal_loss_good = float(loss(pred_very_good, target).cpu())
        self.assertAlmostEqual(focal_loss_good, 0.0, places=3)

    def test_empty_class_2d(self):
        """Loss stays near zero even when one class never occurs in the target."""
        num_classes = 2
        # define 2d examples
        target = torch.tensor([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
        # add another dimension corresponding to the batch (batch size = 1 here)
        target = target.unsqueeze(0)  # shape (1, H, W)
        pred_very_good = 1000 * F.one_hot(target, num_classes=num_classes).permute(0, 3, 1, 2).float() - 500.0
        # initialize the mean dice loss
        loss = FocalLoss(to_onehot_y=True)
        # focal loss for pred_very_good should be close to 0
        target = target.unsqueeze(1)  # shape (1, 1, H, W)
        focal_loss_good = float(loss(pred_very_good, target).cpu())
        self.assertAlmostEqual(focal_loss_good, 0.0, places=3)

    def test_multi_class_seg_2d(self):
        """Multi-class 2d segmentation with both index and one-hot targets."""
        num_classes = 6  # labels 0 to 5
        # define 2d examples
        target = torch.tensor([[0, 0, 0, 0], [0, 1, 2, 0], [0, 3, 4, 0], [0, 0, 0, 0]])
        # add another dimension corresponding to the batch (batch size = 1 here)
        target = target.unsqueeze(0)  # shape (1, H, W)
        pred_very_good = 1000 * F.one_hot(target, num_classes=num_classes).permute(0, 3, 1, 2).float() - 500.0
        # initialize the mean dice loss
        loss = FocalLoss(to_onehot_y=True)
        loss_onehot = FocalLoss(to_onehot_y=False)
        # focal loss for pred_very_good should be close to 0
        target_one_hot = F.one_hot(target, num_classes=num_classes).permute(0, 3, 1, 2)  # test one hot
        target = target.unsqueeze(1)  # shape (1, 1, H, W)
        focal_loss_good = float(loss(pred_very_good, target).cpu())
        self.assertAlmostEqual(focal_loss_good, 0.0, places=3)
        focal_loss_good = float(loss_onehot(pred_very_good, target_one_hot).cpu())
        self.assertAlmostEqual(focal_loss_good, 0.0, places=3)

    def test_bin_seg_3d(self):
        """Binary 3d segmentation with both index and one-hot targets."""
        num_classes = 2  # labels 0, 1
        # define 3d examples
        target = torch.tensor(
            [
                # raw 0
                [[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
                # raw 1
                [[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
                # raw 2
                [[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
            ]
        )
        # add another dimension corresponding to the batch (batch size = 1 here)
        target = target.unsqueeze(0)  # shape (1, H, W, D)
        target_one_hot = F.one_hot(target, num_classes=num_classes).permute(0, 4, 1, 2, 3)  # test one hot
        pred_very_good = 1000 * F.one_hot(target, num_classes=num_classes).permute(0, 4, 1, 2, 3).float() - 500.0
        # initialize the mean dice loss
        loss = FocalLoss(to_onehot_y=True)
        loss_onehot = FocalLoss(to_onehot_y=False)
        # focal loss for pred_very_good should be close to 0
        target = target.unsqueeze(1)  # shape (1, 1, H, W)
        focal_loss_good = float(loss(pred_very_good, target).cpu())
        self.assertAlmostEqual(focal_loss_good, 0.0, places=3)
        focal_loss_good = float(loss_onehot(pred_very_good, target_one_hot).cpu())
        self.assertAlmostEqual(focal_loss_good, 0.0, places=3)

    def test_foreground(self):
        """include_background toggles whether channel 0 contributes to the loss."""
        background = torch.ones(1, 1, 5, 5)
        foreground = torch.zeros(1, 1, 5, 5)
        target = torch.cat((background, foreground), dim=1)
        input = torch.cat((background, foreground), dim=1)
        # flip one pixel from background to foreground in the target only
        target[:, 0, 2, 2] = 0
        target[:, 1, 2, 2] = 1
        fgbg = FocalLoss(to_onehot_y=False, include_background=True)(input, target)
        fg = FocalLoss(to_onehot_y=False, include_background=False)(input, target)
        # expected values are regression constants for this fixed setup
        self.assertAlmostEqual(float(fgbg.cpu()), 0.1116, places=3)
        self.assertAlmostEqual(float(fg.cpu()), 0.1733, places=3)

    def test_ill_opts(self):
        """An unknown reduction mode must raise ValueError."""
        chn_input = torch.ones((1, 2, 3))
        chn_target = torch.ones((1, 2, 3))
        with self.assertRaisesRegex(ValueError, ""):
            FocalLoss(reduction="unknown")(chn_input, chn_target)

    def test_ill_shape(self):
        """Mismatched input/target shapes must raise ValueError."""
        chn_input = torch.ones((1, 2, 3))
        chn_target = torch.ones((1, 3))
        with self.assertRaisesRegex(ValueError, ""):
            FocalLoss(reduction="mean")(chn_input, chn_target)

    def test_ill_class_weight(self):
        """Class-weight length mismatches and negative weights must raise ValueError."""
        chn_input = torch.ones((1, 4, 3, 3))
        chn_target = torch.ones((1, 4, 3, 3))
        # too few weights for 4 channels with background included
        with self.assertRaisesRegex(ValueError, ""):
            FocalLoss(include_background=True, weight=(1.0, 1.0, 2.0))(chn_input, chn_target)
        # too many weights when background is excluded
        with self.assertRaisesRegex(ValueError, ""):
            FocalLoss(include_background=False, weight=(1.0, 1.0, 1.0, 1.0))(chn_input, chn_target)
        # negative weights are invalid
        with self.assertRaisesRegex(ValueError, ""):
            FocalLoss(include_background=False, weight=(1.0, 1.0, -1.0))(chn_input, chn_target)

    def test_script(self):
        """The loss must be TorchScript-compatible (round-trips via scripting)."""
        loss = FocalLoss()
        test_input = torch.ones(2, 2, 8, 8)
        test_script_save(loss, test_input, test_input)
# Allow running this test module directly (e.g. `python test_focal_loss.py`).
if __name__ == "__main__":
    unittest.main()
| 44.643123
| 113
| 0.598967
| 1,743
| 12,009
| 3.958692
| 0.118761
| 0.021739
| 0.019565
| 0.01971
| 0.801014
| 0.796957
| 0.773913
| 0.712609
| 0.707391
| 0.707391
| 0
| 0.04919
| 0.28054
| 12,009
| 268
| 114
| 44.809701
| 0.749421
| 0.186194
| 0
| 0.633508
| 0
| 0
| 0.006082
| 0
| 0
| 0
| 0
| 0
| 0.094241
| 1
| 0.073298
| false
| 0
| 0.041885
| 0
| 0.120419
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6adc60c9a79e26b4c6128bb88d03518fd6c6bfaf
| 23
|
py
|
Python
|
homura/vision/models/segmentation/__init__.py
|
Fragile-azalea/homura
|
900d1d63affb9c8af3accd9b196b5276cb2e14b6
|
[
"Apache-2.0"
] | 1
|
2020-06-30T01:55:41.000Z
|
2020-06-30T01:55:41.000Z
|
homura/vision/models/segmentation/__init__.py
|
Fragile-azalea/homura
|
900d1d63affb9c8af3accd9b196b5276cb2e14b6
|
[
"Apache-2.0"
] | null | null | null |
homura/vision/models/segmentation/__init__.py
|
Fragile-azalea/homura
|
900d1d63affb9c8af3accd9b196b5276cb2e14b6
|
[
"Apache-2.0"
] | null | null | null |
from .unet import unet
| 11.5
| 22
| 0.782609
| 4
| 23
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6aeee3f3e1331be9de610d50bc15e5835b9579a1
| 76,702
|
py
|
Python
|
datadog_checks_base/tests/test_http.py
|
mxmeinhold/integrations-core
|
26ad0e95a12582220390d04dd3faf9bb76687ee2
|
[
"BSD-3-Clause"
] | null | null | null |
datadog_checks_base/tests/test_http.py
|
mxmeinhold/integrations-core
|
26ad0e95a12582220390d04dd3faf9bb76687ee2
|
[
"BSD-3-Clause"
] | 1
|
2021-02-23T14:03:42.000Z
|
2021-03-25T16:52:05.000Z
|
datadog_checks_base/tests/test_http.py
|
mxmeinhold/integrations-core
|
26ad0e95a12582220390d04dd3faf9bb76687ee2
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import logging
import os
import re
from collections import OrderedDict
import jwt
import mock
import pytest
import requests
import requests_kerberos
import requests_ntlm
import requests_unixsocket
from aws_requests_auth import boto_utils as requests_aws
from requests import auth as requests_auth
from requests.exceptions import ConnectTimeout, ProxyError
from six import iteritems
from datadog_checks.base import AgentCheck, ConfigurationError
from datadog_checks.base.utils.headers import headers as agent_headers
from datadog_checks.base.utils.http import STANDARD_FIELDS, RequestsWrapper, is_uds_url, quote_uds_url
from datadog_checks.base.utils.time import get_timestamp
from datadog_checks.dev import EnvVars, TempDir
from datadog_checks.dev.utils import ON_WINDOWS, read_file, running_on_windows_ci, write_file
# Apply the `http` marker to every test in this module.
pytestmark = pytest.mark.http

# The options RequestsWrapper is expected to produce from an empty
# instance/init_config; individual tests compare against these defaults.
DEFAULT_OPTIONS = {
    'auth': None,
    'cert': None,
    'headers': OrderedDict(
        [
            ('User-Agent', 'Datadog Agent/0.0.0'),
            ('Accept', '*/*'),
            ('Accept-Encoding', 'gzip, deflate'),
        ]
    ),
    'proxies': None,
    'timeout': (10.0, 10.0),
    'verify': True,
}
class TestAttribute:
    """The `http` attribute on AgentCheck is created lazily on first access."""

    def test_default(self):
        # Before `http` is ever accessed, no `_http` backing attribute exists.
        agent_check = AgentCheck('test', {}, [{}])
        assert not hasattr(agent_check, '_http')

    def test_activate(self):
        # Accessing `http` creates a RequestsWrapper and caches it on `_http`.
        agent_check = AgentCheck('test', {}, [{}])
        assert agent_check.http == agent_check._http
        assert isinstance(agent_check.http, RequestsWrapper)
class TestTimeout:
    """How RequestsWrapper resolves its (connect, read) timeout pair."""

    def test_config_default(self):
        wrapper = RequestsWrapper({}, {})
        # Assert the timeout is slightly larger than a multiple of 3,
        # which is the default TCP packet retransmission window. See:
        # https://tools.ietf.org/html/rfc2988
        assert 0 < wrapper.options['timeout'][0] % 3 <= 1

    def test_config_timeout(self):
        # A single `timeout` value applies to both connect and read.
        wrapper = RequestsWrapper({'timeout': 24.5}, {})
        assert wrapper.options['timeout'] == (24.5, 24.5)

    def test_config_multiple_timeouts(self):
        # Separate values map to a (connect, read) tuple.
        wrapper = RequestsWrapper({'read_timeout': 4, 'connect_timeout': 10}, {})
        assert wrapper.options['timeout'] == (10, 4)

    def test_config_init_config_override(self):
        # A timeout set in init_config is honored when the instance has none.
        wrapper = RequestsWrapper({}, {'timeout': 16})
        assert wrapper.options['timeout'] == (16, 16)
class TestHeaders:
    """Header configuration for RequestsWrapper.

    Several assertions compare headers as ordered pairs (via `iteritems`),
    so header insertion order is part of the contract being tested.
    """

    def test_agent_headers(self):
        # This helper is not used by the RequestsWrapper, but some integrations may use it.
        # So we provide a unit test for it.
        agent_config = {}
        headers = agent_headers(agent_config)
        assert headers == DEFAULT_OPTIONS['headers']

    def test_config_default(self):
        instance = {}
        init_config = {}
        http = RequestsWrapper(instance, init_config)
        assert http.options['headers'] == DEFAULT_OPTIONS['headers']

    def test_config_headers(self):
        headers = OrderedDict((('key1', 'value1'), ('key2', 'value2')))
        instance = {'headers': headers}
        init_config = {}
        http = RequestsWrapper(instance, init_config)
        # `headers` replaces the defaults entirely; compare as ordered pairs
        assert list(iteritems(http.options['headers'])) == list(iteritems(headers))

    def test_config_headers_string_values(self):
        instance = {'headers': {'answer': 42}}
        init_config = {}
        http = RequestsWrapper(instance, init_config)
        # non-string header values are coerced to strings
        assert http.options['headers'] == {'answer': '42'}

    def test_config_extra_headers(self):
        headers = OrderedDict((('key1', 'value1'), ('key2', 'value2')))
        instance = {'extra_headers': headers}
        init_config = {}
        http = RequestsWrapper(instance, init_config)
        # `extra_headers` is merged on top of the defaults, preserving order
        complete_headers = OrderedDict(DEFAULT_OPTIONS['headers'])
        complete_headers.update(headers)
        assert list(iteritems(http.options['headers'])) == list(iteritems(complete_headers))

    def test_config_extra_headers_string_values(self):
        instance = {'extra_headers': {'answer': 42}}
        init_config = {}
        http = RequestsWrapper(instance, init_config)
        complete_headers = dict(DEFAULT_OPTIONS['headers'])
        complete_headers.update({'answer': '42'})
        assert http.options['headers'] == complete_headers

    def test_extra_headers_on_http_method_call(self):
        instance = {'extra_headers': {'answer': 42}}
        init_config = {}
        http = RequestsWrapper(instance, init_config)
        complete_headers = dict(DEFAULT_OPTIONS['headers'])
        complete_headers.update({'answer': '42'})
        extra_headers = {"foo": "bar"}
        with mock.patch("requests.get") as get:
            # per-call extra_headers are merged into the request headers only
            http.get("http://example.com/hello", extra_headers=extra_headers)
            expected_options = dict(complete_headers)
            expected_options.update(extra_headers)
            get.assert_called_with(
                "http://example.com/hello",
                headers=expected_options,
                auth=None,
                cert=None,
                proxies=None,
                timeout=(10.0, 10.0),
                verify=True,
            )
        # make sure the original headers are not modified
        assert http.options['headers'] == complete_headers
        assert extra_headers == {"foo": "bar"}
class TestVerify:
    """Mapping of TLS verification config onto the `verify` request option."""

    def test_config_default(self):
        wrapper = RequestsWrapper({}, {})
        assert wrapper.options['verify'] is True

    def test_config_verify(self):
        wrapper = RequestsWrapper({'tls_verify': False}, {})
        assert wrapper.options['verify'] is False

    def test_config_ca_cert(self):
        # A CA bundle path is passed straight through as `verify`.
        wrapper = RequestsWrapper({'tls_ca_cert': 'ca_cert'}, {})
        assert wrapper.options['verify'] == 'ca_cert'

    def test_config_verify_and_ca_cert(self):
        # The CA bundle path wins over the boolean flag when both are set.
        wrapper = RequestsWrapper({'tls_verify': True, 'tls_ca_cert': 'ca_cert'}, {})
        assert wrapper.options['verify'] == 'ca_cert'
class TestCert:
    """Mapping of client-certificate config onto the `cert` request option."""

    def test_config_default(self):
        wrapper = RequestsWrapper({}, {})
        assert wrapper.options['cert'] is None

    def test_config_cert(self):
        # A lone certificate path is passed through unchanged.
        wrapper = RequestsWrapper({'tls_cert': 'cert'}, {})
        assert wrapper.options['cert'] == 'cert'

    def test_config_cert_and_private_key(self):
        # Certificate plus key becomes the (cert, key) tuple requests expects.
        wrapper = RequestsWrapper({'tls_cert': 'cert', 'tls_private_key': 'key'}, {})
        assert wrapper.options['cert'] == ('cert', 'key')
class TestAuth:
def test_config_default(self):
instance = {}
init_config = {}
http = RequestsWrapper(instance, init_config)
assert http.options['auth'] is None
def test_config_basic(self):
instance = {'username': 'user', 'password': 'pass'}
init_config = {}
http = RequestsWrapper(instance, init_config)
assert http.options['auth'] == ('user', 'pass')
def test_config_basic_authtype(self):
instance = {'username': 'user', 'password': 'pass', 'auth_type': 'basic'}
init_config = {}
http = RequestsWrapper(instance, init_config)
assert http.options['auth'] == ('user', 'pass')
def test_config_basic_no_legacy_encoding(self):
instance = {'username': 'user', 'password': 'pass', 'use_legacy_auth_encoding': False}
init_config = {}
http = RequestsWrapper(instance, init_config)
assert http.options['auth'] == (b'user', b'pass')
def test_config_digest_authtype(self):
instance = {'username': 'user', 'password': 'pass', 'auth_type': 'digest'}
init_config = {}
http = RequestsWrapper(instance, init_config)
assert isinstance(http.options['auth'], requests_auth.HTTPDigestAuth)
with mock.patch('datadog_checks.base.utils.http.requests_auth.HTTPDigestAuth') as m:
RequestsWrapper(instance, init_config)
m.assert_called_once_with('user', 'pass')
def test_config_basic_only_username(self):
instance = {'username': 'user'}
init_config = {}
http = RequestsWrapper(instance, init_config)
assert http.options['auth'] is None
def test_config_basic_only_password(self):
instance = {'password': 'pass'}
init_config = {}
http = RequestsWrapper(instance, init_config)
assert http.options['auth'] is None
def test_config_kerberos_legacy(self):
instance = {'kerberos_auth': 'required'}
init_config = {}
# Trigger lazy import
http = RequestsWrapper(instance, init_config)
assert isinstance(http.options['auth'], requests_kerberos.HTTPKerberosAuth)
with mock.patch('datadog_checks.base.utils.http.requests_kerberos.HTTPKerberosAuth') as m:
RequestsWrapper(instance, init_config)
m.assert_called_once_with(
mutual_authentication=requests_kerberos.REQUIRED,
delegate=False,
force_preemptive=False,
hostname_override=None,
principal=None,
)
def test_config_kerberos(self):
instance = {'auth_type': 'kerberos', 'kerberos_auth': 'required'}
init_config = {}
# Trigger lazy import
http = RequestsWrapper(instance, init_config)
assert isinstance(http.options['auth'], requests_kerberos.HTTPKerberosAuth)
with mock.patch('datadog_checks.base.utils.http.requests_kerberos.HTTPKerberosAuth') as m:
RequestsWrapper(instance, init_config)
m.assert_called_once_with(
mutual_authentication=requests_kerberos.REQUIRED,
delegate=False,
force_preemptive=False,
hostname_override=None,
principal=None,
)
with mock.patch('datadog_checks.base.utils.http.requests_kerberos.HTTPKerberosAuth') as m:
RequestsWrapper({'auth_type': 'kerberos', 'kerberos_auth': 'optional'}, init_config)
m.assert_called_once_with(
mutual_authentication=requests_kerberos.OPTIONAL,
delegate=False,
force_preemptive=False,
hostname_override=None,
principal=None,
)
with mock.patch('datadog_checks.base.utils.http.requests_kerberos.HTTPKerberosAuth') as m:
RequestsWrapper({'auth_type': 'kerberos', 'kerberos_auth': 'disabled'}, init_config)
m.assert_called_once_with(
mutual_authentication=requests_kerberos.DISABLED,
delegate=False,
force_preemptive=False,
hostname_override=None,
principal=None,
)
def test_config_kerberos_shortcut(self):
instance = {'auth_type': 'kerberos', 'kerberos_auth': True}
init_config = {}
# Trigger lazy import
http = RequestsWrapper(instance, init_config)
assert isinstance(http.options['auth'], requests_kerberos.HTTPKerberosAuth)
with mock.patch('datadog_checks.base.utils.http.requests_kerberos.HTTPKerberosAuth') as m:
RequestsWrapper(instance, init_config)
m.assert_called_once_with(
mutual_authentication=requests_kerberos.REQUIRED,
delegate=False,
force_preemptive=False,
hostname_override=None,
principal=None,
)
def test_config_kerberos_unknown(self):
instance = {'auth_type': 'kerberos', 'kerberos_auth': 'unknown'}
init_config = {}
with pytest.raises(ConfigurationError):
RequestsWrapper(instance, init_config)
def test_config_kerberos_keytab_file(self):
instance = {'auth_type': 'kerberos', 'kerberos_keytab': '/test/file'}
init_config = {}
http = RequestsWrapper(instance, init_config)
assert os.environ.get('KRB5_CLIENT_KTNAME') is None
with mock.patch('requests.get', side_effect=lambda *args, **kwargs: os.environ.get('KRB5_CLIENT_KTNAME')):
assert http.get('https://www.google.com') == '/test/file'
assert os.environ.get('KRB5_CLIENT_KTNAME') is None
def test_config_kerberos_cache(self):
instance = {'auth_type': 'kerberos', 'kerberos_cache': '/test/file'}
init_config = {}
http = RequestsWrapper(instance, init_config)
assert os.environ.get('KRB5CCNAME') is None
with mock.patch('requests.get', side_effect=lambda *args, **kwargs: os.environ.get('KRB5CCNAME')):
assert http.get('https://www.google.com') == '/test/file'
assert os.environ.get('KRB5CCNAME') is None
def test_config_kerberos_cache_restores_rollback(self):
instance = {'auth_type': 'kerberos', 'kerberos_cache': '/test/file'}
init_config = {}
http = RequestsWrapper(instance, init_config)
with EnvVars({'KRB5CCNAME': 'old'}):
with mock.patch('requests.get', side_effect=lambda *args, **kwargs: os.environ.get('KRB5CCNAME')):
assert http.get('https://www.google.com') == '/test/file'
assert os.environ.get('KRB5CCNAME') == 'old'
def test_config_kerberos_keytab_file_rollback(self):
instance = {'auth_type': 'kerberos', 'kerberos_keytab': '/test/file'}
init_config = {}
http = RequestsWrapper(instance, init_config)
with EnvVars({'KRB5_CLIENT_KTNAME': 'old'}):
assert os.environ.get('KRB5_CLIENT_KTNAME') == 'old'
with mock.patch('requests.get', side_effect=lambda *args, **kwargs: os.environ.get('KRB5_CLIENT_KTNAME')):
assert http.get('https://www.google.com') == '/test/file'
assert os.environ.get('KRB5_CLIENT_KTNAME') == 'old'
def test_config_kerberos_legacy_remap(self):
instance = {'auth_type': 'kerberos', 'kerberos': True}
init_config = {}
# Trigger lazy import
http = RequestsWrapper(instance, init_config)
assert isinstance(http.options['auth'], requests_kerberos.HTTPKerberosAuth)
with mock.patch('datadog_checks.base.utils.http.requests_kerberos.HTTPKerberosAuth') as m:
RequestsWrapper(instance, init_config)
m.assert_called_once_with(
mutual_authentication=requests_kerberos.REQUIRED,
delegate=False,
force_preemptive=False,
hostname_override=None,
principal=None,
)
@pytest.mark.skipif(running_on_windows_ci(), reason='Test cannot be run on Windows CI')
def test_kerberos_auth_noconf(self, kerberos):
    """Without any Kerberos configuration, the test realm rejects the request (401)."""
    instance = {}
    init_config = {}
    http = RequestsWrapper(instance, init_config)
    response = http.get(kerberos["url"])
    assert response.status_code == 401
@pytest.mark.skipif(running_on_windows_ci(), reason='Test cannot be run on Windows CI')
def test_kerberos_auth_principal_inexistent(self, kerberos):
    """A principal present in neither the keytab nor the cache is rejected (401)."""
    instance = {
        'url': kerberos["url"],
        'auth_type': 'kerberos',
        'kerberos_auth': 'required',
        'kerberos_hostname': kerberos["hostname"],
        'kerberos_cache': "DIR:{}".format(kerberos["cache"]),
        'kerberos_keytab': kerberos["keytab"],
        'kerberos_principal': "user/doesnotexist@{}".format(kerberos["realm"]),
        'kerberos_force_initiate': 'false',
    }
    init_config = {}
    http = RequestsWrapper(instance, init_config)
    response = http.get(instance["url"])
    assert response.status_code == 401
@pytest.mark.skipif(running_on_windows_ci(), reason='Test cannot be run on Windows CI')
def test_kerberos_auth_principal_incache_nokeytab(self, kerberos):
    """A principal available in the credential cache authenticates without a keytab (200)."""
    instance = {
        'url': kerberos["url"],
        'auth_type': 'kerberos',
        'kerberos_auth': 'required',
        'kerberos_cache': "DIR:{}".format(kerberos["cache"]),
        'kerberos_hostname': kerberos["hostname"],
        'kerberos_principal': "user/nokeytab@{}".format(kerberos["realm"]),
        'kerberos_force_initiate': 'true',
    }
    init_config = {}
    http = RequestsWrapper(instance, init_config)
    response = http.get(instance["url"])
    assert response.status_code == 200
@pytest.mark.skipif(running_on_windows_ci(), reason='Test cannot be run on Windows CI')
def test_kerberos_auth_principal_inkeytab_nocache(self, kerberos):
    """A principal available only in the keytab authenticates with an empty cache dir (200)."""
    instance = {
        'url': kerberos["url"],
        'auth_type': 'kerberos',
        'kerberos_auth': 'required',
        'kerberos_hostname': kerberos["hostname"],
        # Point the cache at an empty temp dir so only the keytab can supply credentials.
        'kerberos_cache': "DIR:{}".format(kerberos["tmp_dir"]),
        'kerberos_keytab': kerberos["keytab"],
        'kerberos_principal': "user/inkeytab@{}".format(kerberos["realm"]),
        'kerberos_force_initiate': 'true',
    }
    init_config = {}
    http = RequestsWrapper(instance, init_config)
    response = http.get(instance["url"])
    assert response.status_code == 200
def test_config_ntlm(self):
    """`auth_type: ntlm` builds an HttpNtlmAuth from `ntlm_domain` and `password`."""
    instance = {'auth_type': 'ntlm', 'ntlm_domain': 'domain\\user', 'password': 'pass'}
    init_config = {}
    # Trigger lazy import
    http = RequestsWrapper(instance, init_config)
    assert isinstance(http.options['auth'], requests_ntlm.HttpNtlmAuth)
    with mock.patch('datadog_checks.base.utils.http.requests_ntlm.HttpNtlmAuth') as m:
        RequestsWrapper(instance, init_config)
        m.assert_called_once_with('domain\\user', 'pass')
def test_config_ntlm_legacy(self, caplog):
    """Setting `ntlm_domain` without `auth_type: ntlm` still works but logs a deprecation."""
    instance = {'ntlm_domain': 'domain\\user', 'password': 'pass'}
    init_config = {}
    # Trigger lazy import
    http = RequestsWrapper(instance, init_config)
    assert isinstance(http.options['auth'], requests_ntlm.HttpNtlmAuth)
    with mock.patch('datadog_checks.base.utils.http.requests_ntlm.HttpNtlmAuth') as m:
        RequestsWrapper(instance, init_config)
        m.assert_called_once_with('domain\\user', 'pass')
    # The deprecation warning must have been emitted during construction.
    assert (
        'The ability to use NTLM auth without explicitly setting auth_type to '
        '`ntlm` is deprecated and will be removed in Agent 8'
    ) in caplog.text
def test_config_aws(self):
    """`auth_type: aws` builds a BotoAWSRequestsAuth from host/region/service settings."""
    instance = {'auth_type': 'aws', 'aws_host': 'uri', 'aws_region': 'earth', 'aws_service': 'saas'}
    init_config = {}
    # Trigger lazy import
    http = RequestsWrapper(instance, init_config)
    assert isinstance(http.options['auth'], requests_aws.BotoAWSRequestsAuth)
    with mock.patch('datadog_checks.base.utils.http.requests_aws.BotoAWSRequestsAuth') as m:
        RequestsWrapper(instance, init_config)
        m.assert_called_once_with(aws_host='uri', aws_region='earth', aws_service='saas')
def test_config_aws_service_remapper(self):
    """Remapper defaults supply `aws_service`/`aws_host` when the instance omits them."""
    instance = {'auth_type': 'aws', 'aws_region': 'us-east-1'}
    init_config = {}
    remapper = {
        'aws_service': {'name': 'aws_service', 'default': 'es'},
        'aws_host': {'name': 'aws_host', 'default': 'uri'},
    }
    with mock.patch('datadog_checks.base.utils.http.requests_aws.BotoAWSRequestsAuth') as m:
        RequestsWrapper(instance, init_config, remapper)
        # The remapper defaults must flow through to the auth constructor.
        m.assert_called_once_with(aws_host='uri', aws_region='us-east-1', aws_service='es')
@pytest.mark.parametrize(
    'case, instance, match',
    [
        ('no host', {'auth_type': 'aws'}, '^AWS auth requires the setting `aws_host`$'),
        ('no region', {'auth_type': 'aws', 'aws_host': 'uri'}, '^AWS auth requires the setting `aws_region`$'),
        (
            'no service',
            {'auth_type': 'aws', 'aws_host': 'uri', 'aws_region': 'us-east-1'},
            '^AWS auth requires the setting `aws_service`$',
        ),
        ('empty host', {'auth_type': 'aws', 'aws_host': ''}, '^AWS auth requires the setting `aws_host`$'),
        (
            'empty region',
            {'auth_type': 'aws', 'aws_host': 'uri', 'aws_region': ''},
            '^AWS auth requires the setting `aws_region`$',
        ),
        (
            'empty service',
            {'auth_type': 'aws', 'aws_host': 'uri', 'aws_region': 'us-east-1', 'aws_service': ''},
            '^AWS auth requires the setting `aws_service`$',
        ),
    ],
)
def test_config_aws_invalid_cases(self, case, instance, match):
    """Missing or empty AWS auth settings must raise a ConfigurationError."""
    init_config = {}
    with pytest.raises(ConfigurationError, match=match):
        RequestsWrapper(instance, init_config)
class TestAuthTokenHandlerCreation:
    """Structural validation of the top-level `auth_token` setting."""

    def test_not_mapping(self):
        """`auth_token` must be a mapping."""
        instance = {'auth_token': ''}
        init_config = {}
        with pytest.raises(ConfigurationError, match='^The `auth_token` field must be a mapping$'):
            RequestsWrapper(instance, init_config)

    def test_no_reader(self):
        """A `reader` entry is required alongside `writer`."""
        instance = {'auth_token': {'writer': {}}}
        init_config = {}
        with pytest.raises(
            ConfigurationError, match='^The `auth_token` field must define both `reader` and `writer` settings$'
        ):
            RequestsWrapper(instance, init_config)

    def test_no_writer(self):
        """A `writer` entry is required alongside `reader`."""
        instance = {'auth_token': {'reader': {}}}
        init_config = {}
        with pytest.raises(
            ConfigurationError, match='^The `auth_token` field must define both `reader` and `writer` settings$'
        ):
            RequestsWrapper(instance, init_config)

    def test_reader_config_not_mapping(self):
        """The `reader` settings must be a mapping."""
        instance = {'auth_token': {'reader': '', 'writer': {}}}
        init_config = {}
        with pytest.raises(ConfigurationError, match='^The `reader` settings of field `auth_token` must be a mapping$'):
            RequestsWrapper(instance, init_config)

    def test_writer_config_not_mapping(self):
        """The `writer` settings must be a mapping."""
        instance = {'auth_token': {'reader': {}, 'writer': ''}}
        init_config = {}
        with pytest.raises(ConfigurationError, match='^The `writer` settings of field `auth_token` must be a mapping$'):
            RequestsWrapper(instance, init_config)

    def test_reader_type_missing(self):
        """The reader `type` is mandatory."""
        instance = {'auth_token': {'reader': {}, 'writer': {}}}
        init_config = {}
        with pytest.raises(ConfigurationError, match='^The reader `type` of field `auth_token` is required$'):
            RequestsWrapper(instance, init_config)

    def test_reader_type_not_string(self):
        """The reader `type` must be a string."""
        instance = {'auth_token': {'reader': {'type': {}}, 'writer': {}}}
        init_config = {}
        with pytest.raises(ConfigurationError, match='^The reader `type` of field `auth_token` must be a string$'):
            RequestsWrapper(instance, init_config)

    def test_reader_type_unknown(self):
        """Unknown reader types are rejected, listing the valid options."""
        instance = {'auth_token': {'reader': {'type': 'foo'}, 'writer': {}}}
        init_config = {}
        with pytest.raises(
            ConfigurationError, match='^Unknown `auth_token` reader type, must be one of: dcos_auth, file$'
        ):
            RequestsWrapper(instance, init_config)

    def test_writer_type_missing(self):
        """The writer `type` is mandatory."""
        instance = {'auth_token': {'reader': {'type': 'file'}, 'writer': {}}}
        init_config = {}
        with pytest.raises(ConfigurationError, match='^The writer `type` of field `auth_token` is required$'):
            RequestsWrapper(instance, init_config)

    def test_writer_type_not_string(self):
        """The writer `type` must be a string."""
        instance = {'auth_token': {'reader': {'type': 'file'}, 'writer': {'type': {}}}}
        init_config = {}
        with pytest.raises(ConfigurationError, match='^The writer `type` of field `auth_token` must be a string$'):
            RequestsWrapper(instance, init_config)

    def test_writer_type_unknown(self):
        """Unknown writer types are rejected, listing the valid options."""
        instance = {'auth_token': {'reader': {'type': 'file'}, 'writer': {'type': 'foo'}}}
        init_config = {}
        with pytest.raises(ConfigurationError, match='^Unknown `auth_token` writer type, must be one of: header$'):
            RequestsWrapper(instance, init_config)
class TestAuthTokenFileReaderCreation:
    """Validation of the `file` token reader's settings."""

    def test_path_missing(self):
        """`path` is mandatory for the file reader."""
        instance = {'auth_token': {'reader': {'type': 'file'}, 'writer': {'type': 'header'}}}
        init_config = {}
        with pytest.raises(ConfigurationError, match='^The `path` setting of `auth_token` reader is required$'):
            RequestsWrapper(instance, init_config)

    def test_path_not_string(self):
        """`path` must be a string."""
        instance = {'auth_token': {'reader': {'type': 'file', 'path': {}}, 'writer': {'type': 'header'}}}
        init_config = {}
        with pytest.raises(ConfigurationError, match='^The `path` setting of `auth_token` reader must be a string$'):
            RequestsWrapper(instance, init_config)

    def test_pattern_not_string(self):
        """`pattern`, if given, must be a string."""
        instance = {
            'auth_token': {'reader': {'type': 'file', 'path': '/foo', 'pattern': 0}, 'writer': {'type': 'header'}}
        }
        init_config = {}
        with pytest.raises(ConfigurationError, match='^The `pattern` setting of `auth_token` reader must be a string$'):
            RequestsWrapper(instance, init_config)

    def test_pattern_no_groups(self):
        """`pattern` must contain exactly one capture group (the token)."""
        instance = {
            'auth_token': {'reader': {'type': 'file', 'path': '/foo', 'pattern': 'bar'}, 'writer': {'type': 'header'}}
        }
        init_config = {}
        with pytest.raises(
            ValueError, match='^The pattern `bar` setting of `auth_token` reader must define exactly one group$'
        ):
            RequestsWrapper(instance, init_config)
class TestAuthTokenDCOSReaderCreation:
    """Validation of the `dcos_auth` token reader's settings."""

    def test_login_url_missing(self):
        """`login_url` is mandatory."""
        instance = {'auth_token': {'reader': {'type': 'dcos_auth'}, 'writer': {'type': 'header'}}}
        init_config = {}
        with pytest.raises(
            ConfigurationError, match='^The `login_url` setting of DC/OS auth token reader is required$'
        ):
            RequestsWrapper(instance, init_config)

    def test_login_url_not_string(self):
        """`login_url` must be a string."""
        instance = {'auth_token': {'reader': {'type': 'dcos_auth', 'login_url': {}}, 'writer': {'type': 'header'}}}
        init_config = {}
        with pytest.raises(
            ConfigurationError, match='^The `login_url` setting of DC/OS auth token reader must be a string$'
        ):
            RequestsWrapper(instance, init_config)

    def test_service_account_missing(self):
        """`service_account` is mandatory."""
        instance = {
            'auth_token': {
                'reader': {'type': 'dcos_auth', 'login_url': 'https://example.com'},
                'writer': {'type': 'header'},
            }
        }
        init_config = {}
        with pytest.raises(
            ConfigurationError, match='^The `service_account` setting of DC/OS auth token reader is required$'
        ):
            RequestsWrapper(instance, init_config)

    def test_service_account_not_string(self):
        """`service_account` must be a string."""
        instance = {
            'auth_token': {
                'reader': {'type': 'dcos_auth', 'login_url': 'https://example.com', 'service_account': {}},
                'writer': {'type': 'header'},
            }
        }
        init_config = {}
        with pytest.raises(
            ConfigurationError, match='^The `service_account` setting of DC/OS auth token reader must be a string$'
        ):
            RequestsWrapper(instance, init_config)

    def test_private_key_path_missing(self):
        """`private_key_path` is mandatory."""
        instance = {
            'auth_token': {
                'reader': {'type': 'dcos_auth', 'login_url': 'https://example.com', 'service_account': 'datadog_agent'},
                'writer': {'type': 'header'},
            }
        }
        init_config = {}
        with pytest.raises(
            ConfigurationError, match='^The `private_key_path` setting of DC/OS auth token reader is required$'
        ):
            RequestsWrapper(instance, init_config)

    def test_private_key_path_not_string(self):
        """`private_key_path` must be a string."""
        instance = {
            'auth_token': {
                'reader': {
                    'type': 'dcos_auth',
                    'login_url': 'https://example.com',
                    'service_account': 'datadog_agent',
                    'private_key_path': {},
                },
                'writer': {'type': 'header'},
            }
        }
        init_config = {}
        with pytest.raises(
            ConfigurationError, match='^The `private_key_path` setting of DC/OS auth token reader must be a string$'
        ):
            RequestsWrapper(instance, init_config)

    def test_expiration_not_integer(self):
        """`expiration`, if given, must be an integer."""
        instance = {
            'auth_token': {
                'reader': {
                    'type': 'dcos_auth',
                    'login_url': 'https://example.com',
                    'service_account': 'datadog_agent',
                    'private_key_path': 'private-key.pem',
                    'expiration': {},
                },
                'writer': {'type': 'header'},
            }
        }
        init_config = {}
        with pytest.raises(
            ConfigurationError, match='^The `expiration` setting of DC/OS auth token reader must be an integer$'
        ):
            RequestsWrapper(instance, init_config)
class TestAuthTokenHeaderWriterCreation:
    """Validation of the `header` token writer's settings."""

    def test_name_missing(self):
        """`name` (the header name) is mandatory."""
        instance = {'auth_token': {'reader': {'type': 'file', 'path': '/foo'}, 'writer': {'type': 'header'}}}
        init_config = {}
        with pytest.raises(ConfigurationError, match='^The `name` setting of `auth_token` writer is required$'):
            RequestsWrapper(instance, init_config)

    def test_name_not_string(self):
        """`name` must be a string."""
        instance = {'auth_token': {'reader': {'type': 'file', 'path': '/foo'}, 'writer': {'type': 'header', 'name': 0}}}
        init_config = {}
        with pytest.raises(ConfigurationError, match='^The `name` setting of `auth_token` writer must be a string$'):
            RequestsWrapper(instance, init_config)

    def test_value_not_string(self):
        """`value` (the header template), if given, must be a string."""
        instance = {
            'auth_token': {
                'reader': {'type': 'file', 'path': '/foo'},
                'writer': {'type': 'header', 'name': 'foo', 'value': 0},
            }
        }
        init_config = {}
        with pytest.raises(ConfigurationError, match='^The `value` setting of `auth_token` writer must be a string$'):
            RequestsWrapper(instance, init_config)

    def test_placeholder_not_string(self):
        """`placeholder`, if given, must be a string."""
        instance = {
            'auth_token': {
                'reader': {'type': 'file', 'path': '/foo'},
                'writer': {'type': 'header', 'name': 'foo', 'value': 'bar', 'placeholder': 0},
            }
        }
        init_config = {}
        with pytest.raises(
            ConfigurationError, match='^The `placeholder` setting of `auth_token` writer must be a string$'
        ):
            RequestsWrapper(instance, init_config)

    def test_placeholder_empty_string(self):
        """`placeholder` cannot be an empty string."""
        instance = {
            'auth_token': {
                'reader': {'type': 'file', 'path': '/foo'},
                'writer': {'type': 'header', 'name': 'foo', 'value': 'bar', 'placeholder': ''},
            }
        }
        init_config = {}
        with pytest.raises(
            ConfigurationError, match='^The `placeholder` setting of `auth_token` writer cannot be an empty string$'
        ):
            RequestsWrapper(instance, init_config)

    def test_placeholder_not_in_value(self):
        """The placeholder (default `<TOKEN>`) must appear in the header `value` template."""
        instance = {
            'auth_token': {
                'reader': {'type': 'file', 'path': '/foo'},
                'writer': {'type': 'header', 'name': 'foo', 'value': 'bar'},
            }
        }
        init_config = {}
        with pytest.raises(
            ConfigurationError,
            match='^The `value` setting of `auth_token` writer does not contain the placeholder string `<TOKEN>`$',
        ):
            RequestsWrapper(instance, init_config)
class TestAuthTokenReadFile:
    """Reading the token from a file via the `file` reader at request time."""

    def test_pattern_no_match(self):
        """If the configured pattern matches nothing in the file, the request raises ValueError."""
        with TempDir() as temp_dir:
            token_file = os.path.join(temp_dir, 'token.txt')
            instance = {
                'auth_token': {
                    'reader': {'type': 'file', 'path': token_file, 'pattern': 'foo(.+)'},
                    'writer': {'type': 'header', 'name': 'Authorization', 'value': 'Bearer <TOKEN>'},
                }
            }
            init_config = {}
            http = RequestsWrapper(instance, init_config)
            with mock.patch('requests.get'):
                # File content does not contain the `foo` prefix required by the pattern.
                write_file(token_file, '\nsecret\nsecret\n')
                with pytest.raises(
                    ValueError,
                    match='^{}$'.format(
                        re.escape('The pattern `foo(.+)` does not match anything in file: {}'.format(token_file))
                    ),
                ):
                    http.get('https://www.google.com')

    def test_pattern_match(self):
        """The pattern's single capture group becomes the token injected into the header."""
        with TempDir() as temp_dir:
            token_file = os.path.join(temp_dir, 'token.txt')
            instance = {
                'auth_token': {
                    'reader': {'type': 'file', 'path': token_file, 'pattern': 'foo(.+)'},
                    'writer': {'type': 'header', 'name': 'Authorization', 'value': 'Bearer <TOKEN>'},
                }
            }
            init_config = {}
            http = RequestsWrapper(instance, init_config)
            # Only the first match (`foobar` -> group `bar`) is used.
            expected_headers = {'Authorization': 'Bearer bar'}
            expected_headers.update(DEFAULT_OPTIONS['headers'])
            with mock.patch('requests.get') as get:
                write_file(token_file, '\nfoobar\nfoobaz\n')
                http.get('https://www.google.com')
                get.assert_called_with(
                    'https://www.google.com',
                    headers=expected_headers,
                    auth=None,
                    cert=None,
                    proxies=None,
                    timeout=(10.0, 10.0),
                    verify=True,
                )
            assert http.options['headers'] == expected_headers
class TestAuthTokenDCOS:
    """End-to-end flow of the DC/OS auth token reader with the header writer."""

    def test_token_auth(self):
        """A signed JWT is POSTed to the login URL; the returned token is sent in headers."""

        class MockResponse:
            # Minimal stand-in for requests.Response used by the mocked transport.
            def __init__(self, json_data, status_code):
                self.json_data = json_data
                self.status_code = status_code

            def json(self):
                return self.json_data

            def raise_for_status(self):
                return True

        priv_key_path = os.path.join(os.path.dirname(__file__), 'fixtures', 'dcos', 'private-key.pem')
        pub_key_path = os.path.join(os.path.dirname(__file__), 'fixtures', 'dcos', 'public-key.pem')
        exp = 3600
        instance = {
            'auth_token': {
                'reader': {
                    'type': 'dcos_auth',
                    'login_url': 'https://leader.mesos/acs/api/v1/auth/login',
                    'service_account': 'datadog_agent',
                    'private_key_path': priv_key_path,
                    'expiration': exp,
                },
                'writer': {'type': 'header', 'name': 'Authorization', 'value': 'token=<TOKEN>'},
            }
        }
        init_config = {}

        def login(*args, **kwargs):
            # Mocked `requests.post`: validate the login payload and hand back a token.
            if kwargs['url'] == 'https://leader.mesos/acs/api/v1/auth/login':
                json = kwargs['json']
                assert json['uid'] == 'datadog_agent'
                public_key = read_file(pub_key_path)
                # The service-account JWT must verify against the matching public key (RS256).
                decoded = jwt.decode(json['token'], public_key, algorithms='RS256')
                assert decoded['uid'] == 'datadog_agent'
                assert isinstance(decoded['exp'], int)
                # Expiration should be roughly now + configured `exp` (10s tolerance).
                assert abs(decoded['exp'] - (get_timestamp() + exp)) < 10
                return MockResponse({'token': 'auth-token'}, 200)
            return MockResponse(None, 404)

        def auth(*args, **kwargs):
            # Mocked `requests.get`: the acquired token must appear in the Authorization header.
            if args[0] == 'https://leader.mesos/service/some-service':
                assert kwargs['headers']['Authorization'] == 'token=auth-token'
                return MockResponse({}, 200)
            return MockResponse(None, 404)

        http = RequestsWrapper(instance, init_config)
        with mock.patch('requests.post', side_effect=login), mock.patch('requests.get', side_effect=auth):
            http.get('https://leader.mesos/service/some-service')
class TestAuthTokenWriteHeader:
    """Behavior of the `header` token writer's defaults."""

    def test_default_placeholder_same_as_value(self):
        """Without a `value` template, the raw token becomes the entire header value."""
        with TempDir() as temp_dir:
            token_file = os.path.join(temp_dir, 'token.txt')
            instance = {
                'auth_token': {
                    'reader': {'type': 'file', 'path': token_file},
                    'writer': {'type': 'header', 'name': 'X-Vault-Token'},
                }
            }
            init_config = {}
            http = RequestsWrapper(instance, init_config)
            expected_headers = {'X-Vault-Token': 'foobar'}
            expected_headers.update(DEFAULT_OPTIONS['headers'])
            with mock.patch('requests.get') as get:
                write_file(token_file, '\nfoobar\n')
                http.get('https://www.google.com')
                get.assert_called_with(
                    'https://www.google.com',
                    headers=expected_headers,
                    auth=None,
                    cert=None,
                    proxies=None,
                    timeout=(10.0, 10.0),
                    verify=True,
                )
            assert http.options['headers'] == expected_headers
class TestAuthTokenFileReaderWithHeaderWriter:
    """Token caching and refresh semantics of the file reader + header writer combo."""

    def test_read_before_first_request(self):
        """The token is read once before the first request and then cached."""
        with TempDir() as temp_dir:
            token_file = os.path.join(temp_dir, 'token.txt')
            instance = {
                'auth_token': {
                    'reader': {'type': 'file', 'path': token_file},
                    'writer': {'type': 'header', 'name': 'Authorization', 'value': 'Bearer <TOKEN>'},
                }
            }
            init_config = {}
            http = RequestsWrapper(instance, init_config)
            expected_headers = {'Authorization': 'Bearer secret1'}
            expected_headers.update(DEFAULT_OPTIONS['headers'])
            with mock.patch('requests.get') as get:
                write_file(token_file, '\nsecret1\n')
                http.get('https://www.google.com')
                get.assert_called_with(
                    'https://www.google.com',
                    headers=expected_headers,
                    auth=None,
                    cert=None,
                    proxies=None,
                    timeout=(10.0, 10.0),
                    verify=True,
                )
                assert http.options['headers'] == expected_headers

                # Should use cached token: rewriting the file must NOT change the header
                # sent on a subsequent successful request.
                write_file(token_file, '\nsecret2\n')
                http.get('https://www.google.com')
                get.assert_called_with(
                    'https://www.google.com',
                    headers=expected_headers,
                    auth=None,
                    cert=None,
                    proxies=None,
                    timeout=(10.0, 10.0),
                    verify=True,
                )
                assert http.options['headers'] == expected_headers

    def test_refresh_after_connection_error(self):
        """A failed request triggers a token re-read and a retry with the new token."""
        with TempDir() as temp_dir:
            token_file = os.path.join(temp_dir, 'token.txt')
            instance = {
                'auth_token': {
                    'reader': {'type': 'file', 'path': token_file},
                    'writer': {'type': 'header', 'name': 'Authorization', 'value': 'Bearer <TOKEN>'},
                }
            }
            init_config = {}
            http = RequestsWrapper(instance, init_config)
            with mock.patch('requests.get'):
                write_file(token_file, '\nsecret1\n')
                http.get('https://www.google.com')

            # TODO: use nonlocal when we drop Python 2 support
            counter = {'errors': 0}

            def raise_error_once(*args, **kwargs):
                # Fail only the first call so the wrapper's retry path is exercised.
                counter['errors'] += 1
                if counter['errors'] <= 1:
                    raise Exception

            expected_headers = {'Authorization': 'Bearer secret2'}
            expected_headers.update(DEFAULT_OPTIONS['headers'])
            with mock.patch('requests.get', side_effect=raise_error_once) as get:
                write_file(token_file, '\nsecret2\n')
                http.get('https://www.google.com')
                # The retry must carry the refreshed token.
                get.assert_called_with(
                    'https://www.google.com',
                    headers=expected_headers,
                    auth=None,
                    cert=None,
                    proxies=None,
                    timeout=(10.0, 10.0),
                    verify=True,
                )
                assert http.options['headers'] == expected_headers

    def test_refresh_after_bad_status_code(self):
        """An error status code (raise_for_status) also triggers a token refresh and retry."""
        with TempDir() as temp_dir:
            token_file = os.path.join(temp_dir, 'token.txt')
            instance = {
                'auth_token': {
                    'reader': {'type': 'file', 'path': token_file},
                    'writer': {'type': 'header', 'name': 'Authorization', 'value': 'Bearer <TOKEN>'},
                }
            }
            init_config = {}
            http = RequestsWrapper(instance, init_config)
            with mock.patch('requests.get'):
                write_file(token_file, '\nsecret1\n')
                http.get('https://www.google.com')

            def error():
                raise Exception()

            expected_headers = {'Authorization': 'Bearer secret2'}
            expected_headers.update(DEFAULT_OPTIONS['headers'])
            # Every response "fails" raise_for_status, forcing the refresh path.
            with mock.patch('requests.get', return_value=mock.MagicMock(raise_for_status=error)) as get:
                write_file(token_file, '\nsecret2\n')
                http.get('https://www.google.com')
                get.assert_called_with(
                    'https://www.google.com',
                    headers=expected_headers,
                    auth=None,
                    cert=None,
                    proxies=None,
                    timeout=(10.0, 10.0),
                    verify=True,
                )
                assert http.options['headers'] == expected_headers
class TestProxies:
    """Proxy option resolution (agent config vs init_config vs instance) and `no_proxy` matching."""

    def test_config_default(self):
        """No proxies and no `no_proxy` URIs by default."""
        instance = {}
        init_config = {}
        http = RequestsWrapper(instance, init_config)

        assert http.options['proxies'] is None
        assert http.no_proxy_uris is None

    def test_config_proxy_agent(self):
        """Proxy settings come from the agent config; `no_proxy` splits on `,` and `;`."""
        with mock.patch(
            'datadog_checks.base.stubs.datadog_agent.get_config',
            return_value={'http': 'http_host', 'https': 'https_host', 'no_proxy': 'uri1,uri2;uri3,uri4'},
        ):
            instance = {}
            init_config = {}
            http = RequestsWrapper(instance, init_config)

            assert http.options['proxies'] == {'http': 'http_host', 'https': 'https_host'}
            assert http.no_proxy_uris == ['uri1', 'uri2', 'uri3', 'uri4']

    def test_config_proxy_init_config_override(self):
        """`init_config` proxy settings take precedence over the agent config."""
        with mock.patch(
            'datadog_checks.base.stubs.datadog_agent.get_config',
            return_value={'http': 'unused', 'https': 'unused', 'no_proxy': 'unused'},
        ):
            instance = {}
            init_config = {'proxy': {'http': 'http_host', 'https': 'https_host', 'no_proxy': 'uri1,uri2;uri3,uri4'}}
            http = RequestsWrapper(instance, init_config)

            assert http.options['proxies'] == {'http': 'http_host', 'https': 'https_host'}
            assert http.no_proxy_uris == ['uri1', 'uri2', 'uri3', 'uri4']

    def test_config_proxy_instance_override(self):
        """Instance proxy settings take precedence over init_config and the agent config."""
        with mock.patch(
            'datadog_checks.base.stubs.datadog_agent.get_config',
            return_value={'http': 'unused', 'https': 'unused', 'no_proxy': 'unused'},
        ):
            instance = {'proxy': {'http': 'http_host', 'https': 'https_host', 'no_proxy': 'uri1,uri2;uri3,uri4'}}
            init_config = {'proxy': {'http': 'unused', 'https': 'unused', 'no_proxy': 'unused'}}
            http = RequestsWrapper(instance, init_config)

            assert http.options['proxies'] == {'http': 'http_host', 'https': 'https_host'}
            assert http.no_proxy_uris == ['uri1', 'uri2', 'uri3', 'uri4']

    def test_config_no_proxy_as_list(self):
        """`no_proxy` may already be a list instead of a delimited string."""
        with mock.patch(
            'datadog_checks.base.stubs.datadog_agent.get_config',
            return_value={'http': 'http_host', 'https': 'https_host', 'no_proxy': ['uri1', 'uri2', 'uri3', 'uri4']},
        ):
            instance = {}
            init_config = {}
            http = RequestsWrapper(instance, init_config)

            assert http.options['proxies'] == {'http': 'http_host', 'https': 'https_host'}
            assert http.no_proxy_uris == ['uri1', 'uri2', 'uri3', 'uri4']

    def test_config_proxy_skip(self):
        """`skip_proxy` on the instance replaces proxies with empty strings (bypass)."""
        instance = {'proxy': {'http': 'unused', 'https': 'unused', 'no_proxy': 'unused'}, 'skip_proxy': True}
        init_config = {'proxy': {'http': 'unused', 'https': 'unused', 'no_proxy': 'unused'}}
        http = RequestsWrapper(instance, init_config)

        assert http.options['proxies'] == {'http': '', 'https': ''}
        assert http.no_proxy_uris is None

    def test_config_proxy_skip_init_config(self):
        """`skip_proxy` in init_config also bypasses configured proxies."""
        instance = {'proxy': {'http': 'unused', 'https': 'unused', 'no_proxy': 'unused'}}
        init_config = {'proxy': {'http': 'unused', 'https': 'unused', 'no_proxy': 'unused'}, 'skip_proxy': True}
        http = RequestsWrapper(instance, init_config)

        assert http.options['proxies'] == {'http': '', 'https': ''}
        assert http.no_proxy_uris is None

    def test_proxy_env_vars_skip(self):
        """With `skip_proxy`, proxy environment variables are ignored (requests succeed)."""
        instance = {'skip_proxy': True}
        init_config = {}
        http = RequestsWrapper(instance, init_config)

        with EnvVars({'HTTP_PROXY': 'http://1.2.3.4:567'}):
            response = http.get('http://www.google.com')
            response.raise_for_status()

        with EnvVars({'HTTPS_PROXY': 'https://1.2.3.4:567'}):
            response = http.get('https://www.google.com')
            response.raise_for_status()

    def test_proxy_env_vars_override_skip_fail(self):
        """Passing `proxies=None` explicitly re-enables env var proxies despite `skip_proxy`."""
        instance = {'skip_proxy': True}
        init_config = {}
        http = RequestsWrapper(instance, init_config)

        with EnvVars({'HTTP_PROXY': 'http://1.2.3.4:567'}):
            with pytest.raises((ConnectTimeout, ProxyError)):
                http.get('http://www.google.com', timeout=1, proxies=None)

        with EnvVars({'HTTPS_PROXY': 'https://1.2.3.4:567'}):
            with pytest.raises((ConnectTimeout, ProxyError)):
                http.get('https://www.google.com', timeout=1, proxies=None)

    def test_proxy_bad(self):
        """An unreachable proxy makes requests fail."""
        instance = {'proxy': {'http': 'http://1.2.3.4:567', 'https': 'https://1.2.3.4:567'}}
        init_config = {}
        http = RequestsWrapper(instance, init_config)

        with pytest.raises((ConnectTimeout, ProxyError)):
            http.get('http://www.google.com', timeout=1)

        with pytest.raises((ConnectTimeout, ProxyError)):
            http.get('https://www.google.com', timeout=1)

    def test_proxy_bad_no_proxy_override_success(self):
        """A `no_proxy` match bypasses an otherwise-unreachable proxy."""
        instance = {
            'proxy': {'http': 'http://1.2.3.4:567', 'https': 'https://1.2.3.4:567', 'no_proxy': 'unused,google.com'}
        }
        init_config = {}
        http = RequestsWrapper(instance, init_config)

        response = http.get('http://www.google.com')
        response.raise_for_status()

        response = http.get('https://www.google.com')
        response.raise_for_status()

    def test_no_proxy_uris_coverage(self):
        """Exercise the empty-iterable branch of `no_proxy` handling for coverage."""
        http = RequestsWrapper({}, {})

        # Coverage is not smart enough to detect that looping an empty
        # iterable will never occur when gated by `if iterable:`.
        http.no_proxy_uris = mock.MagicMock()

        http.no_proxy_uris.__iter__ = lambda self, *args, **kwargs: iter([])
        http.no_proxy_uris.__bool__ = lambda self, *args, **kwargs: True
        # TODO: Remove with Python 2
        http.no_proxy_uris.__nonzero__ = lambda self, *args, **kwargs: True

        http.get('https://www.google.com')

    @pytest.mark.skipif(running_on_windows_ci(), reason='Test cannot be run on Windows CI')
    def test_socks5_proxy(self, socks5_proxy):
        """SOCKS5(h) proxies work, including proxy-side hostname resolution."""
        instance = {'proxy': {'http': 'socks5h://{}'.format(socks5_proxy)}}
        init_config = {}
        http = RequestsWrapper(instance, init_config)

        http.get('http://www.google.com')
        http.get('http://nginx')

    @pytest.mark.skipif(running_on_windows_ci(), reason='Test cannot be run on Windows CI')
    def test_no_proxy_single_wildcard(self, socks5_proxy):
        """A lone `*` in `no_proxy` bypasses the proxy for every host."""
        instance = {'proxy': {'http': 'http://1.2.3.4:567', 'no_proxy': '.foo,bar,*'}}
        init_config = {}
        http = RequestsWrapper(instance, init_config)

        http.get('http://www.example.org')
        http.get('http://www.example.com')
        http.get('http://127.0.0.9')

    @pytest.mark.skipif(running_on_windows_ci(), reason='Test cannot be run on Windows CI')
    def test_no_proxy_domain(self, socks5_proxy):
        """Domain-style `no_proxy` entries match subdomains and exact hosts."""
        instance = {'proxy': {'http': 'http://1.2.3.4:567', 'no_proxy': '.google.com,*.example.org,example.com,9'}}
        init_config = {}
        http = RequestsWrapper(instance, init_config)

        # no_proxy match: .google.com
        http.get('http://www.google.com')

        # no_proxy match: *.example.org
        http.get('http://www.example.org')

        # no_proxy match: example.com
        http.get('http://www.example.com')
        http.get('http://example.com')

        # no_proxy match: 9
        http.get('http://127.0.0.9')

    @pytest.mark.skipif(running_on_windows_ci(), reason='Test cannot be run on Windows CI')
    def test_no_proxy_domain_fail(self, socks5_proxy):
        """Hosts not matching any `no_proxy` entry still go through the (bad) proxy and fail."""
        instance = {'proxy': {'http': 'http://1.2.3.4:567', 'no_proxy': '.google.com,example.com,example,9'}}
        init_config = {}
        http = RequestsWrapper(instance, init_config)

        # no_proxy not match: .google.com
        # ".y.com" matches "x.y.com" but not "y.com"
        with pytest.raises((ConnectTimeout, ProxyError)):
            http.get('http://google.com', timeout=1)

        # no_proxy not match: example or example.com
        with pytest.raises((ConnectTimeout, ProxyError)):
            http.get('http://notexample.com', timeout=1)

        with pytest.raises((ConnectTimeout, ProxyError)):
            http.get('http://example.org', timeout=1)

        # no_proxy not match: 9
        with pytest.raises((ConnectTimeout, ProxyError)):
            http.get('http://127.0.0.99', timeout=1)

    @pytest.mark.skipif(running_on_windows_ci(), reason='Test cannot be run on Windows CI')
    def test_no_proxy_ip(self, socks5_proxy):
        """IP, CIDR, net-mask and host-mask `no_proxy` entries all bypass the proxy."""
        instance = {
            'proxy': {
                'http': 'http://1.2.3.4:567',
                'no_proxy': '127.0.0.1,127.0.0.2/32,127.1.0.0/25,127.1.1.0/255.255.255.128,127.1.2.0/0.0.0.127',
            }
        }
        init_config = {}
        http = RequestsWrapper(instance, init_config)

        # no_proxy match: 127.0.0.1
        http.get('http://127.0.0.1', timeout=1)

        # no_proxy match: 127.0.0.2/32
        http.get('http://127.0.0.2', timeout=1)

        # no_proxy match: IP within 127.1.0.0/25 subnet - cidr bits format
        http.get('http://127.1.0.50', timeout=1)
        http.get('http://127.1.0.100', timeout=1)

        # no_proxy match: IP within 127.1.1.0/255.255.255.128 subnet - net mask format
        http.get('http://127.1.1.50', timeout=1)
        http.get('http://127.1.1.100', timeout=1)

        # no_proxy match: IP within 127.1.2.0/0.0.0.127 subnet - host mask format
        http.get('http://127.1.2.50', timeout=1)
        http.get('http://127.1.2.100', timeout=1)

    @pytest.mark.skipif(running_on_windows_ci(), reason='Test cannot be run on Windows CI')
    def test_no_proxy_ip_fail(self, socks5_proxy):
        """IPs outside every `no_proxy` entry go through the (bad) proxy and fail.

        Fix: the second request of each subnet case previously shared a single
        `pytest.raises` block with the first; since the first call raises, the
        second was never executed (dead assertion). Each request now has its
        own `pytest.raises` so both boundary IPs are actually exercised.
        """
        instance = {
            'proxy': {
                'http': 'http://1.2.3.4:567',
                'no_proxy': '127.0.0.1,127.0.0.2/32,127.1.0.0/25,127.1.1.0/255.255.255.128,127.1.2.0/0.0.0.127',
            }
        }
        init_config = {}
        http = RequestsWrapper(instance, init_config)

        # no_proxy not match: 127.0.0.1
        with pytest.raises((ConnectTimeout, ProxyError)):
            http.get('http://127.0.0.11', timeout=1)

        # no_proxy not match: 127.0.0.2/32
        with pytest.raises((ConnectTimeout, ProxyError)):
            http.get('http://127.0.0.22', timeout=1)

        # no_proxy not match: IP outside 127.1.0.0/25 subnet - cidr bits format
        with pytest.raises((ConnectTimeout, ProxyError)):
            http.get('http://127.1.0.150', timeout=1)
        with pytest.raises((ConnectTimeout, ProxyError)):
            http.get('http://127.1.0.200', timeout=1)

        # no_proxy not match: IP outside 127.1.1.0/255.255.255.128 subnet - net mask format
        with pytest.raises((ConnectTimeout, ProxyError)):
            http.get('http://127.1.1.150', timeout=1)
        with pytest.raises((ConnectTimeout, ProxyError)):
            http.get('http://127.1.1.200', timeout=1)

        # no_proxy not match: IP outside 127.1.2.0/0.0.0.127 subnet - host mask format
        with pytest.raises((ConnectTimeout, ProxyError)):
            http.get('http://127.1.2.150', timeout=1)
        with pytest.raises((ConnectTimeout, ProxyError)):
            http.get('http://127.1.2.200', timeout=1)
class TestIgnoreTLSWarning:
def test_config_default(self):
    """TLS warnings are honored (not ignored) unless explicitly configured."""
    wrapper = RequestsWrapper({}, {})
    assert wrapper.ignore_tls_warning is False
def test_config_flag(self):
    """`tls_ignore_warning: true` on the instance enables ignoring."""
    wrapper = RequestsWrapper({'tls_ignore_warning': True}, {})
    assert wrapper.ignore_tls_warning is True
def test_init_config_flag(self):
    """`tls_ignore_warning: true` in init_config enables ignoring."""
    wrapper = RequestsWrapper({}, {'tls_ignore_warning': True})
    assert wrapper.ignore_tls_warning is True
def test_instance_and_init_flag(self):
    """An explicit instance value takes precedence over init_config."""
    wrapper = RequestsWrapper({'tls_ignore_warning': False}, {'tls_ignore_warning': True})
    assert wrapper.ignore_tls_warning is False
def test_default_no_ignore(self, caplog):
    """By default, an unverified HTTPS request logs a WARNING."""
    instance = {}
    init_config = {}
    http = RequestsWrapper(instance, init_config)
    with caplog.at_level(logging.DEBUG), mock.patch('requests.get'):
        http.get('https://www.google.com', verify=False)
    expected_message = 'An unverified HTTPS request is being made to https://www.google.com'
    for _, level, message in caplog.record_tuples:
        if level == logging.WARNING and message == expected_message:
            break
    else:
        # for/else: reached only when no matching WARNING record was found.
        raise AssertionError('Expected WARNING log with message `{}`'.format(expected_message))
def test_default_no_ignore_http(self, caplog):
    """Plain HTTP with `verify=False` must not trigger the TLS warning."""
    instance = {}
    init_config = {}
    http = RequestsWrapper(instance, init_config)
    with caplog.at_level(logging.DEBUG), mock.patch('requests.get'):
        http.get('http://www.google.com', verify=False)
    # No WARNING records at all should be emitted for an http:// URL.
    assert sum(1 for _, level, _ in caplog.record_tuples if level == logging.WARNING) == 0
def test_ignore(self, caplog):
    """With `tls_ignore_warning: true`, the unverified-request warning is suppressed."""
    instance = {'tls_ignore_warning': True}
    init_config = {}
    http = RequestsWrapper(instance, init_config)
    with caplog.at_level(logging.DEBUG), mock.patch('requests.get'):
        http.get('https://www.google.com', verify=False)
    expected_message = 'An unverified HTTPS request is being made to https://www.google.com'
    for _, _, message in caplog.record_tuples:
        assert message != expected_message
def test_default_no_ignore_session(self, caplog):
instance = {'persist_connections': True}
init_config = {}
http = RequestsWrapper(instance, init_config)
with caplog.at_level(logging.DEBUG), mock.patch('requests.get'):
http.get('https://www.google.com', verify=False)
expected_message = 'An unverified HTTPS request is being made to https://www.google.com'
for _, level, message in caplog.record_tuples:
if level == logging.WARNING and message == expected_message:
break
else:
raise AssertionError('Expected WARNING log with message `{}`'.format(expected_message))
def test_ignore_session(self, caplog):
instance = {'tls_ignore_warning': True, 'persist_connections': True}
init_config = {}
http = RequestsWrapper(instance, init_config)
with caplog.at_level(logging.DEBUG), mock.patch('requests.get'):
http.get('https://www.google.com', verify=False)
expected_message = 'An unverified HTTPS request is being made to https://www.google.com'
for _, _, message in caplog.record_tuples:
assert message != expected_message
def test_init_ignore(self, caplog):
instance = {}
init_config = {'tls_ignore_warning': True}
http = RequestsWrapper(instance, init_config)
with caplog.at_level(logging.DEBUG), mock.patch('requests.get'):
http.get('https://www.google.com', verify=False)
expected_message = 'An unverified HTTPS request is being made to https://www.google.com'
for _, _, message in caplog.record_tuples:
assert message != expected_message
def test_default_init_no_ignore(self, caplog):
instance = {}
init_config = {'tls_ignore_warning': False}
http = RequestsWrapper(instance, init_config)
with caplog.at_level(logging.DEBUG), mock.patch('requests.get'):
http.get('https://www.google.com', verify=False)
expected_message = 'An unverified HTTPS request is being made to https://www.google.com'
for _, level, message in caplog.record_tuples:
if level == logging.WARNING and message == expected_message:
break
else:
raise AssertionError('Expected WARNING log with message `{}`'.format(expected_message))
def test_instance_ignore(self, caplog):
instance = {'tls_ignore_warning': True}
init_config = {'tls_ignore_warning': False}
http = RequestsWrapper(instance, init_config)
with caplog.at_level(logging.DEBUG), mock.patch('requests.get'):
http.get('https://www.google.com', verify=False)
expected_message = 'An unverified HTTPS request is being made to https://www.google.com'
for _, _, message in caplog.record_tuples:
assert message != expected_message
def test_instance_no_ignore(self, caplog):
instance = {'tls_ignore_warning': False}
init_config = {'tls_ignore_warning': True}
http = RequestsWrapper(instance, init_config)
with caplog.at_level(logging.DEBUG), mock.patch('requests.get'):
http.get('https://www.google.com', verify=False)
expected_message = 'An unverified HTTPS request is being made to https://www.google.com'
for _, level, message in caplog.record_tuples:
if level == logging.WARNING and message == expected_message:
break
else:
raise AssertionError('Expected WARNING log with message `{}`'.format(expected_message))
class TestUnixDomainSocket:
    """Unix-domain-socket URL helpers and adapter wiring."""

    @pytest.mark.parametrize(
        'value, expected',
        [
            pytest.param('http://example.org', False, id='non-uds-url'),
            pytest.param('unix:///var/run/test.sock/info', True, id='unquoted'),
            pytest.param('unix://%2Fvar%2Frun%2Ftest.sock', True, id='quoted'),
        ],
    )
    def test_is_uds_url(self, value, expected):
        # type: (str, bool) -> None
        result = is_uds_url(value)
        assert result == expected

    @pytest.mark.parametrize(
        'value, expected',
        [
            pytest.param('http://example.org', 'http://example.org', id='non-uds-url'),
            pytest.param('unix:///var/run/test.sock/info', 'unix://%2Fvar%2Frun%2Ftest.sock/info', id='uds-url'),
            pytest.param('unix:///var/run/test.sock', 'unix://%2Fvar%2Frun%2Ftest.sock', id='uds-url-no-path'),
            pytest.param(
                'unix://%2Fvar%2Frun%2Ftest.sock/info', 'unix://%2Fvar%2Frun%2Ftest.sock/info', id='already-quoted'
            ),
        ],
    )
    def test_quote_uds_url(self, value, expected):
        # type: (str, str) -> None
        result = quote_uds_url(value)
        assert result == expected

    def test_adapter_mounted(self):
        # type: () -> None
        # The wrapper's session must route unix:// URLs through the UDS adapter.
        http = RequestsWrapper({}, {})
        adapter = http.session.get_adapter(url='unix:///var/run/test.sock')
        assert adapter is not None
        assert isinstance(adapter, requests_unixsocket.UnixAdapter)

    @pytest.mark.skipif(ON_WINDOWS, reason='AF_UNIX not supported by Python on Windows yet')
    def test_uds_request(self, uds_path):
        # type: (str) -> None
        http = RequestsWrapper({}, {})
        response = http.get('unix://{}'.format(uds_path))
        assert response.status_code == 200
        assert response.text == 'Hello, World!'
class TestSession:
    """Validate lazy construction of the underlying requests.Session."""

    def test_default_none(self):
        # No session is created until one is actually needed.
        http = RequestsWrapper({}, {})
        assert http._session is None

    def test_lazy_create(self):
        http = RequestsWrapper({}, {})
        session = http.session
        assert session is http._session
        assert isinstance(session, requests.Session)

    def test_attributes(self):
        # Every configured option is mirrored onto the session object.
        http = RequestsWrapper({}, {})
        session = http.session
        for key, value in iteritems(http.options):
            assert hasattr(session, key)
            assert getattr(session, key) == value
class TestRemapper:
    """Verify legacy config fields get remapped onto the standard options."""

    def test_legacy_no_proxy(self):
        http = RequestsWrapper({'no_proxy': True}, {})
        assert http.options['proxies'] == {'http': '', 'https': ''}
        assert http.no_proxy_uris is None

    def test_no_default(self):
        # Absent a value, the standard field's own default applies.
        remapper = {'prometheus_timeout': {'name': 'timeout'}}
        http = RequestsWrapper({}, {}, remapper)
        default_timeout = STANDARD_FIELDS['timeout']
        assert http.options['timeout'] == (default_timeout, default_timeout)

    def test_invert(self):
        remapper = {'disable_ssl_validation': {'name': 'tls_verify', 'default': False, 'invert': True}}
        http = RequestsWrapper({'disable_ssl_validation': False}, {}, remapper)
        assert http.options['verify'] is True

    def test_invert_without_explicit_default(self):
        remapper = {'disable_ssl_validation': {'name': 'tls_verify', 'invert': True}}
        http = RequestsWrapper({}, {}, remapper)
        assert http.options['verify'] is True

    def test_standard_override(self):
        # An explicitly-set standard field beats the remapped legacy one.
        remapper = {'disable_ssl_validation': {'name': 'tls_verify', 'default': False, 'invert': True}}
        http = RequestsWrapper({'disable_ssl_validation': True, 'tls_verify': False}, {}, remapper)
        assert http.options['verify'] is False

    def test_unknown_name_default(self):
        # Remapping to a non-standard target name is ignored entirely.
        remapper = {'verify_tls': {'name': 'verify', 'default': False}}
        http = RequestsWrapper({}, {}, remapper)
        assert http.options['verify'] is True
class TestLogger:
    """Cover the `log_requests` flag, which routes request logs to check.log."""

    # Debug message emitted when request logging is enabled.
    EXPECTED_MESSAGE = 'Sending GET request to https://www.google.com'

    def _run_request(self, check, caplog):
        # Perform a mocked GET while capturing every log record.
        with caplog.at_level(logging.DEBUG), mock.patch('requests.get'):
            check.http.get('https://www.google.com')

    def _assert_logged(self, caplog):
        for _, level, message in caplog.record_tuples:
            if level == logging.DEBUG and message == self.EXPECTED_MESSAGE:
                return
        raise AssertionError('Expected DEBUG log with message `{}`'.format(self.EXPECTED_MESSAGE))

    def _assert_not_logged(self, caplog):
        for _, _, message in caplog.record_tuples:
            assert message != self.EXPECTED_MESSAGE

    def test_default(self, caplog):
        check = AgentCheck('test', {}, [{}])
        self._run_request(check, caplog)
        self._assert_not_logged(caplog)

    def test_instance(self, caplog):
        check = AgentCheck('test', {}, [{'log_requests': True}])
        assert check.http.logger is check.log
        self._run_request(check, caplog)
        self._assert_logged(caplog)

    def test_init_config(self, caplog):
        check = AgentCheck('test', {'log_requests': True}, [{}])
        assert check.http.logger is check.log
        self._run_request(check, caplog)
        self._assert_logged(caplog)

    def test_instance_override(self, caplog):
        # The instance setting always wins over init_config.
        check = AgentCheck('test', {'log_requests': True}, [{'log_requests': False}])
        self._run_request(check, caplog)
        self._assert_not_logged(caplog)
class TestAPI:
    """Every HTTP verb helper must forward the URL and options to either the
    module-level `requests` function or the persistent session method.
    """

    URL = 'https://www.google.com'

    @staticmethod
    def _caller(http, verb):
        # `options` clashes with the wrapper's config attribute, hence `options_method`.
        return getattr(http, 'options_method' if verb == 'options' else verb)

    def _check_direct(self, verb, override=None):
        # One-shot path: the module-level requests.<verb> function is used.
        http = RequestsWrapper({}, {})
        kwargs = dict(override) if override else {}
        expected = dict(http.options, **kwargs)
        with mock.patch('requests.{}'.format(verb)):
            self._caller(http, verb)(self.URL, **kwargs)
        getattr(requests, verb).assert_called_once_with(self.URL, **expected)

    def _check_session(self, verb, override=None):
        # Session path: via persist_connections config, or a per-call persist=True.
        if override is None:
            http = RequestsWrapper({'persist_connections': True}, {})
            kwargs = {}
            expected = DEFAULT_OPTIONS
        else:
            http = RequestsWrapper({}, {})
            kwargs = dict(override, persist=True)
            expected = dict(DEFAULT_OPTIONS, **override)
        with mock.patch('datadog_checks.base.utils.http.RequestsWrapper.session'):
            self._caller(http, verb)(self.URL, **kwargs)
        getattr(http.session, verb).assert_called_once_with(self.URL, **expected)

    def test_get(self):
        self._check_direct('get')

    def test_get_session(self):
        self._check_session('get')

    def test_get_option_override(self):
        self._check_direct('get', override={'auth': ('user', 'pass')})

    def test_get_session_option_override(self):
        self._check_session('get', override={'auth': ('user', 'pass')})

    def test_post(self):
        self._check_direct('post')

    def test_post_session(self):
        self._check_session('post')

    def test_post_option_override(self):
        self._check_direct('post', override={'auth': ('user', 'pass')})

    def test_post_session_option_override(self):
        self._check_session('post', override={'auth': ('user', 'pass')})

    def test_head(self):
        self._check_direct('head')

    def test_head_session(self):
        self._check_session('head')

    def test_head_option_override(self):
        self._check_direct('head', override={'auth': ('user', 'pass')})

    def test_head_session_option_override(self):
        self._check_session('head', override={'auth': ('user', 'pass')})

    def test_put(self):
        self._check_direct('put')

    def test_put_session(self):
        self._check_session('put')

    def test_put_option_override(self):
        self._check_direct('put', override={'auth': ('user', 'pass')})

    def test_put_session_option_override(self):
        self._check_session('put', override={'auth': ('user', 'pass')})

    def test_patch(self):
        self._check_direct('patch')

    def test_patch_session(self):
        self._check_session('patch')

    def test_patch_option_override(self):
        self._check_direct('patch', override={'auth': ('user', 'pass')})

    def test_patch_session_option_override(self):
        self._check_session('patch', override={'auth': ('user', 'pass')})

    def test_delete(self):
        self._check_direct('delete')

    def test_delete_session(self):
        self._check_session('delete')

    def test_delete_option_override(self):
        self._check_direct('delete', override={'auth': ('user', 'pass')})

    def test_delete_session_option_override(self):
        self._check_session('delete', override={'auth': ('user', 'pass')})

    def test_options(self):
        self._check_direct('options')

    def test_options_session(self):
        self._check_session('options')

    def test_options_option_override(self):
        self._check_direct('options', override={'auth': ('user', 'pass')})

    def test_options_session_option_override(self):
        self._check_session('options', override={'auth': ('user', 'pass')})
class TestIntegration:
    """Tests that exercise real network requests (no mocking)."""

    def test_session_timeout(self):
        # The 80ms init_config timeout must also apply on the session path.
        http = RequestsWrapper({'persist_connections': True}, {'timeout': 0.08})
        with pytest.raises(requests.exceptions.Timeout):
            http.get('https://httpbin.org/delay/0.10')
| 39.034097
| 120
| 0.59696
| 8,478
| 76,702
| 5.207006
| 0.060392
| 0.057311
| 0.060347
| 0.092695
| 0.857809
| 0.826209
| 0.80145
| 0.772726
| 0.752384
| 0.714056
| 0
| 0.012593
| 0.264934
| 76,702
| 1,964
| 121
| 39.053971
| 0.770384
| 0.021877
| 0
| 0.596915
| 0
| 0.001341
| 0.223926
| 0.031502
| 0
| 0
| 0
| 0.000509
| 0.107311
| 1
| 0.109993
| false
| 0.018779
| 0.014085
| 0.001341
| 0.143528
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a7a897f19e575984fb22676bda487ee9a0e6f6b
| 2,339
|
py
|
Python
|
diffprof/tests/test_nfw_fitter.py
|
BaryonPasters/diffprof
|
d8064f65e24df749fb69a04c33de1f2a90157016
|
[
"BSD-3-Clause"
] | 1
|
2021-07-07T22:21:26.000Z
|
2021-07-07T22:21:26.000Z
|
diffprof/tests/test_nfw_fitter.py
|
BaryonPasters/diffprof
|
d8064f65e24df749fb69a04c33de1f2a90157016
|
[
"BSD-3-Clause"
] | 4
|
2021-05-24T18:12:36.000Z
|
2021-09-09T19:12:58.000Z
|
diffprof/tests/test_nfw_fitter.py
|
BaryonPasters/diffprof
|
d8064f65e24df749fb69a04c33de1f2a90157016
|
[
"BSD-3-Clause"
] | null | null | null |
"""
"""
import numpy as np
from ..nfw_evolution import lgc_vs_lgt, get_bounded_params
from ..fit_nfw_helpers import fit_lgconc, get_loss_data
from ..fit_nfw_helpers_fixed_k import fit_lgconc as fit_lgconc_fixed_k
from ..fit_nfw_helpers_fixed_k import get_loss_data as get_loss_data_fixed_k
from ..fit_nfw_helpers_fixed_k import FIXED_K
SEED = 32
def test_conc_fitter():
    """Fit a synthetic concentration history generated from a random point in
    parameter space and verify the fitter recovers the generating parameters.
    """
    time_grid = np.linspace(0.1, 14, 100)
    lgtime_grid = np.log10(time_grid)

    # Draw a reproducible random point in unbounded parameter space.
    rng = np.random.RandomState(SEED)
    unbounded_target = rng.normal(loc=0, size=4, scale=1)
    target_params = get_bounded_params(unbounded_target)

    # Build the noiseless target history the fitter should recover.
    lgconc_target = lgc_vs_lgt(lgtime_grid, *target_params)
    conc_target = 10 ** lgconc_target
    log_mah = np.zeros_like(conc_target) + 100
    lgm_min = 0

    _u_p0, expected_loss_data = get_loss_data(time_grid, conc_target, log_mah, lgm_min)
    best_params, _loss, _method, loss_data = fit_lgconc(time_grid, conc_target, log_mah, lgm_min)

    lgconc_best = lgc_vs_lgt(lgtime_grid, *best_params)
    assert np.allclose(lgconc_target, lgconc_best, atol=0.01)
    assert np.allclose(best_params, target_params, atol=0.01)

    # The fitter must hand back the same loss data it was constructed from.
    for expected, actual in zip(expected_loss_data, loss_data):
        assert np.allclose(expected, actual)
def test_conc_fitter_fixed_k():
    """Same recovery check as `test_conc_fitter`, but with the fixed-k variant
    of the fitter, pinning the target k parameter to FIXED_K.
    """
    time_grid = np.linspace(0.1, 14, 100)
    lgtime_grid = np.log10(time_grid)

    # Draw a reproducible random point, then pin k to the fixed value.
    rng = np.random.RandomState(SEED)
    unbounded_target = rng.normal(loc=0, size=4, scale=1)
    target_params = np.array(get_bounded_params(unbounded_target))
    target_params[1] = FIXED_K

    # Build the noiseless target history the fitter should recover.
    lgconc_target = lgc_vs_lgt(lgtime_grid, *target_params)
    conc_target = 10 ** lgconc_target
    log_mah = np.zeros_like(conc_target) + 100
    lgm_min = 0

    _u_p0, expected_loss_data = get_loss_data_fixed_k(time_grid, conc_target, log_mah, lgm_min)
    best_params, _loss, _method, loss_data = fit_lgconc_fixed_k(time_grid, conc_target, log_mah, lgm_min)

    lgconc_best = lgc_vs_lgt(lgtime_grid, *best_params)
    assert np.allclose(lgconc_target, lgconc_best, atol=0.01)
    assert np.allclose(best_params, target_params, atol=0.01)

    # The fitter must hand back the same loss data it was constructed from.
    for expected, actual in zip(expected_loss_data, loss_data):
        assert np.allclose(expected, actual)
| 37.725806
| 83
| 0.719538
| 418
| 2,339
| 3.674641
| 0.198565
| 0.078125
| 0.035156
| 0.046875
| 0.863281
| 0.82487
| 0.82487
| 0.80599
| 0.80599
| 0.760417
| 0
| 0.027836
| 0.185977
| 2,339
| 61
| 84
| 38.344262
| 0.778887
| 0.145361
| 0
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 1
| 0.045455
| false
| 0
| 0.136364
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a8601a16fcc062c2ff176fb5027415f058e8615
| 53,062
|
py
|
Python
|
bin/detail/toolchain_table.py
|
hjmallon/polly
|
6c7f1bf62b489bf76382d7fae550694ab66902e1
|
[
"BSD-2-Clause"
] | null | null | null |
bin/detail/toolchain_table.py
|
hjmallon/polly
|
6c7f1bf62b489bf76382d7fae550694ab66902e1
|
[
"BSD-2-Clause"
] | null | null | null |
bin/detail/toolchain_table.py
|
hjmallon/polly
|
6c7f1bf62b489bf76382d7fae550694ab66902e1
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2014-2019, Ruslan Baratov & Luca Martini
# Copyright (c) 2014-2019, Michele Caini
# Copyright (c) 2017-2019, Robert Nitsch
# Copyright (c) 2018-2019, David Hirvonen
# Copyright (c) 2018-2019, Richard Hodges
# Copyright (c) 2020, Clemens Arth
# All rights reserved.
import os
import platform
class Toolchain:
    """One CMake toolchain entry: its name, CMake generator, and the platform
    details (toolset, architecture, SDK versions, codesigning) it implies.
    """

    def __init__(
        self,
        name,
        generator,
        toolset='',
        arch='',
        vs_version='',
        ios_version='',
        osx_version='',
        xp=False,
        nocodesign=False,
    ):
        self.name = name
        self.generator = generator
        self.toolset = toolset
        self.arch = arch
        self.vs_version = vs_version
        self.ios_version = ios_version
        self.osx_version = osx_version
        self.xp = xp
        self.nocodesign = nocodesign

        # Classification flags derived from the generator string.
        self.is_nmake = generator == 'NMake Makefiles'
        self.is_msvc = generator.startswith('Visual Studio')
        self.is_make = generator.endswith('Makefiles')
        self.is_ninja = generator == 'Ninja'
        self.is_xcode = generator == 'Xcode'
        # Xcode and Visual Studio are multi-configuration generators.
        self.multiconfig = self.is_xcode or self.is_msvc

        self.verify()

    def verify(self):
        """Sanity-check that the option combination is coherent.

        Raises AssertionError on an invalid combination (e.g. an explicit
        architecture with a generator that cannot express one).
        """
        if self.arch:
            assert self.is_nmake or self.is_msvc or self.is_ninja
            assert self.arch in ('amd64', 'x86', 'arm', 'arm64')
        if self.is_nmake or self.is_msvc:
            assert self.vs_version
        if self.ios_version or self.osx_version:
            assert self.generator == 'Xcode'
        if self.xp:
            assert self.vs_version
# Toolchains available on every host platform.  The first three use an
# empty generator string; everything else here is driven through plain
# 'Unix Makefiles'.
toolchain_table = [Toolchain(_name, '') for _name in ('default', 'cxx11', 'cxx17')]
toolchain_table += [
    Toolchain(_name, 'Unix Makefiles')
    for _name in (
        'android-ndk-r10e-api-8-armeabi-v7a',
        'android-ndk-r10e-api-16-armeabi-v7a-neon',
        'android-ndk-r10e-api-16-armeabi-v7a-neon-clang-35',
        'android-ndk-r10e-api-16-armeabi-v7a-neon-clang-35-hid',
        'android-ndk-r10e-api-16-armeabi-v7a-neon-clang-35-hid-sections',
        'android-ndk-r10e-api-16-x86',
        'android-ndk-r10e-api-16-x86-hid',
        'android-ndk-r10e-api-16-x86-hid-sections',
        'android-ndk-r10e-api-19-armeabi-v7a-neon',
        'android-ndk-r10e-api-19-armeabi-v7a-neon-c11',
        'android-ndk-r10e-api-19-armeabi-v7a-neon-hid-sections',
        'android-ndk-r10e-api-19-armeabi-v7a-neon-hid-sections-lto',
        'android-ndk-r10e-api-19-armeabi-v7a-neon-clang-libcxx',
        'android-ndk-r10e-api-21-armeabi-v7a',
        'android-ndk-r10e-api-21-armeabi-v7a-neon',
        'android-ndk-r10e-api-21-armeabi-v7a-neon-hid-sections',
        'android-ndk-r10e-api-21-armeabi-v7a-neon-clang-35',
        'android-ndk-r10e-api-21-armeabi-v7a-neon-clang-libcxx',
        'android-ndk-r10e-api-21-armeabi-clang-libcxx',
        'android-ndk-r10e-api-21-armeabi',
        'android-ndk-r10e-api-21-arm64-v8a',
        'android-ndk-r10e-api-21-arm64-v8a-gcc-49',
        'android-ndk-r10e-api-21-arm64-v8a-gcc-49-hid',
        'android-ndk-r10e-api-21-arm64-v8a-gcc-49-hid-sections',
        'android-ndk-r10e-api-21-arm64-v8a-clang-35',
        'android-ndk-r10e-api-21-x86',
        'android-ndk-r10e-api-21-x86-clang-libcxx',
        'android-ndk-r10e-api-21-x86-64',
        'android-ndk-r10e-api-21-x86-64-hid',
        'android-ndk-r10e-api-21-x86-64-hid-sections',
        'android-ndk-r10e-api-21-mips',
        'android-ndk-r10e-api-21-mips64',
        'android-ndk-r10e-api-21-mips-clang-libcxx',
        'android-ndk-r11c-api-8-armeabi-v7a',
        'android-ndk-r11c-api-16-armeabi',
        'android-ndk-r11c-api-16-armeabi-cxx14',
        'android-ndk-r11c-api-16-armeabi-v7a',
        'android-ndk-r11c-api-16-armeabi-v7a-cxx14',
        'android-ndk-r11c-api-16-armeabi-v7a-neon',
        'android-ndk-r11c-api-16-armeabi-v7a-neon-cxx14',
        'android-ndk-r11c-api-16-armeabi-v7a-neon-clang-35',
        'android-ndk-r11c-api-16-armeabi-v7a-neon-clang-35-hid',
        'android-ndk-r11c-api-16-x86',
        'android-ndk-r11c-api-16-x86-hid',
        'android-ndk-r11c-api-19-armeabi-v7a-neon',
        'android-ndk-r11c-api-21-armeabi-v7a',
        'android-ndk-r11c-api-21-armeabi-v7a-neon',
        'android-ndk-r11c-api-21-armeabi-v7a-neon-clang-35',
        'android-ndk-r11c-api-21-armeabi',
        'android-ndk-r11c-api-21-arm64-v8a',
        'android-ndk-r11c-api-21-arm64-v8a-gcc-49',
        'android-ndk-r11c-api-21-arm64-v8a-gcc-49-hid',
        'android-ndk-r11c-api-21-arm64-v8a-clang-35',
        'android-ndk-r11c-api-21-x86',
        'android-ndk-r11c-api-21-x86-64',
        'android-ndk-r11c-api-21-x86-64-hid',
        'android-ndk-r11c-api-21-mips',
        'android-ndk-r11c-api-21-mips64',
        'android-ndk-r12b-api-19-armeabi-v7a-neon',
        'android-ndk-r13b-api-19-armeabi-v7a-neon',
        'android-ndk-r14-api-16-armeabi-v7a-neon-clang-hid-sections-lto',
        'android-ndk-r14-api-19-armeabi-v7a-neon-c11',
        'android-ndk-r14-api-19-armeabi-v7a-neon-clang',
        'android-ndk-r14-api-19-armeabi-v7a-neon-clang-libcxx',
        'android-ndk-r14-api-21-arm64-v8a-neon-clang-libcxx',
        'android-ndk-r14-api-19-armeabi-v7a-neon-hid-sections-lto',
        'android-ndk-r14-api-21-arm64-v8a-clang-hid-sections-lto',
        'android-ndk-r14-api-21-x86-64',
        'android-ndk-r14b-api-21-armeabi-clang-libcxx',
        'android-ndk-r14b-api-21-armeabi-v7a-neon-clang-libcxx',
        'android-ndk-r14b-api-21-mips-clang-libcxx',
        'android-ndk-r14b-api-21-x86-clang-libcxx',
        'android-ndk-r15c-api-16-armeabi-v7a-neon-clang-libcxx',
        'android-ndk-r15c-api-16-armeabi-v7a-clang-libcxx',
        'android-ndk-r15c-api-16-armeabi-clang-libcxx',
        'android-ndk-r15c-api-16-mips-clang-libcxx',
        'android-ndk-r15c-api-16-x86-clang-libcxx',
        'android-ndk-r15c-api-21-arm64-v8a-neon-clang-libcxx',
        'android-ndk-r15c-api-21-arm64-v8a-clang-libcxx',
        'android-ndk-r15c-api-21-armeabi-v7a-clang-libcxx',
        'android-ndk-r15c-api-21-armeabi-v7a-neon-clang-libcxx',
        'android-ndk-r15c-api-21-armeabi-clang-libcxx',
        'android-ndk-r15c-api-21-mips-clang-libcxx',
        'android-ndk-r15c-api-21-x86-clang-libcxx',
        'android-ndk-r15c-api-21-x86-64-clang-libcxx',
        'android-ndk-r15c-api-24-armeabi-v7a-neon-clang-libcxx',
        'android-ndk-r16b-api-16-armeabi-v7a-clang-libcxx14',
        'android-ndk-r16b-api-16-armeabi-v7a-thumb-clang-libcxx14',
        'android-ndk-r16b-api-16-x86-clang-libcxx14',
        'android-ndk-r16b-api-19-gcc-49-armeabi-v7a-neon-libcxx-hid-sections-lto',
        'android-ndk-r16b-api-21-armeabi-clang-libcxx',
        'android-ndk-r16b-api-21-armeabi-clang-libcxx14',
        'android-ndk-r16b-api-21-armeabi-v7a-clang-libcxx',
        'android-ndk-r16b-api-21-armeabi-v7a-clang-libcxx14',
        'android-ndk-r16b-api-21-arm64-v8a-neon-clang-libcxx',
        'android-ndk-r16b-api-21-arm64-v8a-neon-clang-libcxx14',
        'android-ndk-r16b-api-21-armeabi-v7a-neon-clang-libcxx',
        'android-ndk-r16b-api-21-armeabi-v7a-neon-clang-libcxx14',
        'android-ndk-r16b-api-21-x86-clang-libcxx',
        'android-ndk-r16b-api-21-x86-64-clang-libcxx',
        'android-ndk-r16b-api-24-arm64-v8a-clang-libcxx14',
        'android-ndk-r16b-api-24-armeabi-v7a-neon-clang-libcxx',
        'android-ndk-r16b-api-24-armeabi-v7a-neon-clang-libcxx14',
        'android-ndk-r16b-api-24-x86-clang-libcxx14',
        'android-ndk-r16b-api-24-x86-64-clang-libcxx14',
        'android-ndk-r17-api-24-arm64-v8a-clang-libcxx14',
        'android-ndk-r17-api-24-arm64-v8a-clang-libcxx11',
        'android-ndk-r17-api-21-arm64-v8a-neon-clang-libcxx14',
        'android-ndk-r17-api-16-armeabi-v7a-clang-libcxx14',
        'android-ndk-r17-api-16-x86-clang-libcxx14',
        'android-ndk-r17-api-21-x86-64-clang-libcxx14',
        'android-ndk-r17-api-19-armeabi-v7a-neon-hid-sections',
        'android-ndk-r17-api-19-armeabi-v7a-neon-clang-libcxx',
        'android-ndk-r18-api-24-arm64-v8a-clang-libcxx14',
        'android-ndk-r18b-api-24-arm64-v8a-clang-libcxx11',
        'android-ndk-r18b-api-28-arm64-v8a-clang-libcxx11',
        'android-ndk-r18b-api-16-armeabi-v7a-clang-libcxx',
        'android-ndk-r18b-api-21-arm64-v8a-clang-libcxx',
        'android-ndk-r18b-api-21-armeabi-v7a-clang-libcxx',
        'android-ndk-r18b-api-21-x86-64-clang-libcxx',
        'android-ndk-r18b-api-21-x86-clang-libcxx',
        'emscripten-cxx11',
        'emscripten-cxx14',
        'emscripten-cxx17',
        'raspberrypi1-cxx11-pic',
        'raspberrypi1-cxx11-pic-static-std',
        'raspberrypi1-cxx14-pic-static-std',
        'raspberrypi2-cxx11',
        'raspberrypi2-cxx11-pic',
        'raspberrypi3-clang-cxx11',
        'raspberrypi3-clang-cxx14',
        'raspberrypi3-clang-cxx14-pic',
        'raspberrypi3-gcc-pic-hid-sections',
        'raspberrypi3-cxx14',
        'raspberrypi3-cxx11',
    )
]
# Windows-only toolchains: MinGW/MSYS makefile generators plus the
# NMake, Ninja and Visual Studio variants in their arch/toolset flavours.
if os.name == 'nt':
    toolchain_table += [
        Toolchain('mingw', 'MinGW Makefiles'),
        Toolchain('mingw-c11', 'MinGW Makefiles'),
        Toolchain('mingw-cxx14', 'MinGW Makefiles'),
        Toolchain('mingw-cxx17', 'MinGW Makefiles'),
        Toolchain('msys', 'MSYS Makefiles'),
        Toolchain('msys-cxx14', 'MSYS Makefiles'),
        Toolchain('msys-cxx17', 'MSYS Makefiles'),
        Toolchain('nmake-vs-12-2013', 'NMake Makefiles', arch='x86', vs_version='12'),
        Toolchain('nmake-vs-12-2013-win64', 'NMake Makefiles', arch='amd64', vs_version='12'),
        Toolchain('nmake-vs-15-2017-win64', 'NMake Makefiles', arch='amd64', vs_version='15'),
        Toolchain('nmake-vs-15-2017-win64-cxx17', 'NMake Makefiles', arch='amd64', vs_version='15'),
        Toolchain('nmake-vs-15-2017-win64-cxx17-nonpermissive', 'NMake Makefiles', arch='amd64', vs_version='15'),
        Toolchain('ninja-vs-12-2013-win64', 'Ninja', arch='amd64', vs_version='12'),
        Toolchain('ninja-vs-14-2015-win64', 'Ninja', arch='amd64', vs_version='14'),
        Toolchain('ninja-vs-15-2017-win64', 'Ninja', arch='amd64', vs_version='15'),
        Toolchain('ninja-vs-15-2017-win64-cxx17', 'Ninja', arch='amd64', vs_version='15'),
        Toolchain('ninja-vs-15-2017-win64-cxx17-nonpermissive', 'Ninja', arch='amd64', vs_version='15'),
        Toolchain('vs-12-2013', 'Visual Studio 12 2013', arch='x86', vs_version='12'),
        Toolchain('vs-12-2013-mt', 'Visual Studio 12 2013', arch='x86', vs_version='12'),
        Toolchain('vs-10-2010', 'Visual Studio 10 2010', arch='x86', vs_version='10'),
        Toolchain('vs-11-2012', 'Visual Studio 11 2012', arch='x86', vs_version='11'),
        Toolchain('vs-14-2015', 'Visual Studio 14 2015', arch='x86', vs_version='14'),
        Toolchain('vs-15-2017', 'Visual Studio 15 2017', arch='x86', vs_version='15'),
        Toolchain('vs-15-2017-mt', 'Visual Studio 15 2017', arch='x86', vs_version='15'),
        Toolchain('vs-15-2017-cxx14-mt', 'Visual Studio 15 2017', arch='x86', vs_version='15'),
        Toolchain('vs-15-2017-cxx17', 'Visual Studio 15 2017', arch='x86', vs_version='15'),
        Toolchain('vs-14-2015-sdk-8-1', 'Visual Studio 14 2015', arch='x86', vs_version='14'),
        Toolchain('vs-9-2008', 'Visual Studio 9 2008', arch='x86', vs_version='9'),
        Toolchain('vs-8-2005', 'Visual Studio 8 2005', arch='x86', vs_version='8'),
        Toolchain('vs-12-2013-xp', 'Visual Studio 12 2013', arch='x86', vs_version='12', xp=True),
        Toolchain('vs-11-2012-win64', 'Visual Studio 11 2012 Win64', arch='amd64', vs_version='11'),
        Toolchain('vs-12-2013-win64', 'Visual Studio 12 2013 Win64', arch='amd64', vs_version='12'),
        Toolchain('vs-14-2015-win64', 'Visual Studio 14 2015 Win64', arch='amd64', vs_version='14'),
        Toolchain('vs-14-2015-win64-sdk-8-1', 'Visual Studio 14 2015 Win64', arch='amd64', vs_version='14'),
        Toolchain('vs-14-2015-win64-sdk-8-1-cxx17', 'Visual Studio 14 2015 Win64', arch='amd64', vs_version='14'),
        Toolchain('vs-11-2012-arm', 'Visual Studio 11 2012 ARM', vs_version='11'),
        Toolchain('vs-12-2013-arm', 'Visual Studio 12 2013 ARM', vs_version='12'),
        Toolchain('vs-14-2015-arm', 'Visual Studio 14 2015 ARM', vs_version='14'),
        Toolchain('vs-15-2017-arm', 'Visual Studio 15 2017', arch='arm', vs_version='15'),
        Toolchain('vs-15-2017-arm64', 'Visual Studio 15 2017', arch='arm64', vs_version='15'),
        Toolchain('vs-16-2019-arm', 'Visual Studio 16 2019', arch='arm', vs_version='16'),
        Toolchain('vs-16-2019-arm64', 'Visual Studio 16 2019', arch='arm64', vs_version='16'),
        Toolchain('vs-15-2017-win64', 'Visual Studio 15 2017 Win64', arch='amd64', vs_version='15'),
        Toolchain('vs-15-2017-win64-mt', 'Visual Studio 15 2017 Win64', arch='amd64', vs_version='15'),
        Toolchain('vs-15-2017-win64-cxx14-mt', 'Visual Studio 15 2017 Win64', arch='amd64', vs_version='15'),
        Toolchain('vs-15-2017-win64-cxx14', 'Visual Studio 15 2017 Win64', arch='amd64', vs_version='15'),
        Toolchain('vs-15-2017-win64-cxx17', 'Visual Studio 15 2017 Win64', arch='amd64', vs_version='15'),
        Toolchain('vs-15-2017-win64-cxx17-nonpermissive', 'Visual Studio 15 2017 Win64', arch='amd64', vs_version='15'),
        Toolchain('vs-15-2017-win64-llvm', 'Visual Studio 15 2017 Win64', toolset='llvm', arch='amd64', vs_version='15'),
        Toolchain('vs-15-2017-win64-llvm-vs2014', 'Visual Studio 15 2017 Win64', toolset='LLVM-vs2014', arch='amd64', vs_version='15'),
        Toolchain('vs-15-2017-win64-store-10-zw', 'Visual Studio 15 2017 Win64', arch='amd64', vs_version='15'),
        Toolchain('vs-15-2017-store-10-zw', 'Visual Studio 15 2017', arch='x86', vs_version='15'),
        Toolchain('vs-15-2017-win64-store-10-cxx17', 'Visual Studio 15 2017 Win64', arch='amd64', vs_version='15'),
        Toolchain('vs-15-2017-win64-z7', 'Visual Studio 15 2017 Win64', arch='amd64', vs_version='15'),
        Toolchain('vs-15-2017-win64-version-14-11', 'Visual Studio 15 2017 Win64', arch='amd64', vs_version='15', toolset='version=14.11'),
        Toolchain('vs-16-2019', 'Visual Studio 16 2019', arch='x86', vs_version='16'),
        Toolchain('vs-16-2019-cxx14', 'Visual Studio 16 2019', arch='x86', vs_version='16'),
        Toolchain('vs-16-2019-cxx17', 'Visual Studio 16 2019', arch='x86', vs_version='16'),
        Toolchain('vs-16-2019-llvm-cxx17', 'Visual Studio 16 2019', toolset='clangcl', arch='x86', vs_version='16'),
        Toolchain('vs-16-2019-win64', 'Visual Studio 16 2019', arch='amd64', vs_version='16'),
        Toolchain('vs-16-2019-win64-cxx14', 'Visual Studio 16 2019', arch='amd64', vs_version='16'),
        Toolchain('vs-16-2019-win64-cxx17', 'Visual Studio 16 2019', arch='amd64', vs_version='16'),
        Toolchain('vs-16-2019-win64-llvm-cxx17', 'Visual Studio 16 2019', toolset='clangcl', arch='amd64', vs_version='16'),
        Toolchain('android-vc-ndk-r10e-api-19-arm-clang-3-6', 'Visual Studio 14 2015 ARM', arch='', vs_version='14'),
        Toolchain('android-vc-ndk-r10e-api-21-arm-clang-3-6', 'Visual Studio 14 2015 ARM', arch='', vs_version='14'),
        Toolchain('android-vc-ndk-r10e-api-19-x86-clang-3-6', 'Visual Studio 14 2015', arch='', vs_version='14'),
        Toolchain('android-vc-ndk-r10e-api-19-arm-gcc-4-9', 'Visual Studio 14 2015 ARM', arch='', vs_version='14'),
    ]
# Cygwin hosts additionally get a plain makefile based toolchain.
if platform.system().startswith('CYGWIN'):
    toolchain_table.append(Toolchain('cygwin', 'Unix Makefiles'))
# Linux-only toolchains: sanitizer builds plus the MinGW and ARM
# cross-compilation setups; all of them use 'Unix Makefiles'.
if platform.system() == 'Linux':
    for _name in (
        'sanitize-leak',
        'sanitize-leak-cxx17',
        'sanitize-leak-cxx17-pic',
        'sanitize-memory',
        'linux-mingw-w32',
        'linux-mingw-w32-cxx14',
        'linux-mingw-w64',
        'linux-mingw-w64-cxx14',
        'linux-mingw-w64-cxx98',
        'linux-mingw-w64-gnuxx11',
        'linux-gcc-armhf',
        'linux-gcc-armhf-neon',
        'linux-gcc-armhf-neon-vfpv4',
        'linux-gcc-jetson-tk1',
    ):
        toolchain_table.append(Toolchain(_name, 'Unix Makefiles'))
if platform.system() == 'Darwin':
toolchain_table += [
Toolchain('ios', 'Xcode'),
Toolchain('ios-cxx17', 'Xcode'),
Toolchain('ios-bitcode', 'Xcode'),
Toolchain('ios-14-3-dep-10-0-arm64', 'Xcode', ios_version='14.3'),
Toolchain('ios-14-3-dep-10-0-armv7', 'Xcode', ios_version='14.3'),
Toolchain('ios-14-3-dep-10-0-armv7s', 'Xcode', ios_version='14.3'),
Toolchain('ios-14-3-dep-10-0-device-cxx14', 'Xcode', ios_version='14.3'),
Toolchain('ios-14-3-dep-10-0-device-bitcode-cxx14', 'Xcode', ios_version='14.3'),
Toolchain('ios-14-2-dep-10-0-arm64', 'Xcode', ios_version='14.2'),
Toolchain('ios-14-2-dep-10-0-armv7', 'Xcode', ios_version='14.2'),
Toolchain('ios-14-2-dep-10-0-armv7s', 'Xcode', ios_version='14.2'),
Toolchain('ios-14-2-dep-10-0-device-cxx14', 'Xcode', ios_version='14.2'),
Toolchain('ios-14-2-dep-10-0-device-bitcode-cxx14', 'Xcode', ios_version='14.2'),
Toolchain('ios-14-0-dep-9-3-arm64', 'Xcode', ios_version='14.0'),
Toolchain('ios-14-0-dep-9-3-armv7', 'Xcode', ios_version='14.0'),
Toolchain('ios-14-0-dep-9-3-armv7s', 'Xcode', ios_version='14.0'),
Toolchain('ios-14-0-dep-9-3-device-cxx14', 'Xcode', ios_version='14.0'),
Toolchain('ios-14-0-dep-9-3-device-bitcode-cxx14', 'Xcode', ios_version='14.0'),
Toolchain('ios-13-6-dep-9-3-arm64', 'Xcode', ios_version='13.6'),
Toolchain('ios-13-6-dep-9-3-armv7', 'Xcode', ios_version='13.6'),
Toolchain('ios-13-6-dep-9-3-armv7s', 'Xcode', ios_version='13.6'),
Toolchain('ios-13-6-dep-9-3-device-cxx14', 'Xcode', ios_version='13.6'),
Toolchain('ios-13-6-dep-9-3-device-bitcode-cxx14', 'Xcode', ios_version='13.6'),
Toolchain('ios-13-5-dep-9-3-arm64', 'Xcode', ios_version='13.5'),
Toolchain('ios-13-5-dep-9-3-armv7', 'Xcode', ios_version='13.5'),
Toolchain('ios-13-5-dep-9-3-armv7s', 'Xcode', ios_version='13.5'),
Toolchain('ios-13-5-dep-9-3-device-cxx14', 'Xcode', ios_version='13.5'),
Toolchain('ios-13-5-dep-9-3-device-bitcode-cxx14', 'Xcode', ios_version='13.5'),
Toolchain('ios-13-4-dep-9-3-arm64', 'Xcode', ios_version='13.4'),
Toolchain('ios-13-4-dep-9-3-armv7', 'Xcode', ios_version='13.4'),
Toolchain('ios-13-4-dep-9-3-armv7s', 'Xcode', ios_version='13.4'),
Toolchain('ios-13-4-dep-9-3-device-cxx14', 'Xcode', ios_version='13.4'),
Toolchain('ios-13-4-dep-9-3-device-bitcode-cxx14', 'Xcode', ios_version='13.4'),
Toolchain('ios-13-3-dep-9-3-arm64', 'Xcode', ios_version='13.3'),
Toolchain('ios-13-3-dep-9-3-armv7', 'Xcode', ios_version='13.3'),
Toolchain('ios-13-3-dep-9-3-armv7s', 'Xcode', ios_version='13.3'),
Toolchain('ios-13-3-dep-9-3-device-cxx14', 'Xcode', ios_version='13.3'),
Toolchain('ios-13-3-dep-9-3-device-bitcode-cxx14', 'Xcode', ios_version='13.3'),
Toolchain('ios-13-2-dep-10-0-arm64-bitcode-cxx17', 'Xcode', ios_version='13.2'),
Toolchain('ios-13-2-dep-9-3-arm64-bitcode', 'Xcode', ios_version='13.2'),
Toolchain('ios-13-2-dep-9-3-arm64', 'Xcode', ios_version='13.2'),
Toolchain('ios-13-2-dep-9-3-armv7', 'Xcode', ios_version='13.2'),
Toolchain('ios-13-2-dep-9-3-armv7s', 'Xcode', ios_version='13.2'),
Toolchain('ios-13-2-dep-9-3-device-cxx14', 'Xcode', ios_version='13.2'),
Toolchain('ios-13-2-dep-9-3-device-bitcode-cxx14', 'Xcode', ios_version='13.2'),
Toolchain('ios-13-0-dep-9-3-arm64', 'Xcode', ios_version='13.0'),
Toolchain('ios-13-0-dep-9-3-arm64-bitcode', 'Xcode', ios_version='13.0'),
Toolchain('ios-13-0-dep-11-0-arm64-bitcode-cxx17', 'Xcode', ios_version='13.0'),
Toolchain('ios-13-0-dep-10-0-arm64-bitcode-cxx17', 'Xcode', ios_version='13.0'),
Toolchain('ios-12-3-dep-9-3-arm64', 'Xcode', ios_version='12.3'),
Toolchain('ios-12-2-dep-9-3-arm64', 'Xcode', ios_version='12.2'),
Toolchain('ios-12-1-dep-9-0-device-bitcode-cxx14', 'Xcode', ios_version='12.1'),
Toolchain('ios-12-1-dep-9-0-device-bitcode-cxx17', 'Xcode', ios_version='12.1'),
Toolchain('ios-12-0-dep-11-0-arm64', 'Xcode', ios_version='12.0'),
Toolchain('ios-12-1-dep-11-0-arm64', 'Xcode', ios_version='12.1'),
Toolchain('ios-12-1-dep-12-0-arm64-cxx17', 'Xcode', ios_version='12.1'),
Toolchain('ios-12-1-dep-9-3-arm64-bitcode', 'Xcode', ios_version='12.1'),
Toolchain('ios-12-1-dep-9-3-arm64', 'Xcode', ios_version='12.1'),
Toolchain('ios-12-1-dep-9-3-armv7', 'Xcode', ios_version='12.1'),
Toolchain('ios-12-1-dep-9-3', 'Xcode', ios_version='12.1'),
Toolchain('ios-12-1-dep-9-3-x86-64-arm64', 'Xcode', ios_version='12.1'),
Toolchain('ios-11-4-dep-9-3-arm64', 'Xcode', ios_version='11.4'),
Toolchain('ios-11-4-dep-9-3-armv7', 'Xcode', ios_version='11.4'),
Toolchain('ios-11-4-dep-9-3-arm64-armv7', 'Xcode', ios_version='11.4'),
Toolchain('ios-11-4-dep-9-3', 'Xcode', ios_version='11.4'),
Toolchain('ios-11-4-dep-9-4-arm64', 'Xcode', ios_version='11.4'),
Toolchain('ios-11-4-dep-9-3-arm64-hid-sections-lto-cxx11', 'Xcode', ios_version='11.4'),
Toolchain('ios-11-4-dep-8-0-arm64-armv7-hid-sections-lto-cxx11', 'Xcode', ios_version='11.4'),
Toolchain('ios-11-4-dep-8-0-arm64-hid-sections-lto-cxx11', 'Xcode', ios_version='11.4'),
Toolchain('ios-11-3-dep-9-0-arm64', 'Xcode', ios_version='11.3'),
Toolchain('ios-11-4-dep-9-0-device-bitcode-cxx11', 'Xcode', ios_version='11.4'),
Toolchain('ios-12-0-dep-9-0-device-bitcode-cxx11', 'Xcode', ios_version='12.0'),
Toolchain('ios-11-4-dep-9-0-device-bitcode-nocxx', 'Xcode', ios_version='11.4'),
Toolchain('ios-11-3-dep-9-0-device-bitcode', 'Xcode', ios_version='11.3'),
Toolchain('ios-11-3-dep-9-0-device-bitcode-nocxx', 'Xcode', ios_version='11.3'),
Toolchain('ios-11-3-dep-9-0-device-bitcode-cxx11', 'Xcode', ios_version='11.3'),
Toolchain('ios-11-3-dep-9-0-device-bitcode-cxx17', 'Xcode', ios_version='11.3'),
Toolchain('ios-11-2-dep-9-0-device-bitcode-cxx11', 'Xcode', ios_version='11.2'),
Toolchain('ios-11-2-dep-9-0-device-bitcode-nocxx', 'Xcode', ios_version='11.2'),
Toolchain('ios-11-2-dep-9-3-arm64-armv7', 'Xcode', ios_version='11.2'),
Toolchain('ios-11-3-dep-9-3-arm64-armv7', 'Xcode', ios_version='11.3'),
Toolchain('ios-11-1-dep-9-0-bitcode-cxx11', 'Xcode', ios_version='11.1'),
Toolchain('ios-11-1-dep-9-0-device-bitcode-cxx11', 'Xcode', ios_version='11.1'),
Toolchain('ios-11-0-dep-9-0-bitcode-cxx11', 'Xcode', ios_version='11.0'),
Toolchain('ios-11-0-dep-9-0-device-bitcode-cxx11', 'Xcode', ios_version='11.0'),
Toolchain('ios-11-0-dep-9-0-x86-64-arm64-bitcode-cxx11', 'Xcode', ios_version='11.0'),
Toolchain('ios-11-0', 'Xcode', ios_version='11.0'),
Toolchain('ios-10-3', 'Xcode', ios_version='10.3'),
Toolchain('ios-10-3-dep-8-0-bitcode', 'Xcode', ios_version='10.3'),
Toolchain('ios-10-3-dep-9-0-bitcode', 'Xcode', ios_version='10.3'),
Toolchain('ios-10-3-dep-9-3-i386-armv7', 'Xcode', ios_version='10.3'),
Toolchain('ios-10-3-dep-9-3-x86-64-arm64', 'Xcode', ios_version='10.3'),
Toolchain('ios-10-3-lto', 'Xcode', ios_version='10.3'),
Toolchain('ios-10-3-armv7', 'Xcode', ios_version='10.3'),
Toolchain('ios-10-3-arm64', 'Xcode', ios_version='10.3'),
Toolchain('ios-10-2', 'Xcode', ios_version='10.2'),
Toolchain('ios-10-2-dep-9-3-armv7', 'Xcode', ios_version='10.2'),
Toolchain('ios-10-2-dep-9-3-arm64', 'Xcode', ios_version='10.2'),
Toolchain('ios-10-1', 'Xcode', ios_version='10.1'),
Toolchain('ios-10-1-arm64', 'Xcode', ios_version='10.1'),
Toolchain('ios-10-1-arm64-dep-8-0-hid-sections', 'Xcode', ios_version='10.1'),
Toolchain('ios-10-1-armv7', 'Xcode', ios_version='10.1'),
Toolchain('ios-10-1-dep-8-0-hid-sections', 'Xcode', ios_version='10.1'),
Toolchain('ios-10-1-dep-8-0-libcxx-hid-sections', 'Xcode', ios_version='10.1'),
Toolchain('ios-10-1-dep-8-0-libcxx-hid-sections-lto', 'Xcode', ios_version='10.1'),
Toolchain('ios-10-1-wo-armv7s', 'Xcode', ios_version='10.1'),
Toolchain('ios-10-0', 'Xcode', ios_version='10.0'),
Toolchain('ios-10-0-arm64', 'Xcode', ios_version='10.0'),
Toolchain('ios-10-0-arm64-dep-8-0-hid-sections', 'Xcode', ios_version='10.0'),
Toolchain('ios-10-0-armv7', 'Xcode', ios_version='10.0'),
Toolchain('ios-10-0-dep-8-0-hid-sections', 'Xcode', ios_version='10.0'),
Toolchain('ios-10-0-wo-armv7s', 'Xcode', ios_version='10.0'),
Toolchain('ios-9-3', 'Xcode', ios_version='9.3'),
Toolchain('ios-9-3-arm64', 'Xcode', ios_version='9.3'),
Toolchain('ios-9-3-armv7', 'Xcode', ios_version='9.3'),
Toolchain('ios-9-3-wo-armv7s', 'Xcode', ios_version='9.3'),
Toolchain('ios-9-2', 'Xcode', ios_version='9.2'),
Toolchain('ios-9-2-arm64', 'Xcode', ios_version='9.2'),
Toolchain('ios-9-2-armv7', 'Xcode', ios_version='9.2'),
Toolchain('ios-9-2-hid', 'Xcode', ios_version='9.2'),
Toolchain('ios-9-2-hid-sections', 'Xcode', ios_version='9.2'),
Toolchain('ios-9-1-armv7', 'Xcode', ios_version='9.1'),
Toolchain('ios-9-1-arm64', 'Xcode', ios_version='9.1'),
Toolchain('ios-9-1-dep-7-0-armv7', 'Xcode', ios_version='9.1'),
Toolchain('ios-9-1-hid', 'Xcode', ios_version='9.1'),
Toolchain('ios-9-1-dep-8-0-hid', 'Xcode', ios_version='9.1'),
Toolchain('ios-9-1', 'Xcode', ios_version='9.1'),
Toolchain('ios-9-0', 'Xcode', ios_version='9.0'),
Toolchain('ios-9-0-armv7', 'Xcode', ios_version='9.0'),
Toolchain('ios-9-0-i386-armv7', 'Xcode', ios_version='9.0'),
Toolchain('ios-9-0-wo-armv7s', 'Xcode', ios_version='9.0'),
Toolchain('ios-9-0-dep-7-0-armv7', 'Xcode', ios_version='9.0'),
Toolchain('ios-8-4', 'Xcode', ios_version='8.4'),
Toolchain('ios-8-4-arm64', 'Xcode', ios_version='8.4'),
Toolchain('ios-8-4-armv7', 'Xcode', ios_version='8.4'),
Toolchain('ios-8-4-armv7s', 'Xcode', ios_version='8.4'),
Toolchain('ios-8-4-hid', 'Xcode', ios_version='8.4'),
Toolchain('ios-8-2', 'Xcode', ios_version='8.2'),
Toolchain('ios-8-2-i386-arm64', 'Xcode', ios_version='8.2'),
Toolchain('ios-8-2-arm64', 'Xcode', ios_version='8.2'),
Toolchain('ios-8-2-arm64-hid', 'Xcode', ios_version='8.2'),
Toolchain('ios-8-2-cxx98', 'Xcode', ios_version='8.2'),
Toolchain('ios-8-1', 'Xcode', ios_version='8.1'),
Toolchain('ios-8-0', 'Xcode', ios_version='8.0'),
Toolchain('ios-7-1', 'Xcode', ios_version='7.1'),
Toolchain('ios-7-0', 'Xcode', ios_version='7.0'),
Toolchain('ios-dep-8-0-arm64-cxx11', 'Xcode'),
Toolchain('ios-dep-8-0-arm64-armv7-hid-sections-cxx11', 'Xcode'),
Toolchain('ios-dep-8-0-arm64-armv7-hid-sections-lto-cxx11', 'Xcode'),
Toolchain('ios-dep-10-0-bitcode-cxx17', 'Xcode'),
Toolchain('ios-dep-11-0-bitcode-cxx17', 'Xcode'),
Toolchain('ios-dep-12-0-bitcode-cxx17', 'Xcode'),
Toolchain('ios-nocodesign', 'Xcode', nocodesign=True),
Toolchain('ios-nocodesign-arm64', 'Xcode', ios_version='8.1', nocodesign=True),
Toolchain('ios-nocodesign-armv7', 'Xcode', ios_version='8.1', nocodesign=True),
Toolchain('ios-nocodesign-hid-sections', 'Xcode', ios_version='8.1', nocodesign=True),
Toolchain('ios-nocodesign-wo-armv7s', 'Xcode', ios_version='8.1', nocodesign=True),
Toolchain('ios-nocodesign-8-4', 'Xcode', ios_version='8.4', nocodesign=True),
Toolchain('ios-nocodesign-8-1', 'Xcode', ios_version='8.1', nocodesign=True),
Toolchain('ios-nocodesign-9-1', 'Xcode', ios_version='9.1', nocodesign=True),
Toolchain('ios-nocodesign-9-1-arm64', 'Xcode', ios_version='9.1', nocodesign=True),
Toolchain('ios-nocodesign-9-1-armv7', 'Xcode', ios_version='9.1', nocodesign=True),
Toolchain('ios-nocodesign-9-2', 'Xcode', ios_version='9.2', nocodesign=True),
Toolchain('ios-nocodesign-9-2-arm64', 'Xcode', ios_version='9.2', nocodesign=True),
Toolchain('ios-nocodesign-9-2-armv7', 'Xcode', ios_version='9.2', nocodesign=True),
Toolchain('ios-nocodesign-9-3', 'Xcode', ios_version='9.3', nocodesign=True),
Toolchain('ios-nocodesign-9-3-device', 'Xcode', ios_version='9.3', nocodesign=True),
Toolchain('ios-nocodesign-9-3-device-hid-sections', 'Xcode', ios_version='9.3', nocodesign=True),
Toolchain('ios-nocodesign-9-3-arm64', 'Xcode', ios_version='9.3', nocodesign=True),
Toolchain('ios-nocodesign-9-3-armv7', 'Xcode', ios_version='9.3', nocodesign=True),
Toolchain('ios-nocodesign-9-3-wo-armv7s', 'Xcode', ios_version='9.3', nocodesign=True),
Toolchain('ios-nocodesign-10-0', 'Xcode', ios_version='10.0', nocodesign=True),
Toolchain('ios-nocodesign-10-0-arm64', 'Xcode', ios_version='10.0', nocodesign=True),
Toolchain('ios-nocodesign-10-0-armv7', 'Xcode', ios_version='10.0', nocodesign=True),
Toolchain('ios-nocodesign-10-0-wo-armv7s', 'Xcode', ios_version='10.0', nocodesign=True),
Toolchain('ios-nocodesign-10-1', 'Xcode', ios_version='10.1', nocodesign=True),
Toolchain('ios-nocodesign-10-1-arm64', 'Xcode', ios_version='10.1', nocodesign=True),
Toolchain('ios-nocodesign-10-1-armv7', 'Xcode', ios_version='10.1', nocodesign=True),
Toolchain('ios-nocodesign-10-1-wo-armv7s', 'Xcode', ios_version='10.1', nocodesign=True),
Toolchain('ios-nocodesign-10-1-arm64-dep-9-0-device-libcxx-hid-sections-lto', 'Xcode', ios_version='10.1', nocodesign=True),
Toolchain('ios-nocodesign-10-1-arm64-dep-9-0-device-libcxx-hid-sections', 'Xcode', ios_version='10.1', nocodesign=True),
Toolchain('ios-nocodesign-10-1-dep-8-0-libcxx-hid-sections-lto', 'Xcode', ios_version='10.1', nocodesign=True),
Toolchain('ios-nocodesign-10-1-dep-8-0-device-libcxx-hid-sections-lto', 'Xcode', ios_version='10.1', nocodesign=True),
Toolchain('ios-nocodesign-10-1-dep-9-0-device-libcxx-hid-sections-lto', 'Xcode', ios_version='10.1', nocodesign=True),
Toolchain('ios-nocodesign-10-2', 'Xcode', ios_version='10.2', nocodesign=True),
Toolchain('ios-nocodesign-10-3', 'Xcode', ios_version='10.3', nocodesign=True),
Toolchain('ios-nocodesign-10-3-cxx14', 'Xcode', ios_version='10.3', nocodesign=True),
Toolchain('ios-nocodesign-10-3-arm64-dep-9-0-device-libcxx-hid-sections', 'Xcode', ios_version='10.3', nocodesign=True),
Toolchain('ios-nocodesign-10-3-dep-9-0-bitcode', 'Xcode', ios_version='10.3', nocodesign=True),
Toolchain('ios-nocodesign-10-3-wo-armv7s', 'Xcode', ios_version='10.3', nocodesign=True),
Toolchain('ios-nocodesign-10-3-arm64', 'Xcode', ios_version='10.3', nocodesign=True),
Toolchain('ios-nocodesign-10-3-armv7', 'Xcode', ios_version='10.3', nocodesign=True),
Toolchain('ios-nocodesign-11-0', 'Xcode', ios_version='11.0', nocodesign=True),
Toolchain('ios-nocodesign-11-0-dep-9-0-bitcode-cxx11', 'Xcode', ios_version='11.0', nocodesign=True),
Toolchain('ios-nocodesign-11-0-arm64-dep-9-0-device-libcxx-hid-sections', 'Xcode', ios_version='11.0', nocodesign=True),
Toolchain('ios-nocodesign-11-1', 'Xcode', ios_version='11.1', nocodesign=True),
Toolchain('ios-nocodesign-11-1-dep-9-0-wo-armv7s-bitcode-cxx11', 'Xcode', ios_version='11.1', nocodesign=True),
Toolchain('ios-nocodesign-11-1-dep-9-0-bitcode-cxx11', 'Xcode', ios_version='11.1', nocodesign=True),
Toolchain('ios-nocodesign-11-2-dep-8-0-wo-armv7s-bitcode-cxx11', 'Xcode', ios_version='11.2', nocodesign=True),
Toolchain('ios-nocodesign-11-2-dep-9-0-bitcode-cxx11', 'Xcode', ios_version='11.2', nocodesign=True),
Toolchain('ios-nocodesign-11-2-dep-9-3', 'Xcode', ios_version='11.2', nocodesign=True),
Toolchain('ios-nocodesign-11-2-dep-9-3-armv7', 'Xcode', ios_version='11.2', nocodesign=True),
Toolchain('ios-nocodesign-11-2-dep-9-3-arm64', 'Xcode', ios_version='11.2', nocodesign=True),
Toolchain('ios-nocodesign-11-2-dep-9-3-arm64-armv7', 'Xcode', ios_version='11.2', nocodesign=True),
Toolchain('ios-nocodesign-11-2-dep-9-3-i386-armv7', 'Xcode', ios_version='11.2', nocodesign=True),
Toolchain('ios-nocodesign-11-2', 'Xcode', ios_version='11.2', nocodesign=True),
Toolchain('ios-nocodesign-11-3-dep-9-3', 'Xcode', ios_version='11.3', nocodesign=True),
Toolchain('ios-nocodesign-11-3-dep-9-3-armv7', 'Xcode', ios_version='11.3', nocodesign=True),
Toolchain('ios-nocodesign-11-3-dep-9-3-arm64', 'Xcode', ios_version='11.3', nocodesign=True),
Toolchain('ios-nocodesign-11-3-dep-9-0-bitcode-cxx11', 'Xcode', ios_version='11.3', nocodesign=True),
Toolchain('ios-nocodesign-11-4-dep-9-0-bitcode-cxx11', 'Xcode', ios_version='11.4', nocodesign=True),
Toolchain('ios-nocodesign-12-0-dep-9-0-bitcode-cxx11', 'Xcode', ios_version='12.0', nocodesign=True),
Toolchain('ios-nocodesign-12-0-dep-10-0-bitcode-cxx11', 'Xcode', ios_version='12.0', nocodesign=True),
Toolchain('ios-nocodesign-12-1-dep-9-0-bitcode-cxx11', 'Xcode', ios_version='12.1', nocodesign=True),
Toolchain('ios-nocodesign-11-4-dep-9-3', 'Xcode', ios_version='11.4', nocodesign=True),
Toolchain('ios-nocodesign-11-4-dep-9-3-arm64', 'Xcode', ios_version='11.4', nocodesign=True),
Toolchain('ios-nocodesign-11-4-dep-9-3-armv7', 'Xcode', ios_version='11.4', nocodesign=True),
Toolchain('ios-nocodesign-12-1-dep-9-3-armv7', 'Xcode', ios_version='12.1', nocodesign=True),
Toolchain('ios-nocodesign-12-1', 'Xcode', ios_version='12.1', nocodesign=True),
Toolchain('ios-nocodesign-13-0-dep-9-3-arm64', 'Xcode', ios_version='13.0', nocodesign=True),
Toolchain('ios-nocodesign-13-2-dep-9-3-arm64', 'Xcode', ios_version='13.2', nocodesign=True),
Toolchain('ios-nocodesign-13-2-dep-9-3-armv7', 'Xcode', ios_version='13.2', nocodesign=True),
Toolchain('ios-nocodesign-13-2-dep-9-3-armv7s', 'Xcode', ios_version='13.2', nocodesign=True),
Toolchain('ios-nocodesign-13-2-dep-9-3', 'Xcode', ios_version='13.2', nocodesign=True),
Toolchain('ios-nocodesign-13-2-dep-9-3-device-cxx11', 'Xcode', ios_version='13.2', nocodesign=True),
Toolchain('ios-nocodesign-13-2-dep-9-3-device', 'Xcode', ios_version='13.2', nocodesign=True),
Toolchain('ios-nocodesign-13-5-dep-9-3-arm64', 'Xcode', ios_version='13.5', nocodesign=True),
Toolchain('ios-nocodesign-13-5-dep-9-3-armv7', 'Xcode', ios_version='13.5', nocodesign=True),
Toolchain('ios-nocodesign-13-5-dep-9-3-armv7s', 'Xcode', ios_version='13.5', nocodesign=True),
Toolchain('ios-nocodesign-13-5-dep-9-3', 'Xcode', ios_version='13.5', nocodesign=True),
Toolchain('ios-nocodesign-13-5-dep-9-3-device-cxx11', 'Xcode', ios_version='13.5', nocodesign=True),
Toolchain('ios-nocodesign-13-5-dep-9-3-device', 'Xcode', ios_version='13.5', nocodesign=True),
Toolchain('ios-nocodesign-13-6-dep-9-3-arm64', 'Xcode', ios_version='13.6', nocodesign=True),
Toolchain('ios-nocodesign-13-6-dep-9-3-armv7', 'Xcode', ios_version='13.6', nocodesign=True),
Toolchain('ios-nocodesign-13-6-dep-9-3-armv7s', 'Xcode', ios_version='13.6', nocodesign=True),
Toolchain('ios-nocodesign-13-6-dep-9-3', 'Xcode', ios_version='13.6', nocodesign=True),
Toolchain('ios-nocodesign-13-6-dep-9-3-device-cxx11', 'Xcode', ios_version='13.6', nocodesign=True),
Toolchain('ios-nocodesign-13-6-dep-9-3-device', 'Xcode', ios_version='13.6', nocodesign=True),
Toolchain('ios-nocodesign-14-0-dep-9-3-arm64', 'Xcode', ios_version='14.0', nocodesign=True),
Toolchain('ios-nocodesign-14-0-dep-9-3-armv7', 'Xcode', ios_version='14.0', nocodesign=True),
Toolchain('ios-nocodesign-14-0-dep-9-3-armv7s', 'Xcode', ios_version='14.0', nocodesign=True),
Toolchain('ios-nocodesign-14-0-dep-9-3', 'Xcode', ios_version='14.0', nocodesign=True),
Toolchain('ios-nocodesign-14-0-dep-9-3-device-cxx11', 'Xcode', ios_version='14.0', nocodesign=True),
Toolchain('ios-nocodesign-14-0-dep-9-3-device', 'Xcode', ios_version='14.0', nocodesign=True),
Toolchain('ios-nocodesign-14-2-dep-10-0-arm64', 'Xcode', ios_version='14.2', nocodesign=True),
Toolchain('ios-nocodesign-14-2-dep-10-0-armv7', 'Xcode', ios_version='14.2', nocodesign=True),
Toolchain('ios-nocodesign-14-2-dep-10-0-armv7s', 'Xcode', ios_version='14.2', nocodesign=True),
Toolchain('ios-nocodesign-14-2-dep-10-0', 'Xcode', ios_version='14.2', nocodesign=True),
Toolchain('ios-nocodesign-14-2-dep-10-0-device-cxx11', 'Xcode', ios_version='14.2', nocodesign=True),
Toolchain('ios-nocodesign-14-2-dep-10-0-device', 'Xcode', ios_version='14.2', nocodesign=True),
Toolchain('ios-nocodesign-14-3-dep-10-0-arm64', 'Xcode', ios_version='14.3', nocodesign=True),
Toolchain('ios-nocodesign-14-3-dep-10-0-armv7', 'Xcode', ios_version='14.3', nocodesign=True),
Toolchain('ios-nocodesign-14-3-dep-10-0-armv7s', 'Xcode', ios_version='14.3', nocodesign=True),
Toolchain('ios-nocodesign-14-3-dep-10-0', 'Xcode', ios_version='14.3', nocodesign=True),
Toolchain('ios-nocodesign-14-3-dep-10-0-device-cxx11', 'Xcode', ios_version='14.3', nocodesign=True),
Toolchain('ios-nocodesign-14-3-dep-10-0-device', 'Xcode', ios_version='14.3', nocodesign=True),
Toolchain('ios-nocodesign-dep-9-0-cxx14', 'Xcode', nocodesign=True),
Toolchain('xcode', 'Xcode'),
Toolchain('xcode-cxx98', 'Xcode'),
Toolchain('xcode-cxx17', 'Xcode'),
Toolchain('xcode-nocxx', 'Xcode'),
Toolchain('xcode-gcc', 'Xcode'),
Toolchain('xcode-hid-sections', 'Xcode'),
Toolchain('xcode-sections', 'Xcode'),
Toolchain('osx-10-7', 'Xcode', osx_version='10.7'),
Toolchain('osx-10-8', 'Xcode', osx_version='10.8'),
Toolchain('osx-10-9', 'Xcode', osx_version='10.9'),
Toolchain('osx-10-10', 'Xcode', osx_version='10.10'),
Toolchain('osx-10-11', 'Xcode', osx_version='10.11'),
Toolchain('osx-10-11-hid-sections', 'Xcode', osx_version='10.11'),
Toolchain('osx-10-11-hid-sections-lto', 'Xcode', osx_version='10.11'),
Toolchain('osx-10-11-lto', 'Xcode', osx_version='10.11'),
Toolchain('osx-10-12', 'Xcode', osx_version='10.12'),
Toolchain('osx-10-12-hid-sections', 'Xcode', osx_version='10.12'),
Toolchain('osx-10-12-lto', 'Xcode', osx_version='10.12'),
Toolchain('osx-10-12-cxx98', 'Xcode', osx_version='10.12'),
Toolchain('osx-10-12-cxx14', 'Xcode', osx_version='10.12'),
Toolchain('osx-10-12-cxx17', 'Xcode', osx_version='10.12'),
Toolchain('osx-10-10-dep-10-7', 'Xcode', osx_version='10.10'),
Toolchain('osx-10-12-dep-10-10', 'Xcode', osx_version='10.12'),
Toolchain('osx-10-12-dep-10-10-lto', 'Xcode', osx_version='10.12'),
Toolchain('osx-10-10-dep-10-9-make', 'Unix Makefiles'),
Toolchain('osx-10-11-make', 'Unix Makefiles'),
Toolchain('osx-10-12-make', 'Unix Makefiles'),
Toolchain('osx-10-12-ninja', 'Ninja'),
Toolchain('osx-10-11-sanitize-address', 'Xcode', osx_version='10.11'),
Toolchain('osx-10-12-sanitize-address', 'Xcode', osx_version='10.12'),
Toolchain('osx-10-12-sanitize-address-hid-sections', 'Xcode', osx_version='10.12'),
Toolchain('osx-10-13', 'Xcode', osx_version='10.13'),
Toolchain('osx-10-13-dep-10-10', 'Xcode', osx_version='10.13'),
Toolchain('osx-10-13-dep-10-10-cxx14', 'Xcode', osx_version='10.13'),
Toolchain('osx-10-13-dep-10-10-cxx17', 'Xcode', osx_version='10.13'),
Toolchain('osx-10-13-dep-10-12', 'Xcode', osx_version='10.13'),
Toolchain('osx-10-13-dep-10-12-cxx14', 'Xcode', osx_version='10.13'),
Toolchain('osx-10-13-dep-10-12-cxx17', 'Xcode', osx_version='10.13'),
Toolchain('osx-10-13-make-cxx14', 'Unix Makefiles'),
Toolchain('osx-10-13-cxx14', 'Xcode', osx_version='10.13'),
Toolchain('osx-10-13-cxx17', 'Xcode', osx_version='10.13'),
Toolchain('osx-10-13-i386-cxx14', 'Xcode', osx_version='10.13'),
Toolchain('osx-10-14', 'Xcode', osx_version='10.14'),
Toolchain('osx-10-14-dep-10-10', 'Xcode', osx_version='10.14'),
Toolchain('osx-10-14-dep-10-10-cxx14', 'Xcode', osx_version='10.14'),
Toolchain('osx-10-14-dep-10-10-cxx17', 'Xcode', osx_version='10.14'),
Toolchain('osx-10-14-dep-10-12', 'Xcode', osx_version='10.14'),
Toolchain('osx-10-14-dep-10-12-cxx14', 'Xcode', osx_version='10.14'),
Toolchain('osx-10-14-dep-10-12-cxx17', 'Xcode', osx_version='10.14'),
Toolchain('osx-10-14-cxx14', 'Xcode', osx_version='10.14'),
Toolchain('osx-10-14-cxx17', 'Xcode', osx_version='10.14'),
Toolchain('osx-10-15', 'Xcode', osx_version='10.15'),
Toolchain('osx-10-15-cxx17', 'Xcode', osx_version='10.15'),
Toolchain('osx-10-15-dep-10-10', 'Xcode', osx_version='10.15'),
Toolchain('osx-10-15-dep-10-10-cxx14', 'Xcode', osx_version='10.15'),
Toolchain('osx-10-15-dep-10-10-cxx17', 'Xcode', osx_version='10.15'),
Toolchain('osx-10-15-dep-10-12-cxx17', 'Xcode', osx_version='10.15'),
Toolchain('osx-11-0', 'Xcode', osx_version='11.0'),
Toolchain('osx-11-0-cxx17', 'Xcode', osx_version='11.0'),
Toolchain('osx-11-0-dep-10-10-cxx17', 'Xcode', osx_version='11.0'),
Toolchain('osx-11-1-dep-10-10-cxx17', 'Xcode', osx_version='11.1'),
Toolchain('linux-gcc-x64', 'Unix Makefiles'),
]
# Toolchains only usable on POSIX hosts (Linux/macOS): clang/gcc variants,
# sanitizers, cross-compilers and Ninja-generator entries. Appended to the
# platform-independent table defined above.
if os.name == 'posix':
    toolchain_table += [
        Toolchain('analyze', 'Unix Makefiles'),
        Toolchain('analyze-cxx17', 'Unix Makefiles'),
        Toolchain('clang-5', 'Unix Makefiles'),
        Toolchain('clang-5-cxx14', 'Unix Makefiles'),
        Toolchain('clang-5-cxx17', 'Unix Makefiles'),
        Toolchain('clang-cxx20', 'Unix Makefiles'),
        Toolchain('clang-cxx17', 'Unix Makefiles'),
        Toolchain('clang-cxx17-pic', 'Unix Makefiles'),
        Toolchain('clang-cxx14', 'Unix Makefiles'),
        Toolchain('clang-cxx14-pic', 'Unix Makefiles'),
        Toolchain('clang-cxx11', 'Unix Makefiles'),
        Toolchain('clang-libcxx', 'Unix Makefiles'),
        Toolchain('clang-libcxx-fpic', 'Unix Makefiles'),
        Toolchain('clang-libcxx14', 'Unix Makefiles'),
        Toolchain('clang-libcxx14-fpic', 'Unix Makefiles'),
        Toolchain('clang-libcxx17', 'Unix Makefiles'),
        Toolchain('clang-libcxx17-fpic', 'Unix Makefiles'),
        Toolchain('clang-libcxx98', 'Unix Makefiles'),
        Toolchain('clang-libcxx17-static', 'Unix Makefiles'),
        Toolchain('clang-lto', 'Unix Makefiles'),
        Toolchain('clang-libstdcxx', 'Unix Makefiles'),
        Toolchain('clang-omp', 'Unix Makefiles'),
        Toolchain('clang-fpic', 'Unix Makefiles'),
        Toolchain('clang-fpic-hid-sections', 'Unix Makefiles'),
        Toolchain('clang-fpic-static-std', 'Unix Makefiles'),
        Toolchain('clang-fpic-static-std-cxx14', 'Unix Makefiles'),
        Toolchain('clang-tidy', 'Unix Makefiles'),
        Toolchain('clang-tidy-libcxx', 'Unix Makefiles'),
        Toolchain('gcc', 'Unix Makefiles'),
        Toolchain('gcc-ninja', 'Ninja'),
        Toolchain('gcc-static', 'Unix Makefiles'),
        Toolchain('gcc-static-std', 'Unix Makefiles'),
        Toolchain('gcc-musl', 'Unix Makefiles'),
        Toolchain('gcc-32bit', 'Unix Makefiles'),
        Toolchain('gcc-32bit-pic', 'Unix Makefiles'),
        Toolchain('gcc-hid', 'Unix Makefiles'),
        Toolchain('gcc-hid-fpic', 'Unix Makefiles'),
        Toolchain('gcc-gold', 'Unix Makefiles'),
        Toolchain('gcc-pic', 'Unix Makefiles'),
        Toolchain('gcc-pic-cxx17', 'Unix Makefiles'),
        Toolchain('gcc-c11', 'Unix Makefiles'),
        Toolchain('gcc-cxx14-c11', 'Unix Makefiles'),
        Toolchain('gcc-cxx17-c11', 'Unix Makefiles'),
        Toolchain('gcc-4-8', 'Unix Makefiles'),
        Toolchain('gcc-4-8-c11', 'Unix Makefiles'),
        Toolchain('gcc-4-8-pic', 'Unix Makefiles'),
        Toolchain('gcc-4-8-pic-hid-sections', 'Unix Makefiles'),
        Toolchain('gcc-4-8-pic-hid-sections-cxx11-c11', 'Unix Makefiles'),
        Toolchain('gcc-pic-hid-sections', 'Unix Makefiles'),
        Toolchain('gcc-pic-hid-sections-lto', 'Unix Makefiles'),
        Toolchain('gcc-5-pic-hid-sections-lto', 'Unix Makefiles'),
        Toolchain('gcc-5-pic-hid-sections', 'Unix Makefiles'),
        Toolchain('gcc-5', 'Unix Makefiles'),
        Toolchain('gcc-5-cxx14-c11', 'Unix Makefiles'),
        Toolchain('gcc-6-32bit-cxx14', 'Unix Makefiles'),
        Toolchain('gcc-7', 'Unix Makefiles'),
        Toolchain('gcc-7-cxx11-pic', 'Unix Makefiles'),
        Toolchain('gcc-7-cxx14', 'Unix Makefiles'),
        Toolchain('gcc-7-cxx14-pic', 'Unix Makefiles'),
        Toolchain('gcc-7-cxx17', 'Unix Makefiles'),
        Toolchain('gcc-7-cxx17-gnu', 'Unix Makefiles'),
        Toolchain('gcc-7-cxx17-pic', 'Unix Makefiles'),
        Toolchain('gcc-7-cxx17-concepts', 'Unix Makefiles'),
        Toolchain('gcc-7-pic-hid-sections-lto', 'Unix Makefiles'),
        Toolchain('gcc-8-cxx14', 'Unix Makefiles'),
        Toolchain('gcc-8-cxx14-fpic', 'Unix Makefiles'),
        Toolchain('gcc-8-cxx17', 'Unix Makefiles'),
        Toolchain('gcc-8-cxx17-fpic', 'Unix Makefiles'),
        Toolchain('gcc-8-cxx17-gnu-fpic', 'Unix Makefiles'),
        Toolchain('gcc-8-cxx17-concepts', 'Unix Makefiles'),
        Toolchain('gcc-9', 'Unix Makefiles'),
        Toolchain('gcc-9-cxx17', 'Unix Makefiles'),
        Toolchain('gcc-9-cxx17-fpic', 'Unix Makefiles'),
        Toolchain('gcc-9-cxx17-gnu-fpic', 'Unix Makefiles'),
        Toolchain('gcc-10', 'Unix Makefiles'),
        Toolchain('gcc-10-cxx17', 'Unix Makefiles'),
        Toolchain('gcc-10-cxx17-fpic', 'Unix Makefiles'),
        Toolchain('gcc-10-cxx17-gnu-fpic', 'Unix Makefiles'),
        Toolchain('gcc-cxx98', 'Unix Makefiles'),
        Toolchain('gcc-lto', 'Unix Makefiles'),
        Toolchain('libcxx', 'Unix Makefiles'),
        Toolchain('libcxx14', 'Unix Makefiles'),
        Toolchain('libcxx-no-sdk', 'Unix Makefiles'),
        Toolchain('libcxx-hid', 'Unix Makefiles'),
        Toolchain('libcxx-hid-fpic', 'Unix Makefiles'),
        Toolchain('libcxx-fpic-hid-sections', 'Unix Makefiles'),
        Toolchain('libcxx-hid-sections', 'Unix Makefiles'),
        Toolchain('sanitize-address', 'Unix Makefiles'),
        Toolchain('sanitize-address-cxx17', 'Unix Makefiles'),
        Toolchain('sanitize-address-cxx17-pic', 'Unix Makefiles'),
        Toolchain('sanitize-thread', 'Unix Makefiles'),
        Toolchain('sanitize-thread-cxx17', 'Unix Makefiles'),
        Toolchain('sanitize-thread-cxx17-pic', 'Unix Makefiles'),
        Toolchain('arm-openwrt-linux-muslgnueabi', 'Unix Makefiles'),
        Toolchain('arm-openwrt-linux-muslgnueabi-cxx14', 'Unix Makefiles'),
        Toolchain('openbsd-egcc-cxx11-static-std', 'Unix Makefiles'),
        Toolchain('ninja-gcc-7-cxx17-concepts', 'Ninja'),
        Toolchain('ninja-gcc-8-cxx17-concepts', 'Ninja'),
        Toolchain('ninja-clang-cxx17-fpic', 'Ninja'),
        Toolchain('ninja-gcc-cxx17-fpic', 'Ninja'),
    ]
def get_by_name(name):
    """Return the Toolchain entry whose ``name`` matches exactly.

    Exits the process with an error message if the name is not present in
    ``toolchain_table`` — a missing entry indicates an internal
    inconsistency, not a user error.
    """
    for toolchain in toolchain_table:
        if toolchain.name == name:
            return toolchain
    sys.exit('Internal error: toolchain not found in toolchain table')
| 53.543895
| 130
| 0.631789
| 7,499
| 53,062
| 4.418056
| 0.027737
| 0.093085
| 0.163352
| 0.105038
| 0.900607
| 0.841387
| 0.78413
| 0.73647
| 0.705925
| 0.634029
| 0
| 0.107223
| 0.165825
| 53,062
| 990
| 131
| 53.59798
| 0.641281
| 0.005013
| 0
| 0.271134
| 0
| 0.02268
| 0.472759
| 0.257729
| 0
| 0
| 0
| 0
| 0.005155
| 1
| 0.003093
| false
| 0
| 0.002062
| 0
| 0.007216
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a8ff3b3ae91e768f5aeefeb794ed7343630cb32
| 37,012
|
py
|
Python
|
src/tests/quantuminspire/qiskit/test_circuit_parser.py
|
kel85uk/quantuminspire
|
f377ecaf3b55a89c4a6e42954733c3ae469ab5ca
|
[
"Apache-2.0"
] | null | null | null |
src/tests/quantuminspire/qiskit/test_circuit_parser.py
|
kel85uk/quantuminspire
|
f377ecaf3b55a89c4a6e42954733c3ae469ab5ca
|
[
"Apache-2.0"
] | 1
|
2021-01-08T10:12:31.000Z
|
2021-01-08T10:12:31.000Z
|
src/tests/quantuminspire/qiskit/test_circuit_parser.py
|
kel85uk/quantuminspire
|
f377ecaf3b55a89c4a6e42954733c3ae469ab5ca
|
[
"Apache-2.0"
] | null | null | null |
""" Quantum Inspire SDK
Copyright 2018 QuTech Delft
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from unittest.mock import Mock
import numpy as np
import qiskit
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.compiler import assemble, transpile
from qiskit.circuit import Instruction
from qiskit.assembler.run_config import RunConfig
from qiskit.qobj import QobjHeader
from quantuminspire.qiskit.circuit_parser import CircuitToString
from quantuminspire.qiskit.backend_qx import QuantumInspireBackend
from quantuminspire.exceptions import ApiError
class TestQiCircuitToString(unittest.TestCase):
def test_generate_cqasm_with_entangle_algorithm(self):
    """End-to-end check: a Bell-pair circuit assembles to the expected cQASM."""
    q = QuantumRegister(2)
    b = ClassicalRegister(2)
    circuit = QuantumCircuit(q, b)
    circuit.h(q[0])
    circuit.cx(q[0], q[1])
    circuit.measure(q[0], b[0])
    circuit.measure(q[1], b[1])
    backend = QuantumInspireBackend(Mock(), Mock())
    # transpiling the circuits using the transpiler_config
    new_circuits = transpile(circuit, backend)
    run_config = RunConfig(shots=1024, max_credits=10, memory=False)
    # assembling the circuits into a qobj to be run on the backend
    qiskit_job = assemble(new_circuits, backend, run_config=run_config.to_dict())
    experiment = qiskit_job.experiments[0]
    result = backend._generate_cqasm(experiment)
    expected = "version 1.0\n" \
               "# cQASM generated by QI backend for Qiskit\n" \
               "qubits 2\n" \
               "H q[0]\n" \
               "CNOT q[0], q[1]\n"
    self.assertEqual(result, expected)
@staticmethod
def _generate_cqasm_from_instructions(instructions, number_of_qubits=2, full_state_projection=True):
    """Build a QasmQobjExperiment from raw instruction dicts and translate it.

    :param instructions: list of instruction dicts as produced by qobj assembly.
    :param number_of_qubits: qubit (and clbit) count placed in the experiment header.
    :param full_state_projection: forwarded to the backend's cQASM generator.
    :return: the generated cQASM string.
    """
    experiment_dict = {'instructions': instructions,
                       'header': {'n_qubits': number_of_qubits,
                                  'number_of_clbits': number_of_qubits,
                                  'compiled_circuit_qasm': ''},
                       'config': {'coupling_map': 'all-to-all',
                                  'basis_gates': 'x,y,z,h,rx,ry,rz,s,cx,ccx,u1,u2,u3,id,snapshot',
                                  'n_qubits': number_of_qubits}}
    experiment = qiskit.qobj.QasmQobjExperiment.from_dict(experiment_dict)
    for instruction in experiment.instructions:
        if hasattr(instruction, 'params'):
            # convert params to params used in qiskit instructions
            qiskit_instruction = Instruction('dummy', 0, 0, instruction.params)
            instruction.params = qiskit_instruction.params
    simulator = QuantumInspireBackend(Mock(), Mock())
    result = simulator._generate_cqasm(experiment, full_state_projection)
    return result
def test_generate_cqasm_correct_output_controlled_z(self):
    """A cz instruction translates to a cQASM CZ statement."""
    instructions = [{'name': 'cz', 'qubits': [0, 1]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('CZ q[0], q[1]\n', result)
def test_generate_cqasm_correct_output_conditional_controlled_z(self):
    """A conditional cz emits C-CZ wrapped in mask-negating `not` lines."""
    instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 1, 'relation': '==', 'val': '0xE'},
                    {'conditional': 1, 'name': 'cz', 'qubits': [0, 1]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[0]\nC-CZ b[0:3], q[0], q[1]\nnot b[0]\n', result)
def test_generate_cqasm_correct_output_controlled_not(self):
    """A cx instruction translates to a cQASM CNOT statement."""
    instructions = [{'name': 'cx', 'qubits': [0, 1]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('CNOT q[0], q[1]\n', result)
def test_generate_cqasm_correct_output_conditional_controlled_not(self):
    """A conditional cx emits C-CNOT wrapped in mask-negating `not` lines."""
    instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 1, 'relation': '==', 'val': '0xE'},
                    {'conditional': 1, 'name': 'cx', 'qubits': [0, 1]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[0]\nC-CNOT b[0:3], q[0], q[1]\nnot b[0]\n', result)
def test_generate_cqasm_correct_output_toffoli(self):
    """A ccx instruction translates to a cQASM Toffoli statement."""
    instructions = [{'name': 'ccx', 'qubits': [0, 1, 2]}]
    result = self._generate_cqasm_from_instructions(instructions, number_of_qubits=3)
    self.assertIn('Toffoli q[0], q[1], q[2]\n', result)
def test_generate_cqasm_correct_output_conditional_toffoli(self):
    """A conditional ccx emits C-Toffoli wrapped in mask-negating `not` lines."""
    instructions = [{'mask': '0xFF', 'name': 'bfunc', 'register': 2, 'relation': '==', 'val': '0xE'},
                    {'conditional': 2, 'name': 'ccx', 'qubits': [0, 1, 2]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[0,4,5,6,7]\nC-Toffoli b[0:7], q[0], q[1], q[2]\nnot b[0,4,5,6,7]\n', result)
def test_generate_cqasm_correct_output_measure(self):
    """With full-state projection (default), explicit measure lines are omitted."""
    instructions = [{'memory': [0], 'name': 'measure', 'qubits': [0]}]
    result = self._generate_cqasm_from_instructions(instructions, 3)
    measure_line = 'measure q[0]\n'
    self.assertNotIn(measure_line, result)
def test_generate_cqasm_correct_output_measure_q0_non_fsp(self):
    """Without full-state projection, a measure line is emitted for qubit 0."""
    instructions = [{'memory': [0], 'name': 'measure', 'qubits': [0]}]
    result = self._generate_cqasm_from_instructions(instructions, 3, False)
    measure_line = 'measure q[0]\n'
    self.assertIn(measure_line, result)
def test_generate_cqasm_correct_output_measure_q1_non_fsp(self):
    """Without full-state projection, a measure line is emitted for qubit 1."""
    instructions = [{'memory': [0], 'name': 'measure', 'qubits': [1]}]
    result = self._generate_cqasm_from_instructions(instructions, 3, False)
    measure_line = 'measure q[1]\n'
    self.assertIn(measure_line, result)
def test_generate_cqasm_correct_output_hadamard(self):
    """An h instruction translates to a cQASM H statement."""
    instructions = [{'name': 'h', 'qubits': [0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('H q[0]\n', result)
def test_generate_cqasm_correct_output_conditional_hadamard(self):
    """A conditional h emits C-H wrapped in mask-negating `not` lines."""
    instructions = [{'mask': '0xFF', 'name': 'bfunc', 'register': 3, 'relation': '==', 'val': '0xE'},
                    {'conditional': 3, 'name': 'h', 'qubits': [0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[0,4,5,6,7]\nC-H b[0:7], q[0]\nnot b[0,4,5,6,7]\n', result)
def test_generate_cqasm_correct_output_barrier(self):
    """barrier instructions produce no cQASM output."""
    instructions = [{'name': 'barrier', 'qubits': [0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertNotIn('barrier', result)
def test_generate_cqasm_correct_output_conditional_barrier(self):
    """Conditional barrier instructions also produce no cQASM output."""
    instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 4, 'relation': '==', 'val': '0xE'},
                    {'conditional': 4, 'name': 'barrier', 'qubits': [0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertNotIn('barrier', result)
def test_generate_cqasm_correct_output_identity(self):
    """An id instruction translates to a cQASM I statement."""
    instructions = [{'name': 'id', 'qubits': [0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('I q[0]\n', result)
def test_generate_cqasm_correct_output_conditional_identity(self):
    """A conditional id emits C-I wrapped in mask-negating `not` lines."""
    instructions = [{'mask': '0xFF', 'name': 'bfunc', 'register': 5, 'relation': '==', 'val': '0xE'},
                    {'conditional': 5, 'name': 'id', 'qubits': [0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[0,4,5,6,7]\nC-I b[0:7], q[0]\nnot b[0,4,5,6,7]\n', result)
def test_generate_cqasm_correct_output_gate_s(self):
    """An s instruction translates to a cQASM S statement."""
    instructions = [{'name': 's', 'qubits': [1]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('S q[1]\n', result)
def test_generate_cqasm_correct_output_conditional_gate_s(self):
    """A conditional s emits C-S wrapped in mask-negating `not` lines."""
    instructions = [{'mask': '0x1FF', 'name': 'bfunc', 'register': 5, 'relation': '==', 'val': '0xB'},
                    {'conditional': 5, 'name': 's', 'qubits': [2]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[2,4,5,6,7,8]\nC-S b[0:8], q[2]\nnot b[2,4,5,6,7,8]\n', result)
def test_generate_cqasm_correct_output_gate_sdag(self):
    """An sdg instruction translates to a cQASM Sdag statement."""
    instructions = [{'name': 'sdg', 'qubits': [2]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('Sdag q[2]\n', result)
def test_generate_cqasm_correct_output_conditional_gate_sdag(self):
    """A conditional sdg emits C-Sdag wrapped in mask-negating `not` lines."""
    instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 6, 'relation': '==', 'val': '0xE'},
                    {'conditional': 6, 'name': 'sdg', 'qubits': [0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[0]\nC-Sdag b[0:3], q[0]\nnot b[0]\n', result)
def test_generate_cqasm_correct_output_gate_swap(self):
    """A swap instruction translates to a cQASM SWAP statement."""
    instructions = [{'name': 'swap', 'qubits': [2, 3]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('SWAP q[2], q[3]\n', result)
def test_generate_cqasm_correct_output_conditional_gate_swap(self):
    """A conditional swap emits C-SWAP wrapped in mask-negating `not` lines."""
    instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 7, 'relation': '==', 'val': '0xE'},
                    {'conditional': 7, 'name': 'swap', 'qubits': [0, 1]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[0]\nC-SWAP b[0:3], q[0], q[1]\nnot b[0]\n', result)
def test_generate_cqasm_correct_output_gate_t(self):
    """A t instruction translates to a cQASM T statement."""
    instructions = [{'name': 't', 'qubits': [2]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('T q[2]\n', result)
def test_generate_cqasm_correct_output_conditional_gate_t(self):
    """A conditional t emits C-T wrapped in mask-negating `not` lines."""
    instructions = [{'mask': '0x1FF', 'name': 'bfunc', 'register': 8, 'relation': '==', 'val': '0xB'},
                    {'conditional': 8, 'name': 't', 'qubits': [1]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[2,4,5,6,7,8]\nC-T b[0:8], q[1]\nnot b[2,4,5,6,7,8]\n', result)
def test_generate_cqasm_correct_output_gate_tdag(self):
    """A tdg instruction translates to a cQASM Tdag statement."""
    instructions = [{'name': 'tdg', 'qubits': [2]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('Tdag q[2]\n', result)
def test_generate_cqasm_correct_output_conditional_gate_tdag(self):
    """A conditional tdg emits C-Tdag wrapped in mask-negating `not` lines."""
    instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 9, 'relation': '==', 'val': '0xE'},
                    {'conditional': 9, 'name': 'tdg', 'qubits': [0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[0]\nC-Tdag b[0:3], q[0]\nnot b[0]\n', result)
def test_generate_cqasm_correct_output_gate_x(self):
    """An x instruction translates to a cQASM X statement."""
    instructions = [{'name': 'x', 'qubits': [0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('X q[0]\n', result)
def test_generate_cqasm_correct_output_conditional_gate_x(self):
    """A conditional x emits C-X wrapped in mask-negating `not` lines."""
    instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 9, 'relation': '==', 'val': '0xE'},
                    {'conditional': 9, 'name': 'x', 'qubits': [0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[0]\nC-X b[0:3], q[0]\nnot b[0]\n', result)
def test_generate_cqasm_correct_output_gate_y(self):
    """A y instruction translates to a cQASM Y statement."""
    instructions = [{'name': 'y', 'qubits': [0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('Y q[0]\n', result)
def test_generate_cqasm_correct_output_conditional_gate_y(self):
    """A conditional y emits C-Y wrapped in mask-negating `not` lines."""
    instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 9, 'relation': '==', 'val': '0x1'},
                    {'conditional': 9, 'name': 'y', 'qubits': [0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[1,2,3]\nC-Y b[0:3], q[0]\nnot b[1,2,3]\n', result)
def test_generate_cqasm_correct_output_gate_z(self):
    """A z instruction translates to a cQASM Z statement."""
    instructions = [{'name': 'z', 'qubits': [0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('Z q[0]\n', result)
def test_generate_cqasm_correct_output_conditional_gate_z(self):
    """A conditional z emits C-Z wrapped in mask-negating `not` lines."""
    instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 9, 'relation': '==', 'val': '0x3'},
                    {'conditional': 9, 'name': 'z', 'qubits': [0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[2,3]\nC-Z b[0:3], q[0]\nnot b[2,3]\n', result)
def test_generate_cqasm_correct_output_gate_u(self):
    """A u instruction decomposes into Rz/Ry/Rz rotations in the cQASM output."""
    instructions = [{'name': 'u', 'qubits': [0], 'params': [0, 0, np.pi / 2]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('Rz q[0], 1.570796\n', result)
    instructions = [{'name': 'u', 'qubits': [0], 'params': [-np.pi / 2, 0, 0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('Ry q[0], -1.570796\n', result)
    instructions = [{'name': 'u', 'qubits': [0], 'params': [np.pi / 4, np.pi / 2, -np.pi / 2]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('Rz q[0], -1.570796\nRy q[0], 0.785398\nRz q[0], 1.570796\n', result)
    instructions = [{'name': 'u', 'qubits': [1], 'params': [0.123456, 0.654321, -0.333333]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('Rz q[1], -0.333333\nRy q[1], 0.123456\nRz q[1], 0.654321\n', result)
def test_generate_cqasm_correct_output_conditional_gate_u(self):
    """A conditional u decomposes into C-Rz/C-Ry/C-Rz wrapped in `not` lines."""
    instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 10, 'relation': '==', 'val': '0x3'},
                    {'conditional': 10, 'name': 'u', 'qubits': [0], 'params': [0, 0, np.pi / 2]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[2,3]\nC-Rz b[0:3], q[0], 1.570796\nnot b[2,3]\n', result)
    instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 10, 'relation': '==', 'val': '0x3'},
                    {'conditional': 10, 'name': 'u', 'qubits': [0], 'params': [-np.pi / 2, 0, 0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[2,3]\nC-Ry b[0:3], q[0], -1.570796\nnot b[2,3]', result)
    instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 10, 'relation': '==', 'val': '0x3'},
                    {'conditional': 10, 'name': 'u', 'qubits': [0], 'params': [np.pi / 4, np.pi / 2, -np.pi / 2]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[2,3]\nC-Rz b[0:3], q[0], -1.570796\nC-Ry b[0:3], q[0], 0.785398\nC-Rz b[0:3],'
                  ' q[0], 1.570796\nnot b[2,3]\n', result)
    instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 10, 'relation': '==', 'val': '0x3'},
                    {'conditional': 10, 'name': 'u', 'qubits': [1], 'params': [0.123456, 0.654321, -0.333333]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[2,3]\nC-Rz b[0:3], q[1], -0.333333\nC-Ry b[0:3], q[1], 0.123456\nC-Rz b[0:3],'
                  ' q[1], 0.654321\nnot b[2,3]\n', result)
def test_generate_cqasm_correct_output_gate_u1(self):
    """A u1 instruction becomes a single Rz; a zero angle produces no output."""
    instructions = [{'name': 'u1', 'qubits': [0], 'params': [np.pi / 2]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('Rz q[0], 1.570796\n', result)
    instructions = [{'name': 'u1', 'qubits': [1], 'params': [np.pi / 4]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('Rz q[1], 0.785398\n', result)
    instructions = [{'name': 'u1', 'qubits': [2], 'params': [-np.pi / 4]}]
    result = self._generate_cqasm_from_instructions(instructions, 3)
    self.assertIn('Rz q[2], -0.785398\n', result)
    instructions = [{'name': 'u1', 'qubits': [2], 'params': [0.123456]}]
    result = self._generate_cqasm_from_instructions(instructions, 3)
    self.assertIn('Rz q[2], 0.123456\n', result)
    instructions = [{'name': 'u1', 'qubits': [0], 'params': [0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertNotIn('q[0]', result)
def test_generate_cqasm_correct_output_conditional_gate_u1(self):
    """A conditional u1 becomes a masked C-Rz; a zero angle produces no output."""
    instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 11, 'relation': '==', 'val': '0x3'},
                    {'conditional': 11, 'name': 'u1', 'qubits': [0], 'params': [np.pi / 2]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[2,3]\nC-Rz b[0:3], q[0], 1.570796\nnot b[2,3]\n', result)
    instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 11, 'relation': '==', 'val': '0x3'},
                    {'conditional': 11, 'name': 'u1', 'qubits': [1], 'params': [np.pi / 4]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[2,3]\nC-Rz b[0:3], q[1], 0.785398\nnot b[2,3]\n', result)
    instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 11, 'relation': '==', 'val': '0x3'},
                    {'conditional': 11, 'name': 'u1', 'qubits': [2], 'params': [-np.pi / 4]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[2,3]\nC-Rz b[0:3], q[2], -0.785398\nnot b[2,3]\n', result)
    instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 11, 'relation': '==', 'val': '0x3'},
                    {'conditional': 11, 'name': 'u1', 'qubits': [2], 'params': [0.123456]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('not b[2,3]\nC-Rz b[0:3], q[2], 0.123456\nnot b[2,3]\n', result)
    instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 11, 'relation': '==', 'val': '0x3'},
                    {'conditional': 11, 'name': 'u1', 'qubits': [0], 'params': [0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertNotIn('q[0]', result)
def test_generate_cqasm_correct_output_gate_u2(self):
    """A u2 instruction decomposes into Rz/Ry/Rz; zero angles are omitted."""
    instructions = [{'name': 'u2', 'qubits': [0], 'params': [np.pi, np.pi / 2]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('Rz q[0], 1.570796\nRy q[0], 1.570796\nRz q[0], 3.141593\n', result)
    instructions = [{'name': 'u2', 'qubits': [1], 'params': [0, np.pi]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('Rz q[1], 3.141593\nRy q[1], 1.570796\n', result)
    instructions = [{'name': 'u2', 'qubits': [2], 'params': [0.123456, -0.654321]}]
    result = self._generate_cqasm_from_instructions(instructions, 3)
    self.assertIn('Rz q[2], -0.654321\nRy q[2], 1.570796\nRz q[2], 0.123456\n', result)
    instructions = [{'name': 'u2', 'qubits': [0], 'params': [0, 0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertIn('Ry q[0], 1.570796\n', result)
def test_generate_cqasm_correct_output_conditional_gate_u2(self):
    """Conditional u2 must emit C-Rz/C-Ry rotations wrapped in 'not' toggles for zero mask bits."""
    bfunc = {'mask': '0xF', 'name': 'bfunc', 'register': 12, 'relation': '==', 'val': '0x3'}
    cases = [
        ([0], [np.pi, np.pi / 2], 2,
         'not b[2,3]\nC-Rz b[0:3], q[0], 1.570796\nC-Ry b[0:3], q[0], 1.570796\nC-Rz b[0:3], q[0], 3.141593\nnot b[2,3]\n'),
        ([1], [0, np.pi], 2,
         'not b[2,3]\nC-Rz b[0:3], q[1], 3.141593\nC-Ry b[0:3], q[1], 1.570796\nnot b[2,3]\n'),
        ([2], [0.123456, -0.654321], 3,
         'not b[2,3]\nC-Rz b[0:3], q[2], -0.654321\nC-Ry b[0:3], q[2], 1.570796\nC-Rz b[0:3], q[2], 0.123456\nnot b[2,3]\n'),
        ([0], [0, 0], 2,
         'not b[2,3]\nC-Ry b[0:3], q[0], 1.570796\nnot b[2,3]\n'),
    ]
    for qubits, params, nr_qubits, expected in cases:
        instructions = [dict(bfunc),
                        {'conditional': 12, 'name': 'u2', 'qubits': qubits, 'params': params}]
        result = self._generate_cqasm_from_instructions(instructions, nr_qubits)
        self.assertTrue(expected in result)
def test_generate_cqasm_correct_output_gate_u3(self):
    """u3(theta, phi, lam) must expand to Rz(lam); Ry(theta); Rz(phi), dropping zero angles."""
    cases = [
        ([0], [1, 2, 3], 2, 'Rz q[0], 3.000000\nRy q[0], 1.000000\nRz q[0], 2.000000\n'),
        ([1], [0.123456, 0.654321, -0.333333], 2,
         'Rz q[1], -0.333333\nRy q[1], 0.123456\nRz q[1], 0.654321\n'),
        ([1], [0, 0.654321, 0], 2, 'Rz q[1], 0.654321\n'),
        ([2], [0.654321, 0, 0], 3, 'Ry q[2], 0.654321\n'),
    ]
    for qubits, params, nr_qubits, expected in cases:
        instructions = [{'name': 'u3', 'qubits': qubits, 'params': params}]
        result = self._generate_cqasm_from_instructions(instructions, nr_qubits)
        self.assertTrue(expected in result)
    # All-zero angles collapse to the identity: nothing is emitted for the qubit.
    instructions = [{'name': 'u3', 'qubits': [0], 'params': [0, 0, 0]}]
    result = self._generate_cqasm_from_instructions(instructions, 2)
    self.assertFalse('q[0]' in result)
def test_generate_cqasm_correct_output_conditional_gate_u3(self):
    """Conditional u3 must emit C-Rz/C-Ry rotations wrapped in 'not' toggles for zero mask bits."""
    def run(val, qubits, params):
        # Helper keeping each case to one line; all cases use a 2-qubit circuit.
        instructions = [
            {'mask': '0xF', 'name': 'bfunc', 'register': 13, 'relation': '==', 'val': val},
            {'conditional': 13, 'name': 'u3', 'qubits': qubits, 'params': params}]
        return self._generate_cqasm_from_instructions(instructions, 2)

    self.assertTrue('not b[2,3]\nC-Rz b[0:3], q[0], 3.000000\nC-Ry b[0:3], q[0], 1.000000\n'
                    'C-Rz b[0:3], q[0], 2.000000\nnot b[2,3]\n' in run('0x3', [0], [1, 2, 3]))
    self.assertTrue('not b[2,3]\nC-Rz b[0:3], q[1], -0.333333\nC-Ry b[0:3], q[1], 0.123456\n'
                    'C-Rz b[0:3], q[1], 0.654321\nnot b[2,3]\n'
                    in run('0x3', [1], [0.123456, 0.654321, -0.333333]))
    self.assertTrue('not b[2,3]\nC-Rz b[0:3], q[1], 0.654321\nnot b[2,3]\n'
                    in run('0x3', [1], [0, 0.654321, 0]))
    self.assertTrue('not b[2,3]\nC-Ry b[0:3], q[2], 0.654321\nnot b[2,3]\n'
                    in run('0x3', [2], [0.654321, 0, 0]))
    # All-zero angles collapse to the identity: nothing is emitted for the qubit.
    self.assertFalse('q[0]' in run('0x1', [0], [0, 0, 0]))
def test_generate_cqasm_correct_output_sympy_special_cases(self):
    """Angles of every numeric special type must be formatted as a 6-decimal float."""
    cases = [
        (1, 0, 'Rx q[1], 0.000000\n'),          # Zero
        (1, 1, 'Rx q[1], 1.000000\n'),          # One
        (1, 2, 'Rx q[1], 2.000000\n'),          # Integer
        (1, -1, 'Rx q[1], -1.000000\n'),        # NegativeOne
        (0, np.pi / 2, 'Rx q[0], 1.570796\n'),  # Float
    ]
    for qubit, angle, expected in cases:
        instructions = [{'name': 'rx', 'qubits': [qubit], 'params': [angle]}]
        result = self._generate_cqasm_from_instructions(instructions, 2)
        self.assertTrue(expected in result)
def test_generate_cqasm_correct_output_rotation_x(self):
    """rx must be rendered as an Rx line with a 6-decimal angle."""
    for qubit, angle, expected in [(0, np.pi / 2, 'Rx q[0], 1.570796\n'),
                                   (1, 0.123456, 'Rx q[1], 0.123456\n')]:
        instructions = [{'name': 'rx', 'qubits': [qubit], 'params': [angle]}]
        result = self._generate_cqasm_from_instructions(instructions, 2)
        self.assertTrue(expected in result)
def test_generate_cqasm_correct_output_conditional_rotation_x(self):
    """Conditional rx must become a C-Rx wrapped in 'not' toggles for the zero-valued mask bits."""
    for qubit, angle, expected in [
            (0, np.pi / 2, 'not b[0,4,5,6,7]\nC-Rx b[0:7], q[0], 1.570796\nnot b[0,4,5,6,7]\n'),
            (1, 0.123456, 'not b[0,4,5,6,7]\nC-Rx b[0:7], q[1], 0.123456\nnot b[0,4,5,6,7]\n')]:
        instructions = [{'mask': '0xFF', 'name': 'bfunc', 'register': 14, 'relation': '==', 'val': '0xE'},
                        {'conditional': 14, 'name': 'rx', 'qubits': [qubit], 'params': [angle]}]
        result = self._generate_cqasm_from_instructions(instructions, 2)
        self.assertTrue(expected in result)
def test_generate_cqasm_correct_output_rotation_y(self):
    """ry must be rendered as an Ry line with a 6-decimal angle."""
    for qubit, angle, expected in [(0, np.pi / 2, 'Ry q[0], 1.570796\n'),
                                   (1, 0.654321, 'Ry q[1], 0.654321\n')]:
        instructions = [{'name': 'ry', 'qubits': [qubit], 'params': [angle]}]
        result = self._generate_cqasm_from_instructions(instructions, 2)
        self.assertTrue(expected in result)
def test_generate_cqasm_correct_output_conditional_rotation_y(self):
    """Conditional ry must become a C-Ry wrapped in 'not' toggles for the zero-valued mask bits."""
    for qubit, angle, expected in [
            (0, np.pi / 2, 'not b[2,3]\nC-Ry b[0:3], q[0], 1.570796\nnot b[2,3]\n'),
            (1, 0.654321, 'not b[2,3]\nC-Ry b[0:3], q[1], 0.654321\nnot b[2,3]\n')]:
        instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 15, 'relation': '==', 'val': '0x3'},
                        {'conditional': 15, 'name': 'ry', 'qubits': [qubit], 'params': [angle]}]
        result = self._generate_cqasm_from_instructions(instructions, 2)
        self.assertTrue(expected in result)
def test_generate_cqasm_correct_output_rotation_z(self):
    """rz must be rendered as an Rz line with a 6-decimal (possibly negative) angle."""
    for qubit, angle, expected in [(0, np.pi / 2, 'Rz q[0], 1.570796\n'),
                                   (1, -np.pi / 2, 'Rz q[1], -1.570796\n')]:
        instructions = [{'name': 'rz', 'qubits': [qubit], 'params': [angle]}]
        result = self._generate_cqasm_from_instructions(instructions, 2)
        self.assertTrue(expected in result)
def test_generate_cqasm_correct_output_conditional_rotation_z(self):
    """Conditional rz must become a C-Rz wrapped in 'not' toggles for the zero-valued mask bits."""
    for qubit, angle, expected in [
            (0, np.pi / 2, 'not b[1,2,3]\nC-Rz b[0:3], q[0], 1.570796\nnot b[1,2,3]\n'),
            (1, -np.pi / 2, 'not b[1,2,3]\nC-Rz b[0:3], q[1], -1.570796\nnot b[1,2,3]\n')]:
        instructions = [{'mask': '0xF', 'name': 'bfunc', 'register': 16, 'relation': '==', 'val': '0x1'},
                        {'conditional': 16, 'name': 'rz', 'qubits': [qubit], 'params': [angle]}]
        result = self._generate_cqasm_from_instructions(instructions, 2)
        self.assertTrue(expected in result)
def test_generate_cqasm_correct_output_unknown_gate(self):
    """An unsupported gate name must raise ApiError."""
    instructions = [{'name': 'bla', 'qubits': [1], 'params': [-np.pi / 2]}]
    with self.assertRaisesRegex(ApiError, 'Gate bla not supported'):
        self._generate_cqasm_from_instructions(instructions, 2)
def test_generate_cqasm_correct_output_unknown_controlled_gate(self):
    """An unsupported gate name in a conditional must raise ApiError."""
    bfunc = {'mask': '0xF', 'name': 'bfunc', 'register': 17, 'relation': '==', 'val': '0x1'}
    gate = {'conditional': 17, 'name': 'bla', 'qubits': [1], 'params': [-np.pi / 2]}
    with self.assertRaisesRegex(ApiError, 'Conditional gate c-bla not supported'):
        self._generate_cqasm_from_instructions([bfunc, gate], 2)
def test_generate_cqasm_correct_output_no_bit_negation(self):
    """When every masked bit must equal 1, no 'not' toggles are emitted."""
    bfunc = {'mask': '0xF', 'name': 'bfunc', 'register': 18, 'relation': '==', 'val': '0xF'}
    gate = {'conditional': 18, 'name': 'rx', 'qubits': [1], 'params': [-np.pi / 2]}
    result = self._generate_cqasm_from_instructions([bfunc, gate], 2)
    self.assertTrue('C-Rx b[0:3], q[1], -1.570796\n' in result)
    self.assertFalse('not\n' in result)
def test_generate_cqasm_correct_output_one_bit_condition(self):
    """A single-bit mask must reference that bit directly instead of a bit range."""
    def run(mask, val):
        instructions = [
            {'mask': mask, 'name': 'bfunc', 'register': 19, 'relation': '==', 'val': val},
            {'conditional': 19, 'name': 'rx', 'qubits': [1], 'params': [-np.pi / 2]}]
        return self._generate_cqasm_from_instructions(instructions, 2)

    # Mask bit expected to be 1: direct reference, no negation anywhere.
    for mask, val, expected in [('0x1', '0x1', 'C-Rx b[0], q[1], -1.570796\n'),
                                ('0x2', '0x2', 'C-Rx b[1], q[1], -1.570796\n'),
                                ('0x40', '0x40', 'C-Rx b[6], q[1], -1.570796\n')]:
        result = run(mask, val)
        self.assertTrue(expected in result)
        self.assertFalse('not\n' in result)
    # Mask bit expected to be 0: the single bit is toggled around the gate.
    result = run('0x40', '0x0')
    self.assertTrue('not b[6]\nC-Rx b[6], q[1], -1.570796\nnot b[6]\n' in result)
def test_generate_cqasm_correct_output_more_bit_condition(self):
    """Multi-bit masks must negate exactly the masked bits whose expected value is zero."""
    cases = [
        ('0x38', '0x18', 'not b[5]\nC-Y b[3:5], q[2]\nnot b[5]\n'),
        ('0xFE', '0x18', 'not b[1,2,5,6,7]\nC-Y b[1:7], q[2]\nnot b[1,2,5,6,7]\n'),
        ('0xFE', '0x36', 'not b[3,6,7]\nC-Y b[1:7], q[2]\nnot b[3,6,7]\n'),
        ('0x60', '0x40', 'not b[5]\nC-Y b[5:6], q[2]\nnot b[5]\n'),
    ]
    for mask, val, expected in cases:
        instructions = [{'mask': mask, 'name': 'bfunc', 'register': 20, 'relation': '==', 'val': val},
                        {'conditional': 20, 'name': 'y', 'qubits': [2]}]
        result = self._generate_cqasm_from_instructions(instructions, 2)
        self.assertTrue(expected in result)
def test_generate_cqasm_correct_output_unknown_type(self):
    """Only the '==' relation is supported for conditional statements."""
    bfunc = {'mask': '0xF', 'name': 'bfunc', 'register': 18, 'relation': '!=', 'val': '0x1'}
    gate = {'conditional': 18, 'name': 'rx', 'qubits': [1], 'params': [-np.pi / 2]}
    with self.assertRaisesRegex(ApiError, 'Conditional statement with relation != not supported'):
        self._generate_cqasm_from_instructions([bfunc, gate], 2)
def test_generate_cqasm_correct_output_no_mask(self):
    """A conditional gate with an all-zero mask must raise ApiError."""
    bfunc = {'mask': '0x0', 'name': 'bfunc', 'register': 18, 'relation': '==', 'val': '0x1'}
    gate = {'conditional': 18, 'name': 'rx', 'qubits': [1], 'params': [-np.pi / 2]}
    with self.assertRaisesRegex(ApiError, 'Conditional statement rx without a mask'):
        self._generate_cqasm_from_instructions([bfunc, gate], 2)
def test_generate_cqasm_register_no_match(self):
    """A conditional gate referencing a register with no matching bfunc must raise ApiError."""
    bfunc = {'mask': '0xF', 'name': 'bfunc', 'register': 1, 'relation': '==', 'val': '0x3'}
    gate = {'conditional': 2, 'name': 'rx', 'qubits': [1], 'params': [-np.pi / 2]}
    with self.assertRaisesRegex(ApiError, 'Conditional not found: reg_idx = 2'):
        self._generate_cqasm_from_instructions([bfunc, gate], 2)
def test_get_mask_data(self):
    """get_mask_data must return the lowest set bit index (-1 if none) and the mask length."""
    cases = [
        (0, -1, 0),     # empty mask
        (56, 3, 3),     # 0b00111000
        (1, 0, 1),      # 0b00000001
        (255, 0, 8),    # 0b11111111
        (510, 1, 8),    # 0b111111110
        (128, 7, 1),    # 0b10000000
        (192, 6, 2),    # 0b11000000
    ]
    for mask, expected_lowest_bit, expected_length in cases:
        lowest_mask_bit, mask_length = CircuitToString.get_mask_data(mask)
        self.assertEqual(lowest_mask_bit, expected_lowest_bit)
        self.assertEqual(mask_length, expected_length)
| 58.749206
| 119
| 0.600265
| 4,858
| 37,012
| 4.397283
| 0.05846
| 0.094935
| 0.078785
| 0.134398
| 0.840184
| 0.833723
| 0.820756
| 0.794354
| 0.785554
| 0.768327
| 0
| 0.065592
| 0.217362
| 37,012
| 629
| 120
| 58.842607
| 0.671868
| 0.020939
| 0
| 0.362525
| 0
| 0.095723
| 0.226989
| 0.002485
| 0
| 0
| 0.009
| 0
| 0.238289
| 1
| 0.11609
| false
| 0
| 0.02444
| 0
| 0.144603
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0aa22a54205ac52b1e8ce89a2d70a03a4db0f5e9
| 1,360
|
py
|
Python
|
train-mtcnn-zq-mxnet/core/metric_onlylandmark.py
|
zzzkk2009/anti-spoofing
|
ac3992547c430619e236b338575109d7ecbba654
|
[
"MIT"
] | 13
|
2018-12-19T07:43:46.000Z
|
2020-06-30T13:10:08.000Z
|
train-mtcnn-zq-mxnet/core/metric_onlylandmark.py
|
zzzkk2009/anti-spoofing
|
ac3992547c430619e236b338575109d7ecbba654
|
[
"MIT"
] | 1
|
2020-04-28T02:18:29.000Z
|
2020-04-28T02:18:29.000Z
|
train-mtcnn-zq-mxnet/core/metric_onlylandmark.py
|
zzzkk2009/anti-spoofing
|
ac3992547c430619e236b338575109d7ecbba654
|
[
"MIT"
] | 5
|
2018-12-19T07:43:48.000Z
|
2020-06-15T12:14:41.000Z
|
import mxnet as mx
import numpy as np
from config import config
class LANDMARK_MSE(mx.metric.EvalMetric):
    """Accumulates the squared landmark-regression error over kept samples.

    Expects preds = [landmark_pred_output, landmark_keep_inds] and
    labels = [landmark_target]; only rows where the keep indicator equals 1
    contribute to the metric.
    """

    def __init__(self):
        super(LANDMARK_MSE, self).__init__('lmL2')

    def update(self, labels, preds):
        predicted = preds[0].asnumpy()
        target = labels[0].asnumpy()
        keep_flags = preds[1].asnumpy()
        # Restrict both prediction and target to the rows marked as kept.
        kept_rows = np.where(keep_flags == 1)[0]
        squared_error = (predicted[kept_rows] - target[kept_rows]) ** 2
        self.sum_metric += np.sum(squared_error)
        self.num_inst += squared_error.size
class LANDMARK_L1(mx.metric.EvalMetric):
    """Accumulates the absolute landmark-regression error over kept samples.

    Expects preds = [landmark_pred_output, landmark_keep_inds] and
    labels = [landmark_target]; only rows where the keep indicator equals 1
    contribute to the metric.
    """

    def __init__(self):
        super(LANDMARK_L1, self).__init__('lmL1')

    def update(self, labels, preds):
        predicted = preds[0].asnumpy()
        target = labels[0].asnumpy()
        keep_flags = preds[1].asnumpy()
        # Restrict both prediction and target to the rows marked as kept.
        kept_rows = np.where(keep_flags == 1)[0]
        abs_error = abs(predicted[kept_rows] - target[kept_rows])
        self.sum_metric += np.sum(abs_error)
        self.num_inst += abs_error.size
| 29.565217
| 58
| 0.636765
| 174
| 1,360
| 4.666667
| 0.241379
| 0.172414
| 0.078818
| 0.051724
| 0.800493
| 0.800493
| 0.800493
| 0.800493
| 0.697044
| 0.697044
| 0
| 0.014793
| 0.254412
| 1,360
| 46
| 59
| 29.565217
| 0.785996
| 0.105147
| 0
| 0.709677
| 0
| 0
| 0.006595
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129032
| false
| 0
| 0.096774
| 0
| 0.290323
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0abe5f0d64ab8c36181deafb9170cfc022091d47
| 195
|
py
|
Python
|
tests/conftest.py
|
pranali139/pyinvestigate
|
a182e73a750f03e906d9b25842d556db8d2fd54f
|
[
"MIT"
] | 63
|
2015-01-26T20:47:50.000Z
|
2021-12-09T05:58:11.000Z
|
tests/conftest.py
|
pranali139/pyinvestigate
|
a182e73a750f03e906d9b25842d556db8d2fd54f
|
[
"MIT"
] | 16
|
2016-04-21T17:54:40.000Z
|
2021-04-19T10:06:13.000Z
|
tests/conftest.py
|
pranali139/pyinvestigate
|
a182e73a750f03e906d9b25842d556db8d2fd54f
|
[
"MIT"
] | 32
|
2015-07-22T17:30:33.000Z
|
2021-09-16T21:12:49.000Z
|
import os
import pytest
import investigate # use the local path, instead of what has been installed
@pytest.fixture
def inv():
    """Provide an Investigate client built from the INVESTIGATE_KEY environment variable."""
    api_key = os.environ['INVESTIGATE_KEY']
    return investigate.Investigate(api_key)
| 21.666667
| 75
| 0.779487
| 27
| 195
| 5.592593
| 0.740741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14359
| 195
| 8
| 76
| 24.375
| 0.904192
| 0.276923
| 0
| 0
| 0
| 0
| 0.107914
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.5
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
0ad2df3d27ff5549c9fb77ca78531d499cd40237
| 262
|
py
|
Python
|
django_query_profiler/django/db/backends/oracle/base.py
|
sonej/django-query-profiler
|
4afe3694ded26d7ba0b435f5666e990b668d85b5
|
[
"BSD-3-Clause"
] | 97
|
2020-03-03T01:20:35.000Z
|
2022-03-23T14:06:09.000Z
|
django_query_profiler/django/db/backends/oracle/base.py
|
sonej/django-query-profiler
|
4afe3694ded26d7ba0b435f5666e990b668d85b5
|
[
"BSD-3-Clause"
] | 24
|
2020-03-06T17:35:08.000Z
|
2022-02-09T20:06:05.000Z
|
django_query_profiler/django/db/backends/oracle/base.py
|
sonej/django-query-profiler
|
4afe3694ded26d7ba0b435f5666e990b668d85b5
|
[
"BSD-3-Clause"
] | 9
|
2020-03-22T18:17:09.000Z
|
2022-01-31T18:59:11.000Z
|
import django.db.backends.oracle.base as oracle_base
from django_query_profiler.django.db.backends.database_wrapper_mixin import QueryProfilerDatabaseWrapperMixin
class DatabaseWrapper(oracle_base.DatabaseWrapper, QueryProfilerDatabaseWrapperMixin):
    """Oracle database wrapper with query-profiler instrumentation mixed in.

    All behavior comes from the two bases; the mixin is listed second so the
    stock Oracle wrapper wins any method-resolution ties.
    """
| 32.75
| 109
| 0.877863
| 28
| 262
| 8
| 0.607143
| 0.133929
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072519
| 262
| 7
| 110
| 37.428571
| 0.921811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
0adaffb8376f47c284934706d7af6d76244163fb
| 34
|
py
|
Python
|
spydashserver/__init__.py
|
ibutra/SpyDashServer
|
d12e6aac4efe487a3b6e950236ed2ef5d464178c
|
[
"MIT"
] | 2
|
2015-11-13T14:43:37.000Z
|
2015-11-13T14:44:28.000Z
|
spydashserver/__init__.py
|
ibutra/SpyDashServer
|
d12e6aac4efe487a3b6e950236ed2ef5d464178c
|
[
"MIT"
] | null | null | null |
spydashserver/__init__.py
|
ibutra/SpyDashServer
|
d12e6aac4efe487a3b6e950236ed2ef5d464178c
|
[
"MIT"
] | null | null | null |
from .server import SpyDashServer
| 17
| 33
| 0.852941
| 4
| 34
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0afd9bf4057afd5b1419918ef2c50a9c9cc1d7cd
| 5,618
|
py
|
Python
|
tf_quant_finance/experimental/instruments/swaption_test.py
|
alexanu/tf-quant-finance
|
d0eb0e778d2422c6190844ef8f8c457ae25f9265
|
[
"Apache-2.0"
] | 1
|
2021-09-01T06:27:02.000Z
|
2021-09-01T06:27:02.000Z
|
tf_quant_finance/experimental/instruments/swaption_test.py
|
alexanu/tf-quant-finance
|
d0eb0e778d2422c6190844ef8f8c457ae25f9265
|
[
"Apache-2.0"
] | null | null | null |
tf_quant_finance/experimental/instruments/swaption_test.py
|
alexanu/tf-quant-finance
|
d0eb0e778d2422c6190844ef8f8c457ae25f9265
|
[
"Apache-2.0"
] | 1
|
2021-09-01T06:26:57.000Z
|
2021-09-01T06:26:57.000Z
|
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swaption.py."""
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
dates = tff.experimental.dates
instruments = tff.experimental.instruments
@test_util.run_all_in_graph_and_eager_modes
class SwaptionTest(tf.test.TestCase, parameterized.TestCase):
  """Checks swaption pricing against fixed reference premiums.

  Both tests price a 2022->2025 payer swaption (expiry at swap start) under a
  lognormal rate model with a hard-coded zero curve, and compare the price to a
  pinned reference value.
  """

  @parameterized.named_parameters(
      ('DoublePrecision', np.float64),
  )
  def test_swaption_correctness(self, dtype):
    """Prices a single swaption and checks the premium to 1e-6."""
    notional = 1.e6
    # Underlying swap runs 2022-02-08 -> 2025-02-08; the option expires at the
    # swap start, valued as of 2020-02-08.
    maturity_date = dates.convert_to_date_tensor([(2025, 2, 8)])
    start_date = dates.convert_to_date_tensor([(2022, 2, 8)])
    expiry_date = dates.convert_to_date_tensor([(2022, 2, 8)])
    valuation_date = dates.convert_to_date_tensor([(2020, 2, 8)])
    period3m = dates.periods.PeriodTensor(3, dates.PeriodType.MONTH)
    period6m = dates.periods.PeriodTensor(6, dates.PeriodType.MONTH)
    # Fixed leg: semi-annual 3.134% coupon, ACT/365.
    fix_spec = instruments.FixedCouponSpecs(
        coupon_frequency=period6m, currency='usd', notional=notional,
        coupon_rate=0.03134,
        daycount_convention=instruments.DayCountConvention.ACTUAL_365,
        businessday_rule=dates.BusinessDayConvention.NONE)
    # Floating leg: quarterly resets off the 3m reference rate, ACT/365.
    flt_spec = instruments.FloatCouponSpecs(
        coupon_frequency=period3m, reference_rate_term=period3m,
        reset_frequency=period3m, currency='usd', notional=notional,
        businessday_rule=dates.BusinessDayConvention.NONE,
        coupon_basis=0., coupon_multiplier=1.,
        daycount_convention=instruments.DayCountConvention.ACTUAL_365)
    swap = instruments.InterestRateSwap(start_date, maturity_date,
                                        [fix_spec], [flt_spec],
                                        dtype=dtype)
    swaption = instruments.Swaption(swap, expiry_date, dtype=dtype)
    # Zero curve pinned at 1y..30y pillar dates; used for both projection and
    # discounting.
    curve_dates = valuation_date + dates.periods.PeriodTensor(
        [1, 2, 3, 5, 7, 10, 30], dates.PeriodType.YEAR)
    reference_curve = instruments.RateCurve(
        curve_dates,
        np.array([
            0.02834814, 0.03077457, 0.03113739, 0.03130794, 0.03160892,
            0.03213901, 0.03257991
        ], dtype=np.float64),
        valuation_date=valuation_date,
        dtype=np.float64)
    market = instruments.InterestRateMarket(
        reference_curve=reference_curve, discount_curve=reference_curve)

    # pricing_context carries the lognormal volatility (50%).
    price = self.evaluate(
        swaption.price(
            valuation_date,
            market,
            model=instruments.InterestRateModelType.LOGNORMAL_RATE,
            pricing_context=0.5))
    np.testing.assert_allclose(price, 24145.254011, atol=1e-6)

  @parameterized.named_parameters(
      ('DoublePrecision', np.float64),
  )
  def test_swaption_many(self, dtype):
    """Prices a batch of two identical swaptions; both premiums must match the single-swaption value."""
    notional = 1.e6
    # Same trade as test_swaption_correctness, duplicated to exercise batching.
    maturity_date = dates.convert_to_date_tensor([(2025, 2, 8), (2025, 2, 8)])
    start_date = dates.convert_to_date_tensor([(2022, 2, 8), (2022, 2, 8)])
    expiry_date = dates.convert_to_date_tensor([(2022, 2, 8), (2022, 2, 8)])
    valuation_date = dates.convert_to_date_tensor([(2020, 2, 8)])
    period3m = dates.periods.PeriodTensor(3, dates.PeriodType.MONTH)
    period6m = dates.periods.PeriodTensor(6, dates.PeriodType.MONTH)
    fix_spec = instruments.FixedCouponSpecs(
        coupon_frequency=period6m, currency='usd', notional=notional,
        coupon_rate=0.03134,
        daycount_convention=instruments.DayCountConvention.ACTUAL_365,
        businessday_rule=dates.BusinessDayConvention.NONE)
    flt_spec = instruments.FloatCouponSpecs(
        coupon_frequency=period3m, reference_rate_term=period3m,
        reset_frequency=period3m, currency='usd', notional=notional,
        businessday_rule=dates.BusinessDayConvention.NONE,
        coupon_basis=0., coupon_multiplier=1.,
        daycount_convention=instruments.DayCountConvention.ACTUAL_365)
    swap = instruments.InterestRateSwap(start_date, maturity_date,
                                        [fix_spec, fix_spec],
                                        [flt_spec, flt_spec],
                                        dtype=dtype)
    swaption = instruments.Swaption(swap, expiry_date, dtype=dtype)
    curve_dates = valuation_date + dates.periods.PeriodTensor(
        [1, 2, 3, 5, 7, 10, 30], dates.PeriodType.YEAR)
    reference_curve = instruments.RateCurve(
        curve_dates,
        np.array([
            0.02834814, 0.03077457, 0.03113739, 0.03130794, 0.03160892,
            0.03213901, 0.03257991
        ], dtype=np.float64),
        valuation_date=valuation_date,
        dtype=np.float64)
    market = instruments.InterestRateMarket(
        reference_curve=reference_curve, discount_curve=reference_curve)

    # One volatility per swaption in the batch.
    price = self.evaluate(
        swaption.price(
            valuation_date,
            market,
            model=instruments.InterestRateModelType.LOGNORMAL_RATE,
            pricing_context=[0.5, 0.5]))
    np.testing.assert_allclose(price, [24145.254011, 24145.254011], atol=1e-6)
# Allow running this test module directly.
if __name__ == '__main__':
  tf.test.main()
| 41.308824
| 95
| 0.691883
| 658
| 5,618
| 5.712766
| 0.290274
| 0.005853
| 0.034052
| 0.038308
| 0.775472
| 0.768822
| 0.768822
| 0.768822
| 0.768822
| 0.711891
| 0
| 0.072748
| 0.209683
| 5,618
| 135
| 96
| 41.614815
| 0.773874
| 0.112496
| 0
| 0.72549
| 0
| 0
| 0.010068
| 0
| 0
| 0
| 0
| 0
| 0.019608
| 1
| 0.019608
| false
| 0
| 0.04902
| 0
| 0.078431
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c14dc495812c2e84a6e43f02a4ade678d80ef91
| 10,979
|
py
|
Python
|
tests/feature/test_wrong.py
|
hristiy4n/pytest-bdd
|
76ed2ece2aa822cdf764b37a8d042227db9ff9c9
|
[
"MIT"
] | null | null | null |
tests/feature/test_wrong.py
|
hristiy4n/pytest-bdd
|
76ed2ece2aa822cdf764b37a8d042227db9ff9c9
|
[
"MIT"
] | null | null | null |
tests/feature/test_wrong.py
|
hristiy4n/pytest-bdd
|
76ed2ece2aa822cdf764b37a8d042227db9ff9c9
|
[
"MIT"
] | null | null | null |
"""Test wrong feature syntax."""
import textwrap
def test_when_in_background(testdir):
    """A When step inside a Background section must fail with FeatureError."""
    testdir.makefile(
        ".feature",
        wrong=textwrap.dedent(
            """\
            Feature: When in background

                Background:
                    Given I don't always write when in the background, but
                    When I do

                Scenario: When in background
                    Then its fine
                    When I do it again
                    Then its wrong
            """
        ),
    )
    testdir.makepyfile(
        textwrap.dedent(
            """\
            import pytest
            from pytest_bdd import scenario

            @scenario("wrong.feature", "When in background")
            def test_wrong():
                pass
            """
        )
    )
    result = testdir.runpytest()
    # Collection fails before any test runs: one collection error.
    result.assert_outcomes(error=1)
    result.stdout.fnmatch_lines("*FeatureError: Background section can only contain Given steps.*")
def test_then_first(testdir):
    """A Then step with no preceding Given/When must fail with FeatureError."""
    testdir.makefile(
        ".feature",
        wrong=textwrap.dedent(
            """\
            Feature: Then first
                Scenario: Then first
                    Then it won't work
            """
        ),
    )
    testdir.makepyfile(
        textwrap.dedent(
            """\
            import pytest
            from pytest_bdd import scenario

            @scenario("wrong.feature", "Then first")
            def test_wrong():
                pass
            """
        )
    )
    result = testdir.runpytest()
    result.assert_outcomes(error=1)
    result.stdout.fnmatch_lines("*FeatureError: Then steps must follow Given or When steps.*")
def test_given_after_when(testdir):
    """A Given step after a When step must fail with FeatureError."""
    testdir.makefile(
        ".feature",
        wrong=textwrap.dedent(
            """\
            Feature: Given after when
                Scenario: Given after When
                    Given something
                    When something else
                    Given won't work
            """
        ),
    )
    testdir.makepyfile(
        textwrap.dedent(
            """\
            import pytest
            from pytest_bdd import scenario

            @scenario("wrong.feature", "Given after When")
            def test_wrong():
                pass
            """
        )
    )
    result = testdir.runpytest()
    result.assert_outcomes(error=1)
    result.stdout.fnmatch_lines("*FeatureError: Given steps must be the first within the Scenario.*")
def test_given_after_then(testdir):
    """A Given step after a Then step must fail with FeatureError."""
    testdir.makefile(
        ".feature",
        wrong=textwrap.dedent(
            """\
            Feature: Given after then
                Scenario: Given after Then
                    Given something
                    When something else
                    Then nevermind
                    Given won't work
            """
        ),
    )
    testdir.makepyfile(
        textwrap.dedent(
            """\
            import pytest
            from pytest_bdd import scenario

            @scenario("wrong.feature", "Given after Then")
            def test_wrong():
                pass
            """
        )
    )
    result = testdir.runpytest()
    result.assert_outcomes(error=1)
    result.stdout.fnmatch_lines("*FeatureError: Given steps must be the first within the Scenario.*")
def test_when_in_given(testdir):
    """A @when step definition must not satisfy a Given step: the test fails with StepDefinitionNotFoundError."""
    testdir.makefile(
        ".feature",
        wrong=textwrap.dedent(
            """\
            Feature: When in given
                Scenario: When in Given
                    Given something else
            """
        ),
    )
    testdir.makepyfile(
        textwrap.dedent(
            """\
            import pytest
            from pytest_bdd import when, scenario

            @scenario("wrong.feature", "When in Given")
            def test_wrong():
                pass

            @when("something else")
            def something_else():
                pass
            """
        )
    )
    result = testdir.runpytest()
    # The scenario is collected but the step lookup fails at run time.
    result.assert_outcomes(failed=1)
    result.stdout.fnmatch_lines(
        "*StepDefinitionNotFoundError: "
        'Step definition is not found: Given "something else". Line 3 in scenario "When in Given"*'
    )
def test_when_in_then(testdir):
    """A @when step definition must not satisfy a Then step: the test fails with StepDefinitionNotFoundError."""
    testdir.makefile(
        ".feature",
        wrong=textwrap.dedent(
            """\
            Feature: When in then
                Scenario: When in Then
                    When something else
                    Then something else
            """
        ),
    )
    testdir.makepyfile(
        textwrap.dedent(
            """\
            import pytest
            from pytest_bdd import when, scenario

            @scenario("wrong.feature", "When in Then")
            def test_wrong():
                pass

            @when("something else")
            def something_else():
                pass
            """
        )
    )
    result = testdir.runpytest()
    result.assert_outcomes(failed=1)
    result.stdout.fnmatch_lines(
        "*StepDefinitionNotFoundError: "
        'Step definition is not found: Then "something else". Line 4 in scenario "When in Then"*'
    )
def test_then_in_given(testdir):
    """A @then step definition must not satisfy a Given step: the test fails with StepDefinitionNotFoundError."""
    testdir.makefile(
        ".feature",
        wrong=textwrap.dedent(
            """\
            Feature: Then in given
                Scenario: Then in Given
                    Given nevermind
            """
        ),
    )
    testdir.makepyfile(
        textwrap.dedent(
            """\
            import pytest
            from pytest_bdd import then, scenario

            @scenario("wrong.feature", "Then in Given")
            def test_wrong():
                pass

            @then("nevermind")
            def nevermind():
                assert True
            """
        )
    )
    result = testdir.runpytest()
    result.assert_outcomes(failed=1)
    result.stdout.fnmatch_lines(
        "*StepDefinitionNotFoundError: "
        'Step definition is not found: Given "nevermind". Line 3 in scenario "Then in Given"*'
    )
def test_given_in_when(testdir):
    """A @given step definition must not satisfy a When step: the test fails with StepDefinitionNotFoundError."""
    testdir.makefile(
        ".feature",
        wrong=textwrap.dedent(
            """\
            Feature: Given in when
                Scenario: Given in When
                    When something
            """
        ),
    )
    testdir.makepyfile(
        textwrap.dedent(
            """\
            import pytest
            from pytest_bdd import given, scenario

            @scenario("wrong.feature", "Given in When")
            def test_wrong():
                pass

            @given("something")
            def something():
                return "something"
            """
        )
    )
    result = testdir.runpytest()
    result.assert_outcomes(failed=1)
    result.stdout.fnmatch_lines(
        "*StepDefinitionNotFoundError: "
        'Step definition is not found: When "something". Line 3 in scenario "Given in When"*'
    )
def test_given_in_then(testdir):
    """A Then step must not be matched by a @given step definition.

    The When step is defined (so the run reaches the Then step), but the
    Then step only has a @given definition; pytest-bdd must fail with
    StepDefinitionNotFoundError.
    """
    testdir.makefile(
        ".feature",
        wrong=textwrap.dedent(
            """\
            Feature: Given in then
            Scenario: Given in Then
            When something else
            Then something
            """
        ),
    )
    testdir.makepyfile(
        textwrap.dedent(
            """\
            import pytest
            from pytest_bdd import given, when, scenario

            @scenario("wrong.feature", "Given in Then")
            def test_wrong():
                pass

            @when("something else")
            def something_else():
                pass

            @given("something")
            def something():
                return "something"
            """
        )
    )
    result = testdir.runpytest()
    result.assert_outcomes(failed=1)
    # "Line 4" is the Then step's position in the feature file.
    result.stdout.fnmatch_lines(
        "*StepDefinitionNotFoundError: "
        'Step definition is not found: Then "something". Line 4 in scenario "Given in Then"*'
    )
def test_then_in_when(testdir):
    """A When step must not be matched by a @then step definition.

    The scenario's When step only has a @then definition; pytest-bdd
    must fail with StepDefinitionNotFoundError.
    """
    testdir.makefile(
        ".feature",
        wrong=textwrap.dedent(
            """\
            Feature: Then in when
            Scenario: Then in When
            When nevermind
            """
        ),
    )
    testdir.makepyfile(
        textwrap.dedent(
            """\
            import pytest
            from pytest_bdd import then, scenario

            @scenario("wrong.feature", "Then in When")
            def test_wrong():
                pass

            @then("nevermind")
            def nevermind():
                assert True
            """
        )
    )
    result = testdir.runpytest()
    result.assert_outcomes(failed=1)
    # "Line 3" is the When step's position in the feature file.
    result.stdout.fnmatch_lines(
        "*StepDefinitionNotFoundError: "
        'Step definition is not found: When "nevermind". Line 3 in scenario "Then in When"*'
    )
def test_verbose_output(testdir):
    """Test verbose output of failed feature scenario.

    A Background section containing a When step is a feature-level error;
    the verbose output must report the offending line number and text.
    """
    testdir.makefile(
        ".feature",
        wrong=textwrap.dedent(
            """\
            Feature: When in background

            Background:
            Given I don't always write when in the background, but
            When I do

            Scenario: When in background
            Then its fine
            When I do it again
            Then its wrong
            """
        ),
    )
    testdir.makepyfile(
        textwrap.dedent(
            """\
            import pytest
            from pytest_bdd import scenario

            @scenario("wrong.feature", "When in background")
            def test_wrong():
                pass
            """
        )
    )
    result = testdir.runpytest()
    result.assert_outcomes(error=1)
    result.stdout.fnmatch_lines("*FeatureError: Background section can only contain Given steps.*")
    # The blank line after "Feature:" puts the illegal "When I do" on line 5,
    # matching the asserted line number below.
    result.stdout.fnmatch_lines("*Line number: 5.*")
    result.stdout.fnmatch_lines("*Line: When I do.*")
def test_multiple_features_single_file(testdir):
    """Test validation error when multiple features are placed in a single file.

    Two Feature sections in one .feature file must raise a FeatureError.
    """
    testdir.makefile(
        ".feature",
        wrong=textwrap.dedent(
            """\
            Feature: Feature One
            Background:
            Given I have A
            And I have B

            Scenario: Do something with A
            When I do something with A
            Then something about B

            Feature: Feature Two
            Background:
            Given I have A

            Scenario: Something that just needs A
            When I do something else with A
            Then something else about B

            Scenario: Something that needs B again
            Given I have B
            When I do something else with B
            Then something else about A and B
            """
        ),
    )
    testdir.makepyfile(
        textwrap.dedent(
            """\
            import pytest
            from pytest_bdd import then, scenario

            @scenario("wrong.feature", "Do something with A")
            def test_wrong():
                pass
            """
        )
    )
    result = testdir.runpytest()
    result.assert_outcomes(error=1)
    result.stdout.fnmatch_lines("*FeatureError: Multiple features are not allowed in a single feature file.*")
| 24.67191
| 110
| 0.523909
| 1,040
| 10,979
| 5.446154
| 0.092308
| 0.029661
| 0.046963
| 0.059322
| 0.835629
| 0.789371
| 0.757592
| 0.72899
| 0.72899
| 0.718573
| 0
| 0.002827
| 0.387831
| 10,979
| 444
| 111
| 24.727477
| 0.839905
| 0.01348
| 0
| 0.632768
| 0
| 0
| 0.231092
| 0.033149
| 0
| 0
| 0
| 0
| 0.067797
| 1
| 0.067797
| false
| 0
| 0.00565
| 0
| 0.073446
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c1c5f6525320e13f554c1004dbdb3df7ca6099b
| 2,089
|
py
|
Python
|
blender/2.79/scripts/addons/add_curve_sapling/presets/quaking_aspen.py
|
uzairakbar/bpy2.79
|
3a3e0004ac6783c4e4b89d939e4432de99026a85
|
[
"MIT"
] | 2
|
2019-11-27T09:05:42.000Z
|
2020-02-20T01:25:23.000Z
|
add_curve_sapling/presets/quaking_aspen.py
|
1-MillionParanoidTterabytes/blender-addons-master
|
acc8fc23a38e6e89099c3e5079bea31ce85da06a
|
[
"Unlicense"
] | null | null | null |
add_curve_sapling/presets/quaking_aspen.py
|
1-MillionParanoidTterabytes/blender-addons-master
|
acc8fc23a38e6e89099c3e5079bea31ce85da06a
|
[
"Unlicense"
] | 4
|
2020-02-19T20:02:26.000Z
|
2022-02-11T18:47:56.000Z
|
{'leafScale': 0.17000000178813934, 'autoTaper': True, 'customShape': (0.5, 1.0, 0.30000001192092896, 0.5), 'leafShape': 'hex', 'curve': (0.0, -40.0, -40.0, 0.0), 'ratio': 0.014999999664723873, 'splitBias': 0.0, 'pruneWidth': 0.4000000059604645, 'downAngleV': (0.0, 80.0, 10.0, 10.0), 'rotate': (99.5, 137.5, 137.5, 137.5), 'pruneRatio': 1.0, 'leafDownAngle': 45.0, 'makeMesh': False, 'radiusTweak': (1.0, 1.0, 1.0, 1.0), 'rMode': 'rotate', 'splitAngleV': (0.0, 0.0, 0.0, 0.0), 'branchDist': 1.0, 'bevel': False, 'minRadius': 0.001500000013038516, 'prune': False, 'leafRotateV': 0.0, 'splitAngle': (0.0, 0.0, 0.0, 0.0), 'armAnim': False, 'boneStep': (1, 1, 1, 1), 'pruneBase': 0.30000001192092896, 'taperCrown': 0.0, 'baseSplits': 0, 'baseSize_s': 0.25, 'handleType': '0', 'baseSize': 0.4000000059604645, 'af1': 1.0, 'levels': 2, 'leafScaleV': 0.0, 'resU': 4, 'seed': 0, 'downAngle': (90.0, 110.0, 45.0, 45.0), 'leafangle': 0.0, 'scaleV0': 0.10000000149011612, 'prunePowerHigh': 0.5, 'splitByLen': True, 'wind': 1.0, 'shape': '7', 'prunePowerLow': 0.0010000000474974513, 'scale': 13.0, 'leafAnim': False, 'curveBack': (0.0, 0.0, 0.0, 0.0), 'leafScaleX': 1.0, 'horzLeaves': True, 'splitHeight': 0.20000000298023224, 'leafScaleT': 0.0, 'scaleV': 3.0, 'leafDist': '6', 'nrings': 0, 'curveRes': (8, 5, 3, 1), 'shapeS': '4', 'bevelRes': 0, 'useOldDownAngle': False, 'useParentAngle': True, 'armLevels': 2, 'scale0': 1.0, 'taper': (1.0, 1.0, 1.0, 1.0), 'pruneWidthPeak': 0.6000000238418579, 'previewArm': False, 'leaves': 25, 'ratioPower': 1.100000023841858, 'gustF': 0.07500000298023224, 'curveV': (20.0, 50.0, 75.0, 0.0), 'showLeaves': False, 'frameRate': 1.0, 'length': (1.0, 0.30000001192092896, 0.6000000238418579, 0.44999998807907104), 'branches': (0, 50, 30, 10), 'useArm': False, 'loopFrames': 0, 'gust': 1.0, 'af3': 4.0, 'closeTip': False, 'leafRotate': 137.5, 'attractUp': (0.0, 0.0, 0.5, 0.5), 'leafDownAngleV': 10.0, 'rootFlare': 1.0, 'af2': 1.0, 'lengthV': (0.0, 0.0, 0.0, 0.0), 'rotateV': 
(15.0, 0.0, 0.0, 0.0), 'attractOut': (0.0, 0.0, 0.0, 0.0), 'segSplits': (0.0, 0.0, 0.0, 0.0)}
| 2,089
| 2,089
| 0.622786
| 335
| 2,089
| 3.880597
| 0.370149
| 0.101538
| 0.106154
| 0.110769
| 0.100769
| 0.054615
| 0.054615
| 0.049231
| 0
| 0
| 0
| 0.281183
| 0.109622
| 2,089
| 1
| 2,089
| 2,089
| 0.417742
| 0
| 0
| 0
| 0
| 0
| 0.344976
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c4b9094f8e716d5946d7dfa67e6d23773fdb5c9
| 140
|
py
|
Python
|
IP_Info/bokpc3_IP.py
|
mplesser/soguiders
|
915ea962c6a2736d2a617c726db427fd8ec3cc45
|
[
"MIT"
] | null | null | null |
IP_Info/bokpc3_IP.py
|
mplesser/soguiders
|
915ea962c6a2736d2a617c726db427fd8ec3cc45
|
[
"MIT"
] | null | null | null |
IP_Info/bokpc3_IP.py
|
mplesser/soguiders
|
915ea962c6a2736d2a617c726db427fd8ec3cc45
|
[
"MIT"
] | null | null | null |
# Static network configuration for host "bokpc3".
# NOTE(review): values look like a 140.252.86.0/24 site network — confirm
# against the deployment before reuse.

# Name of the Windows network interface to configure.
lan = 'Local Area Connection'
# IPv4 address, mask, and default gateway for the host.
ip = '140.252.86.14'
subnetmask = '255.255.255.0'
gateway = '140.252.86.1'
# Primary and secondary DNS servers.
dns1 = '140.252.86.112'
dns2 = '128.196.208.2'
| 20
| 27
| 0.707143
| 29
| 140
| 3.413793
| 0.724138
| 0.181818
| 0.242424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.393939
| 0.057143
| 140
| 6
| 28
| 23.333333
| 0.356061
| 0
| 0
| 0
| 0
| 0
| 0.614286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c8fe6d9c0acb3967c863f49127e355d10b380d4
| 5,371
|
py
|
Python
|
tests/integration/test_webhooks_signals.py
|
aavcc/taiga-openshift
|
7c33284573ceed38f755b8159ad83f3f68d2f7cb
|
[
"MIT"
] | null | null | null |
tests/integration/test_webhooks_signals.py
|
aavcc/taiga-openshift
|
7c33284573ceed38f755b8159ad83f3f68d2f7cb
|
[
"MIT"
] | null | null | null |
tests/integration/test_webhooks_signals.py
|
aavcc/taiga-openshift
|
7c33284573ceed38f755b8159ad83f3f68d2f7cb
|
[
"MIT"
] | 1
|
2018-06-07T10:58:15.000Z
|
2018-06-07T10:58:15.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# Copyright (C) 2014-2017 Anler Hernández <hello@anler.me>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from unittest.mock import patch
from unittest.mock import Mock
from .. import factories as f
from taiga.projects.history import services
pytestmark = pytest.mark.django_db(transaction=True)
def test_new_object_with_one_webhook_signal(settings):
    """A single webhook fires once per snapshot that carries a comment.

    Snapshots taken without a comment must not trigger the webhook;
    snapshots with delete=True still trigger it once.
    """
    settings.WEBHOOKS_ENABLED = True
    project = f.ProjectFactory()
    f.WebhookFactory.create(project=project)

    # One instance of each webhook-capable object type.
    objects = [
        f.IssueFactory.create(project=project),
        f.TaskFactory.create(project=project),
        f.UserStoryFactory.create(project=project),
        f.WikiPageFactory.create(project=project)
    ]
    response = Mock(status_code=200, headers={}, text="ok")
    response.elapsed.total_seconds.return_value = 100

    for obj in objects:
        with patch("taiga.webhooks.tasks.requests.Session.send", return_value=response) as session_send_mock:
            services.take_snapshot(obj, user=obj.owner, comment="test")
            assert session_send_mock.call_count == 1

    # No comment -> no change event -> webhook must not fire.
    for obj in objects:
        with patch("taiga.webhooks.tasks.requests.Session.send", return_value=response) as session_send_mock:
            services.take_snapshot(obj, user=obj.owner)
            assert session_send_mock.call_count == 0

    for obj in objects:
        with patch("taiga.webhooks.tasks.requests.Session.send", return_value=response) as session_send_mock:
            services.take_snapshot(obj, user=obj.owner, comment="test")
            assert session_send_mock.call_count == 1

    # Deletion snapshots also notify the webhook.
    for obj in objects:
        with patch("taiga.webhooks.tasks.requests.Session.send", return_value=response) as session_send_mock:
            services.take_snapshot(obj, user=obj.owner, comment="test", delete=True)
            assert session_send_mock.call_count == 1
def test_new_object_with_two_webhook_signals(settings):
    """Two webhooks on the same project each fire per qualifying snapshot.

    send() is expected twice per commented snapshot (one per webhook) and
    zero times for comment-less snapshots.
    """
    settings.WEBHOOKS_ENABLED = True
    project = f.ProjectFactory()
    # Two webhooks registered on the same project.
    f.WebhookFactory.create(project=project)
    f.WebhookFactory.create(project=project)

    objects = [
        f.IssueFactory.create(project=project),
        f.TaskFactory.create(project=project),
        f.UserStoryFactory.create(project=project),
        f.WikiPageFactory.create(project=project)
    ]
    response = Mock(status_code=200, headers={}, text="ok")
    response.elapsed.total_seconds.return_value = 100

    for obj in objects:
        with patch("taiga.webhooks.tasks.requests.Session.send", return_value=response) as session_send_mock:
            services.take_snapshot(obj, user=obj.owner, comment="test")
            assert session_send_mock.call_count == 2

    for obj in objects:
        with patch("taiga.webhooks.tasks.requests.Session.send", return_value=response) as session_send_mock:
            services.take_snapshot(obj, user=obj.owner, comment="test")
            assert session_send_mock.call_count == 2

    # No comment -> neither webhook fires.
    for obj in objects:
        with patch("taiga.webhooks.tasks.requests.Session.send", return_value=response) as session_send_mock:
            services.take_snapshot(obj, user=obj.owner)
            assert session_send_mock.call_count == 0

    for obj in objects:
        with patch("taiga.webhooks.tasks.requests.Session.send", return_value=response) as session_send_mock:
            services.take_snapshot(obj, user=obj.owner, comment="test", delete=True)
            assert session_send_mock.call_count == 2
def test_send_request_one_webhook_signal(settings):
    """The webhook HTTP request is sent exactly once per snapshot event."""
    settings.WEBHOOKS_ENABLED = True
    project = f.ProjectFactory()
    f.WebhookFactory.create(project=project)

    objects = [
        f.IssueFactory.create(project=project),
        f.TaskFactory.create(project=project),
        f.UserStoryFactory.create(project=project),
        f.WikiPageFactory.create(project=project)
    ]
    response = Mock(status_code=200, headers={}, text="ok")
    response.elapsed.total_seconds.return_value = 100

    for obj in objects:
        with patch("taiga.webhooks.tasks.requests.Session.send", return_value=response) as session_send_mock:
            services.take_snapshot(obj, user=obj.owner, comment="test")
            assert session_send_mock.call_count == 1

    for obj in objects:
        with patch("taiga.webhooks.tasks.requests.Session.send", return_value=response) as session_send_mock:
            services.take_snapshot(obj, user=obj.owner, comment="test", delete=True)
            assert session_send_mock.call_count == 1
| 41.960938
| 109
| 0.718116
| 713
| 5,371
| 5.269285
| 0.230014
| 0.087836
| 0.079851
| 0.055896
| 0.78387
| 0.762044
| 0.753527
| 0.735161
| 0.735161
| 0.735161
| 0
| 0.015964
| 0.183578
| 5,371
| 127
| 110
| 42.291339
| 0.840821
| 0.177807
| 0
| 0.855422
| 0
| 0
| 0.104162
| 0.09552
| 0
| 0
| 0
| 0
| 0.120482
| 1
| 0.036145
| false
| 0
| 0.060241
| 0
| 0.096386
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7cabfc9244e6f3bdb4d8d272ee0b18bf01c6073f
| 123
|
py
|
Python
|
topologylayer/__init__.py
|
Filco306/TopologyLayer
|
1d6261017a80cff0ee06bb896ded40777b0989b4
|
[
"MIT"
] | 250
|
2019-06-03T17:24:12.000Z
|
2022-03-31T01:13:48.000Z
|
topologylayer/__init__.py
|
kyle-gao/TopologyLayer
|
3da7af35a58bd1438d28d6cca49b40f90cb7ee14
|
[
"MIT"
] | 20
|
2019-06-04T06:44:43.000Z
|
2022-03-08T23:52:50.000Z
|
topologylayer/__init__.py
|
kyle-gao/TopologyLayer
|
3da7af35a58bd1438d28d6cca49b40f90cb7ee14
|
[
"MIT"
] | 63
|
2019-06-03T17:23:19.000Z
|
2022-02-08T16:34:39.000Z
|
import topologylayer.nn
import topologylayer.functional
from topologylayer.functional.persistence import SimplicialComplex
| 30.75
| 66
| 0.902439
| 12
| 123
| 9.25
| 0.583333
| 0.342342
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065041
| 123
| 3
| 67
| 41
| 0.965217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7cce6ce5df154fbde2d2bc7df26072c9a4114636
| 515
|
py
|
Python
|
backend/naki/naki/schemas/user.py
|
iimcz/emod
|
432094c020247597a94e95f76cc524c20b68b685
|
[
"MIT"
] | null | null | null |
backend/naki/naki/schemas/user.py
|
iimcz/emod
|
432094c020247597a94e95f76cc524c20b68b685
|
[
"MIT"
] | 6
|
2021-03-08T23:32:15.000Z
|
2022-02-26T08:11:38.000Z
|
backend/naki/naki/schemas/user.py
|
iimcz/emod
|
432094c020247597a94e95f76cc524c20b68b685
|
[
"MIT"
] | null | null | null |
from naki.model.user import User
from colanderalchemy import SQLAlchemySchemaNode
import colander
# UserSchema = SQLAlchemySchemaNode(User)
class UserSchema(colander.MappingSchema):
    """Colander schema for (de)serializing a user record.

    String fields default to '' and auth_level defaults to 0 when the
    corresponding key is missing from the input.
    """

    id_user = colander.SchemaNode(colander.String(), missing='')
    username = colander.SchemaNode(colander.String(), missing='')
    fullname = colander.SchemaNode(colander.String(), missing='')
    passwd = colander.SchemaNode(colander.String(), missing='')
    auth_level = colander.SchemaNode(colander.Integer(), missing=0)
| 34.333333
| 67
| 0.761165
| 52
| 515
| 7.5
| 0.423077
| 0.230769
| 0.333333
| 0.328205
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002198
| 0.116505
| 515
| 14
| 68
| 36.785714
| 0.854945
| 0.075728
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.111111
| 0.333333
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
7cdce49c830546f42eb9e6850e82e6a3acb5ba59
| 34
|
py
|
Python
|
x_net_django_email_template/templatetags/__init__.py
|
x-net-services/x-net-html-template
|
48eca038b61834e96a0573b96bc90db113d63990
|
[
"MIT"
] | null | null | null |
x_net_django_email_template/templatetags/__init__.py
|
x-net-services/x-net-html-template
|
48eca038b61834e96a0573b96bc90db113d63990
|
[
"MIT"
] | null | null | null |
x_net_django_email_template/templatetags/__init__.py
|
x-net-services/x-net-html-template
|
48eca038b61834e96a0573b96bc90db113d63990
|
[
"MIT"
] | null | null | null |
from .html_email import * # noqa
| 17
| 33
| 0.705882
| 5
| 34
| 4.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205882
| 34
| 1
| 34
| 34
| 0.851852
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
861e4ec237a1a32bda8066ed8991aea231c8ada7
| 139
|
py
|
Python
|
src/__init__.py
|
fish98/Dress
|
b0b77816101663616e72dfe7133090a1b74098d4
|
[
"WTFPL"
] | null | null | null |
src/__init__.py
|
fish98/Dress
|
b0b77816101663616e72dfe7133090a1b74098d4
|
[
"WTFPL"
] | 1
|
2020-02-25T12:56:02.000Z
|
2020-02-25T13:14:52.000Z
|
src/__init__.py
|
fish98/Dress
|
b0b77816101663616e72dfe7133090a1b74098d4
|
[
"WTFPL"
] | null | null | null |
# Package entry: pull in the scraping dependencies and start the job.
from selenium import webdriver
import datetime
import time
from selenium.webdriver.chrome.options import Options
import dress
# NOTE(review): main() runs at import time of this package — confirm the
# side effect is intended rather than guarding with __name__ == "__main__".
dress.main()
| 19.857143
| 53
| 0.848921
| 19
| 139
| 6.210526
| 0.526316
| 0.20339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107914
| 139
| 7
| 54
| 19.857143
| 0.951613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.833333
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
863c348fd2f21e393a78f9ae40b0319f74dc6f58
| 68,971
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/neighborrange_a1e9ecb177af994ec56d9e054fd677fb.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 20
|
2019-05-07T01:59:14.000Z
|
2022-02-11T05:24:47.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/neighborrange_a1e9ecb177af994ec56d9e054fd677fb.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 60
|
2019-04-03T18:59:35.000Z
|
2022-02-22T12:05:05.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/neighborrange_a1e9ecb177af994ec56d9e054fd677fb.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 13
|
2019-05-20T10:48:31.000Z
|
2021-10-06T07:45:44.000Z
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class NeighborRange(Base):
"""This object holds information about a BGP4 internal or external neighbor router.
The NeighborRange class encapsulates a list of neighborRange resources that are managed by the user.
A list of resources can be retrieved from the server using the NeighborRange.find() method.
The list can be managed by using the NeighborRange.add() and NeighborRange.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'neighborRange'
_SDM_ATT_MAP = {
'AsNumMode': 'asNumMode',
'Authentication': 'authentication',
'BfdModeOfOperation': 'bfdModeOfOperation',
'BgpId': 'bgpId',
'DutIpAddress': 'dutIpAddress',
'Enable4ByteAsNum': 'enable4ByteAsNum',
'EnableActAsRestarted': 'enableActAsRestarted',
'EnableBfdRegistration': 'enableBfdRegistration',
'EnableBgpId': 'enableBgpId',
'EnableDiscardIxiaGeneratedRoutes': 'enableDiscardIxiaGeneratedRoutes',
'EnableGracefulRestart': 'enableGracefulRestart',
'EnableLinkFlap': 'enableLinkFlap',
'EnableNextHop': 'enableNextHop',
'EnableOptionalParameters': 'enableOptionalParameters',
'EnableSendIxiaSignatureWithRoutes': 'enableSendIxiaSignatureWithRoutes',
'EnableStaggeredStart': 'enableStaggeredStart',
'Enabled': 'enabled',
'Evpn': 'evpn',
'EvpnNextHopCount': 'evpnNextHopCount',
'HoldTimer': 'holdTimer',
'InterfaceStartIndex': 'interfaceStartIndex',
'InterfaceType': 'interfaceType',
'Interfaces': 'interfaces',
'IpV4Mdt': 'ipV4Mdt',
'IpV4Mpls': 'ipV4Mpls',
'IpV4MplsVpn': 'ipV4MplsVpn',
'IpV4Multicast': 'ipV4Multicast',
'IpV4MulticastVpn': 'ipV4MulticastVpn',
'IpV4Unicast': 'ipV4Unicast',
'IpV6Mpls': 'ipV6Mpls',
'IpV6MplsVpn': 'ipV6MplsVpn',
'IpV6Multicast': 'ipV6Multicast',
'IpV6MulticastVpn': 'ipV6MulticastVpn',
'IpV6Unicast': 'ipV6Unicast',
'IsAsbr': 'isAsbr',
'IsInterfaceLearnedInfoAvailable': 'isInterfaceLearnedInfoAvailable',
'IsLearnedInfoRefreshed': 'isLearnedInfoRefreshed',
'LinkFlapDownTime': 'linkFlapDownTime',
'LinkFlapUpTime': 'linkFlapUpTime',
'LocalAsNumber': 'localAsNumber',
'LocalIpAddress': 'localIpAddress',
'Md5Key': 'md5Key',
'NextHop': 'nextHop',
'NumUpdatesPerIteration': 'numUpdatesPerIteration',
'RangeCount': 'rangeCount',
'RemoteAsNumber': 'remoteAsNumber',
'RestartTime': 'restartTime',
'StaggeredStartPeriod': 'staggeredStartPeriod',
'StaleTime': 'staleTime',
'TcpWindowSize': 'tcpWindowSize',
'TrafficGroupId': 'trafficGroupId',
'TtlValue': 'ttlValue',
'Type': 'type',
'UpdateInterval': 'updateInterval',
'Vpls': 'vpls',
}
_SDM_ENUM_MAP = {
'asNumMode': ['fixed', 'increment'],
'authentication': ['null', 'md5'],
'bfdModeOfOperation': ['multiHop', 'singleHop'],
'type': ['internal', 'external'],
}
def __init__(self, parent, list_op=False):
super(NeighborRange, self).__init__(parent, list_op)
@property
def Bgp4VpnBgpAdVplsRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.bgp4vpnbgpadvplsrange_c396d4abd272d60c3ff5958f98263958.Bgp4VpnBgpAdVplsRange): An instance of the Bgp4VpnBgpAdVplsRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.bgp4vpnbgpadvplsrange_c396d4abd272d60c3ff5958f98263958 import Bgp4VpnBgpAdVplsRange
if self._properties.get('Bgp4VpnBgpAdVplsRange', None) is not None:
return self._properties.get('Bgp4VpnBgpAdVplsRange')
else:
return Bgp4VpnBgpAdVplsRange(self)
@property
def EthernetSegments(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ethernetsegments_a0eef4099ef38ee0e07ecf7430536119.EthernetSegments): An instance of the EthernetSegments class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ethernetsegments_a0eef4099ef38ee0e07ecf7430536119 import EthernetSegments
if self._properties.get('EthernetSegments', None) is not None:
return self._properties.get('EthernetSegments')
else:
return EthernetSegments(self)
@property
def InterfaceLearnedInfo(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.interfacelearnedinfo_44709d044bd5612e19aaa934437e496a.InterfaceLearnedInfo): An instance of the InterfaceLearnedInfo class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.interfacelearnedinfo_44709d044bd5612e19aaa934437e496a import InterfaceLearnedInfo
if self._properties.get('InterfaceLearnedInfo', None) is not None:
return self._properties.get('InterfaceLearnedInfo')
else:
return InterfaceLearnedInfo(self)._select()
@property
def L2Site(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.l2site_261b4b7984b4a56f96a23ca529af873f.L2Site): An instance of the L2Site class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.l2site_261b4b7984b4a56f96a23ca529af873f import L2Site
if self._properties.get('L2Site', None) is not None:
return self._properties.get('L2Site')
else:
return L2Site(self)
@property
def L3Site(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.l3site_1184c1264fe43eeeb88002bee9622490.L3Site): An instance of the L3Site class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.l3site_1184c1264fe43eeeb88002bee9622490 import L3Site
if self._properties.get('L3Site', None) is not None:
return self._properties.get('L3Site')
else:
return L3Site(self)
@property
def LearnedFilter(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.learnedfilter_df26bdb55c5d9a2a87a7eb099776d203.LearnedFilter): An instance of the LearnedFilter class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.learnedfilter_df26bdb55c5d9a2a87a7eb099776d203 import LearnedFilter
if self._properties.get('LearnedFilter', None) is not None:
return self._properties.get('LearnedFilter')
else:
return LearnedFilter(self)._select()
@property
def LearnedInformation(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.learnedinformation_1802ba18af469548428332b926b4e374.LearnedInformation): An instance of the LearnedInformation class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.learnedinformation_1802ba18af469548428332b926b4e374 import LearnedInformation
if self._properties.get('LearnedInformation', None) is not None:
return self._properties.get('LearnedInformation')
else:
return LearnedInformation(self)._select()
@property
def MplsRouteRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mplsrouterange_d92b7c314e154932c6a571f5bccc9139.MplsRouteRange): An instance of the MplsRouteRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mplsrouterange_d92b7c314e154932c6a571f5bccc9139 import MplsRouteRange
if self._properties.get('MplsRouteRange', None) is not None:
return self._properties.get('MplsRouteRange')
else:
return MplsRouteRange(self)
@property
def OpaqueRouteRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.opaquerouterange_758cfa0f54d8a32ec8c2cdda163db9de.OpaqueRouteRange): An instance of the OpaqueRouteRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.opaquerouterange_758cfa0f54d8a32ec8c2cdda163db9de import OpaqueRouteRange
if self._properties.get('OpaqueRouteRange', None) is not None:
return self._properties.get('OpaqueRouteRange')
else:
return OpaqueRouteRange(self)
@property
def RouteImportOptions(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.routeimportoptions_6dbeb38f5cd6a11bd94fb0d2945c0d1b.RouteImportOptions): An instance of the RouteImportOptions class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.routeimportoptions_6dbeb38f5cd6a11bd94fb0d2945c0d1b import RouteImportOptions
if self._properties.get('RouteImportOptions', None) is not None:
return self._properties.get('RouteImportOptions')
else:
return RouteImportOptions(self)
@property
def RouteRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.routerange_0d3bbd0c1e734e0573f923091baa82c2.RouteRange): An instance of the RouteRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.routerange_0d3bbd0c1e734e0573f923091baa82c2 import RouteRange
if self._properties.get('RouteRange', None) is not None:
return self._properties.get('RouteRange')
else:
return RouteRange(self)
@property
def UserDefinedAfiSafi(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.userdefinedafisafi_963e12659eb9e18aba3316a600da5e38.UserDefinedAfiSafi): An instance of the UserDefinedAfiSafi class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.userdefinedafisafi_963e12659eb9e18aba3316a600da5e38 import UserDefinedAfiSafi
if self._properties.get('UserDefinedAfiSafi', None) is not None:
return self._properties.get('UserDefinedAfiSafi')
else:
return UserDefinedAfiSafi(self)
@property
def AsNumMode(self):
# type: () -> str
"""DEPRECATED
Returns
-------
- str(fixed | increment): (External only) Indicates that each new session uses a different AS number.
"""
return self._get_attribute(self._SDM_ATT_MAP['AsNumMode'])
@AsNumMode.setter
def AsNumMode(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['AsNumMode'], value)
@property
def Authentication(self):
# type: () -> str
"""
Returns
-------
- str(null | md5): Select the type of cryptographic authentication to be used for the BGP peers in this peer range.
"""
return self._get_attribute(self._SDM_ATT_MAP['Authentication'])
@Authentication.setter
def Authentication(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Authentication'], value)
@property
def BfdModeOfOperation(self):
# type: () -> str
"""
Returns
-------
- str(multiHop | singleHop): Indicates whether to use a single-hop or a multi-hop mode of operation for the BFD session being created with a BGP peer.
"""
return self._get_attribute(self._SDM_ATT_MAP['BfdModeOfOperation'])
@BfdModeOfOperation.setter
def BfdModeOfOperation(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['BfdModeOfOperation'], value)
@property
def BgpId(self):
# type: () -> str
"""
Returns
-------
- str: The BGP ID used in OPEN messages.
"""
return self._get_attribute(self._SDM_ATT_MAP['BgpId'])
@BgpId.setter
def BgpId(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['BgpId'], value)
@property
def DutIpAddress(self):
# type: () -> str
"""
Returns
-------
- str: The IP address of the DUT router.
"""
return self._get_attribute(self._SDM_ATT_MAP['DutIpAddress'])
@DutIpAddress.setter
def DutIpAddress(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['DutIpAddress'], value)
@property
def Enable4ByteAsNum(self):
# type: () -> bool
"""
Returns
-------
- bool: Enables the 4-byte Autonomous System (AS) number of the DUT/SUT.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enable4ByteAsNum'])
@Enable4ByteAsNum.setter
def Enable4ByteAsNum(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Enable4ByteAsNum'], value)
@property
def EnableActAsRestarted(self):
# type: () -> bool
"""
Returns
-------
- bool: Controls the operation of BGP Graceful Restart.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableActAsRestarted'])
@EnableActAsRestarted.setter
def EnableActAsRestarted(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EnableActAsRestarted'], value)
@property
def EnableBfdRegistration(self):
# type: () -> bool
"""
Returns
-------
- bool: Enables the BFD registration.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableBfdRegistration'])
@EnableBfdRegistration.setter
def EnableBfdRegistration(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EnableBfdRegistration'], value)
@property
def EnableBgpId(self):
# type: () -> bool
"""
Returns
-------
- bool: The BGP ID used in OPEN messages.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableBgpId'])
@EnableBgpId.setter
def EnableBgpId(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EnableBgpId'], value)
@property
def EnableDiscardIxiaGeneratedRoutes(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, enables the discard of Ixia generated routes
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableDiscardIxiaGeneratedRoutes'])
@EnableDiscardIxiaGeneratedRoutes.setter
def EnableDiscardIxiaGeneratedRoutes(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EnableDiscardIxiaGeneratedRoutes'], value)
@property
def EnableGracefulRestart(self):
# type: () -> bool
"""
Returns
-------
- bool: Controls the operation of BGP Graceful Restart.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableGracefulRestart'])
@EnableGracefulRestart.setter
def EnableGracefulRestart(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EnableGracefulRestart'], value)
@property
def EnableLinkFlap(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, enables link flap
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableLinkFlap'])
@EnableLinkFlap.setter
def EnableLinkFlap(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EnableLinkFlap'], value)
@property
def EnableNextHop(self):
# type: () -> bool
"""
Returns
-------
- bool: Used for IPv4 traffic. Controls the use of the NEXT_HOP attribute. (default = disabled)
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableNextHop'])
@EnableNextHop.setter
def EnableNextHop(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EnableNextHop'], value)
@property
def EnableOptionalParameters(self):
# type: () -> bool
"""
Returns
-------
- bool: Controls how an OPEN is conducted in the presence of optional parameters.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableOptionalParameters'])
@EnableOptionalParameters.setter
def EnableOptionalParameters(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EnableOptionalParameters'], value)
@property
def EnableSendIxiaSignatureWithRoutes(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, enables sending of Ixia signature with routes
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableSendIxiaSignatureWithRoutes'])
@EnableSendIxiaSignatureWithRoutes.setter
def EnableSendIxiaSignatureWithRoutes(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EnableSendIxiaSignatureWithRoutes'], value)
@property
def EnableStaggeredStart(self):
# type: () -> bool
"""
Returns
-------
- bool: Controls the staggering and period of initial start messages.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableStaggeredStart'])
@EnableStaggeredStart.setter
def EnableStaggeredStart(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EnableStaggeredStart'], value)
@property
def Enabled(self):
# type: () -> bool
"""
Returns
-------
- bool: Enables or disables simulation of the router.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
@Enabled.setter
def Enabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
@property
def Evpn(self):
# type: () -> bool
"""
Returns
-------
- bool: If enabled, then this BGP peer range supports BGP MPLS Based Ethernet VPN per draft-ietf-l2vpn-evpn-03. Default value is false.
"""
return self._get_attribute(self._SDM_ATT_MAP['Evpn'])
@Evpn.setter
def Evpn(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Evpn'], value)
@property
def EvpnNextHopCount(self):
# type: () -> int
"""
Returns
-------
- number: It is used to replicate the traffic among the available Next Hops in Ingress Replication mode. Default value is 1. Minimum value is 1 and maximum value is 255.
"""
return self._get_attribute(self._SDM_ATT_MAP['EvpnNextHopCount'])
@EvpnNextHopCount.setter
def EvpnNextHopCount(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['EvpnNextHopCount'], value)
@property
def HoldTimer(self):
# type: () -> int
"""
Returns
-------
- number: The period of time between KEEP-ALIVE messages sent to the DUT.
"""
return self._get_attribute(self._SDM_ATT_MAP['HoldTimer'])
@HoldTimer.setter
def HoldTimer(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['HoldTimer'], value)
@property
def InterfaceStartIndex(self):
# type: () -> int
"""
Returns
-------
- number: The assigned protocol interface ID for this SM interface.
"""
return self._get_attribute(self._SDM_ATT_MAP['InterfaceStartIndex'])
@InterfaceStartIndex.setter
def InterfaceStartIndex(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['InterfaceStartIndex'], value)
@property
def InterfaceType(self):
# type: () -> str
"""
Returns
-------
- str: The type of interface to be selected for this BGP interface. One of:Protocol Interface, DHCP, PPP
"""
return self._get_attribute(self._SDM_ATT_MAP['InterfaceType'])
@InterfaceType.setter
def InterfaceType(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['InterfaceType'], value)
@property
def Interfaces(self):
# type: () -> str
"""
Returns
-------
- str(None | /api/v1/sessions/1/ixnetwork/vport/.../interface | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range): The interfaces that are associated with the selected interface type.
"""
return self._get_attribute(self._SDM_ATT_MAP['Interfaces'])
@Interfaces.setter
def Interfaces(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Interfaces'], value)
@property
def IpV4Mdt(self):
# type: () -> bool
"""
Returns
-------
- bool: Enables the use of this Data MDT range on the simulated interface.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpV4Mdt'])
@IpV4Mdt.setter
def IpV4Mdt(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['IpV4Mdt'], value)
@property
def IpV4Mpls(self):
# type: () -> bool
"""
Returns
-------
- bool: If enabled, this BGP router/peer supports the IPv4 MPLS address family.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpV4Mpls'])
@IpV4Mpls.setter
def IpV4Mpls(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['IpV4Mpls'], value)
@property
def IpV4MplsVpn(self):
# type: () -> bool
"""
Returns
-------
- bool: If enabled, this BGP router/peer supports the IPv4 MPLS/VPN address family.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpV4MplsVpn'])
@IpV4MplsVpn.setter
def IpV4MplsVpn(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['IpV4MplsVpn'], value)
@property
def IpV4Multicast(self):
# type: () -> bool
"""
Returns
-------
- bool: If enabled, this BGP router/peer supports the IPv4 multicast address family.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpV4Multicast'])
@IpV4Multicast.setter
def IpV4Multicast(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['IpV4Multicast'], value)
@property
def IpV4MulticastVpn(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, this BGP router/peer supports the IPv4 Multicast/VPN address family.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpV4MulticastVpn'])
@IpV4MulticastVpn.setter
def IpV4MulticastVpn(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['IpV4MulticastVpn'], value)
@property
def IpV4Unicast(self):
# type: () -> bool
"""
Returns
-------
- bool: If enabled, this BGP router/peer supports the IPv4 unicast address family.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpV4Unicast'])
@IpV4Unicast.setter
def IpV4Unicast(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['IpV4Unicast'], value)
@property
def IpV6Mpls(self):
# type: () -> bool
"""
Returns
-------
- bool: If enabled, this BGP router/peer supports the IPv6 MPLS address family.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpV6Mpls'])
@IpV6Mpls.setter
def IpV6Mpls(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['IpV6Mpls'], value)
@property
def IpV6MplsVpn(self):
# type: () -> bool
"""
Returns
-------
- bool: If enabled, this BGP router/peer supports the IPv6 MPLS/VPN address family.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpV6MplsVpn'])
@IpV6MplsVpn.setter
def IpV6MplsVpn(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['IpV6MplsVpn'], value)
@property
def IpV6Multicast(self):
# type: () -> bool
"""
Returns
-------
- bool: If enabled, this BGP router/peer supports the IPv6 multicast address family.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpV6Multicast'])
@IpV6Multicast.setter
def IpV6Multicast(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['IpV6Multicast'], value)
@property
def IpV6MulticastVpn(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, this BGP router/peer supports the IPv6 Multicast/VPN address family.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpV6MulticastVpn'])
@IpV6MulticastVpn.setter
def IpV6MulticastVpn(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['IpV6MulticastVpn'], value)
@property
def IpV6Unicast(self):
# type: () -> bool
"""
Returns
-------
- bool: If enabled, this BGP router/peer supports the IPv6 unicast address family.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpV6Unicast'])
@IpV6Unicast.setter
def IpV6Unicast(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['IpV6Unicast'], value)
@property
def IsAsbr(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, it is ASBR
"""
return self._get_attribute(self._SDM_ATT_MAP['IsAsbr'])
@IsAsbr.setter
def IsAsbr(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['IsAsbr'], value)
    @property
    def IsInterfaceLearnedInfoAvailable(self):
        # type: () -> bool
        """Read-only: has no setter because the value is reported by the server.
        Returns
        -------
        - bool: If true, learned information is made available.
        """
        return self._get_attribute(self._SDM_ATT_MAP['IsInterfaceLearnedInfoAvailable'])
@property
def IsLearnedInfoRefreshed(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, learned information is refreshed.
"""
return self._get_attribute(self._SDM_ATT_MAP['IsLearnedInfoRefreshed'])
@property
def LinkFlapDownTime(self):
# type: () -> int
"""
Returns
-------
- number: Signifies the link flap down time
"""
return self._get_attribute(self._SDM_ATT_MAP['LinkFlapDownTime'])
@LinkFlapDownTime.setter
def LinkFlapDownTime(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['LinkFlapDownTime'], value)
@property
def LinkFlapUpTime(self):
# type: () -> int
"""
Returns
-------
- number: Signifies the link flap up time
"""
return self._get_attribute(self._SDM_ATT_MAP['LinkFlapUpTime'])
@LinkFlapUpTime.setter
def LinkFlapUpTime(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['LinkFlapUpTime'], value)
@property
def LocalAsNumber(self):
# type: () -> str
"""
Returns
-------
- str: (External only) The first AS Num assigned to the simulated neighbor router. May be set for external neighbors on any port type, but only Linux-based ports may set this for internal neighbors.
"""
return self._get_attribute(self._SDM_ATT_MAP['LocalAsNumber'])
@LocalAsNumber.setter
def LocalAsNumber(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['LocalAsNumber'], value)
@property
def LocalIpAddress(self):
# type: () -> str
"""
Returns
-------
- str: The first IP address for the simulated neighbor routers and the number of routers.
"""
return self._get_attribute(self._SDM_ATT_MAP['LocalIpAddress'])
@LocalIpAddress.setter
def LocalIpAddress(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['LocalIpAddress'], value)
@property
def Md5Key(self):
# type: () -> str
"""
Returns
-------
- str: (Active only when MD5 is selected in the Authentication Type field.) (String) Enter a value to be used as a secret MD5 Key for authentication. The maximum length allowed is 255 characters.One MD5 key can be configured per BGP peer range. Sessions from all peers in this peer range will use this MD5 key if MD5 is enabled.
"""
return self._get_attribute(self._SDM_ATT_MAP['Md5Key'])
@Md5Key.setter
def Md5Key(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Md5Key'], value)
@property
def NextHop(self):
# type: () -> str
"""
Returns
-------
- str: If enableNextHop is true, this is the IPv4 address used as the next hop. (default = 0.0.0.0)
"""
return self._get_attribute(self._SDM_ATT_MAP['NextHop'])
@NextHop.setter
def NextHop(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['NextHop'], value)
@property
def NumUpdatesPerIteration(self):
# type: () -> int
"""
Returns
-------
- number: When the protocol server operates on older ports that do not possess a local processor, this tuning parameter controls how many UPDATE messages will be sent at a time. When many routers are being simulated on such a port, changing this value may help to increase or decrease performance. (default = 1)
"""
return self._get_attribute(self._SDM_ATT_MAP['NumUpdatesPerIteration'])
@NumUpdatesPerIteration.setter
def NumUpdatesPerIteration(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['NumUpdatesPerIteration'], value)
@property
def RangeCount(self):
# type: () -> int
"""
Returns
-------
- number: The number of routers.
"""
return self._get_attribute(self._SDM_ATT_MAP['RangeCount'])
@RangeCount.setter
def RangeCount(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['RangeCount'], value)
@property
def RemoteAsNumber(self):
# type: () -> int
"""DEPRECATED
Returns
-------
- number: The remote Autonomous System number associated with the routers.
"""
return self._get_attribute(self._SDM_ATT_MAP['RemoteAsNumber'])
@RemoteAsNumber.setter
def RemoteAsNumber(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['RemoteAsNumber'], value)
@property
def RestartTime(self):
# type: () -> int
"""
Returns
-------
- number: Controls the operation of BGP Graceful Restart.
"""
return self._get_attribute(self._SDM_ATT_MAP['RestartTime'])
@RestartTime.setter
def RestartTime(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['RestartTime'], value)
@property
def StaggeredStartPeriod(self):
# type: () -> int
"""
Returns
-------
- number: Controls the staggering and period of initial start messages.
"""
return self._get_attribute(self._SDM_ATT_MAP['StaggeredStartPeriod'])
@StaggeredStartPeriod.setter
def StaggeredStartPeriod(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['StaggeredStartPeriod'], value)
@property
def StaleTime(self):
# type: () -> int
"""
Returns
-------
- number: Controls the operation of BGP Graceful Restart.
"""
return self._get_attribute(self._SDM_ATT_MAP['StaleTime'])
@StaleTime.setter
def StaleTime(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['StaleTime'], value)
@property
def TcpWindowSize(self):
# type: () -> int
"""
Returns
-------
- number: (External neighbor only) The TCP window used for communications from the neighbor. (default = 8,192)
"""
return self._get_attribute(self._SDM_ATT_MAP['TcpWindowSize'])
@TcpWindowSize.setter
def TcpWindowSize(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['TcpWindowSize'], value)
@property
def TrafficGroupId(self):
# type: () -> str
"""
Returns
-------
- str(None | /api/v1/sessions/1/ixnetwork/traffic/.../trafficGroup): The name of the group to which this port is assigned, for the purpose of creating traffic streams among source/destination members of the group.
"""
return self._get_attribute(self._SDM_ATT_MAP['TrafficGroupId'])
@TrafficGroupId.setter
def TrafficGroupId(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['TrafficGroupId'], value)
@property
def TtlValue(self):
# type: () -> int
"""
Returns
-------
- number: The limited number of iterations that a unit of data can experience before the data is discarded.
"""
return self._get_attribute(self._SDM_ATT_MAP['TtlValue'])
@TtlValue.setter
def TtlValue(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['TtlValue'], value)
@property
def Type(self):
# type: () -> str
"""
Returns
-------
- str(internal | external): Indicates that the neighbor is either an internal or external router.
"""
return self._get_attribute(self._SDM_ATT_MAP['Type'])
@Type.setter
def Type(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Type'], value)
@property
def UpdateInterval(self):
# type: () -> int
"""
Returns
-------
- number: The frequency with which UPDATE messages are sent to the DUT.
"""
return self._get_attribute(self._SDM_ATT_MAP['UpdateInterval'])
@UpdateInterval.setter
def UpdateInterval(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['UpdateInterval'], value)
@property
def Vpls(self):
# type: () -> bool
"""
Returns
-------
- bool: If enabled, this BGP router/peer supports BGP VPLS per the Kompella draft.
"""
return self._get_attribute(self._SDM_ATT_MAP['Vpls'])
@Vpls.setter
def Vpls(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Vpls'], value)
def update(self, AsNumMode=None, Authentication=None, BfdModeOfOperation=None, BgpId=None, DutIpAddress=None, Enable4ByteAsNum=None, EnableActAsRestarted=None, EnableBfdRegistration=None, EnableBgpId=None, EnableDiscardIxiaGeneratedRoutes=None, EnableGracefulRestart=None, EnableLinkFlap=None, EnableNextHop=None, EnableOptionalParameters=None, EnableSendIxiaSignatureWithRoutes=None, EnableStaggeredStart=None, Enabled=None, Evpn=None, EvpnNextHopCount=None, HoldTimer=None, InterfaceStartIndex=None, InterfaceType=None, Interfaces=None, IpV4Mdt=None, IpV4Mpls=None, IpV4MplsVpn=None, IpV4Multicast=None, IpV4MulticastVpn=None, IpV4Unicast=None, IpV6Mpls=None, IpV6MplsVpn=None, IpV6Multicast=None, IpV6MulticastVpn=None, IpV6Unicast=None, IsAsbr=None, LinkFlapDownTime=None, LinkFlapUpTime=None, LocalAsNumber=None, LocalIpAddress=None, Md5Key=None, NextHop=None, NumUpdatesPerIteration=None, RangeCount=None, RemoteAsNumber=None, RestartTime=None, StaggeredStartPeriod=None, StaleTime=None, TcpWindowSize=None, TrafficGroupId=None, TtlValue=None, Type=None, UpdateInterval=None, Vpls=None):
# type: (str, str, str, str, str, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, int, int, int, str, str, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, int, int, str, str, str, str, int, int, int, int, int, int, int, str, int, str, int, bool) -> NeighborRange
"""Updates neighborRange resource on the server.
Args
----
- AsNumMode (str(fixed | increment)): (External only) Indicates that each new session uses a different AS number.
- Authentication (str(null | md5)): Select the type of cryptographic authentication to be used for the BGP peers in this peer range.
- BfdModeOfOperation (str(multiHop | singleHop)): Indicates whether to use a single-hop or a multi-hop mode of operation for the BFD session being created with a BGP peer.
- BgpId (str): The BGP ID used in OPEN messages.
- DutIpAddress (str): The IP address of the DUT router.
- Enable4ByteAsNum (bool): Enables the 4-byte Autonomous System (AS) number of the DUT/SUT.
- EnableActAsRestarted (bool): Controls the operation of BGP Graceful Restart.
- EnableBfdRegistration (bool): Enables the BFD registration.
- EnableBgpId (bool): The BGP ID used in OPEN messages.
- EnableDiscardIxiaGeneratedRoutes (bool): If true, enables the discard of Ixia generated routes
- EnableGracefulRestart (bool): Controls the operation of BGP Graceful Restart.
- EnableLinkFlap (bool): If true, enables link flap
- EnableNextHop (bool): Used for IPv4 traffic. Controls the use of the NEXT_HOP attribute. (default = disabled)
- EnableOptionalParameters (bool): Controls how an OPEN is conducted in the presence of optional parameters.
- EnableSendIxiaSignatureWithRoutes (bool): If true, enables sending of Ixia signature with routes
- EnableStaggeredStart (bool): Controls the staggering and period of initial start messages.
- Enabled (bool): Enables or disables simulation of the router.
- Evpn (bool): If enabled, then this BGP peer range supports BGP MPLS Based Ethernet VPN per draft-ietf-l2vpn-evpn-03. Default value is false.
- EvpnNextHopCount (number): It is used to replicate the traffic among the available Next Hops in Ingress Replication mode. Default value is 1. Minimum value is 1 and maximum value is 255.
- HoldTimer (number): The period of time between KEEP-ALIVE messages sent to the DUT.
- InterfaceStartIndex (number): The assigned protocol interface ID for this SM interface.
- InterfaceType (str): The type of interface to be selected for this BGP interface. One of:Protocol Interface, DHCP, PPP
- Interfaces (str(None | /api/v1/sessions/1/ixnetwork/vport/.../interface | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range)): The interfaces that are associated with the selected interface type.
- IpV4Mdt (bool): Enables the use of this Data MDT range on the simulated interface.
- IpV4Mpls (bool): If enabled, this BGP router/peer supports the IPv4 MPLS address family.
- IpV4MplsVpn (bool): If enabled, this BGP router/peer supports the IPv4 MPLS/VPN address family.
- IpV4Multicast (bool): If enabled, this BGP router/peer supports the IPv4 multicast address family.
- IpV4MulticastVpn (bool): If true, this BGP router/peer supports the IPv4 Multicast/VPN address family.
- IpV4Unicast (bool): If enabled, this BGP router/peer supports the IPv4 unicast address family.
- IpV6Mpls (bool): If enabled, this BGP router/peer supports the IPv6 MPLS address family.
- IpV6MplsVpn (bool): If enabled, this BGP router/peer supports the IPv6 MPLS/VPN address family.
- IpV6Multicast (bool): If enabled, this BGP router/peer supports the IPv6 multicast address family.
- IpV6MulticastVpn (bool): If true, this BGP router/peer supports the IPv6 Multicast/VPN address family.
- IpV6Unicast (bool): If enabled, this BGP router/peer supports the IPv6 unicast address family.
- IsAsbr (bool): If true, it is ASBR
- LinkFlapDownTime (number): Signifies the link flap down time
- LinkFlapUpTime (number): Signifies the link flap up time
- LocalAsNumber (str): (External only) The first AS Num assigned to the simulated neighbor router. May be set for external neighbors on any port type, but only Linux-based ports may set this for internal neighbors.
- LocalIpAddress (str): The first IP address for the simulated neighbor routers and the number of routers.
- Md5Key (str): (Active only when MD5 is selected in the Authentication Type field.) (String) Enter a value to be used as a secret MD5 Key for authentication. The maximum length allowed is 255 characters.One MD5 key can be configured per BGP peer range. Sessions from all peers in this peer range will use this MD5 key if MD5 is enabled.
- NextHop (str): If enableNextHop is true, this is the IPv4 address used as the next hop. (default = 0.0.0.0)
- NumUpdatesPerIteration (number): When the protocol server operates on older ports that do not possess a local processor, this tuning parameter controls how many UPDATE messages will be sent at a time. When many routers are being simulated on such a port, changing this value may help to increase or decrease performance. (default = 1)
- RangeCount (number): The number of routers.
- RemoteAsNumber (number): The remote Autonomous System number associated with the routers.
- RestartTime (number): Controls the operation of BGP Graceful Restart.
- StaggeredStartPeriod (number): Controls the staggering and period of initial start messages.
- StaleTime (number): Controls the operation of BGP Graceful Restart.
- TcpWindowSize (number): (External neighbor only) The TCP window used for communications from the neighbor. (default = 8,192)
- TrafficGroupId (str(None | /api/v1/sessions/1/ixnetwork/traffic/.../trafficGroup)): The name of the group to which this port is assigned, for the purpose of creating traffic streams among source/destination members of the group.
- TtlValue (number): The limited number of iterations that a unit of data can experience before the data is discarded.
- Type (str(internal | external)): Indicates that the neighbor is either an internal or external router.
- UpdateInterval (number): The frequency with which UPDATE messages are sent to the DUT.
- Vpls (bool): If enabled, this BGP router/peer supports BGP VPLS per the Kompella draft.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, AsNumMode=None, Authentication=None, BfdModeOfOperation=None, BgpId=None, DutIpAddress=None, Enable4ByteAsNum=None, EnableActAsRestarted=None, EnableBfdRegistration=None, EnableBgpId=None, EnableDiscardIxiaGeneratedRoutes=None, EnableGracefulRestart=None, EnableLinkFlap=None, EnableNextHop=None, EnableOptionalParameters=None, EnableSendIxiaSignatureWithRoutes=None, EnableStaggeredStart=None, Enabled=None, Evpn=None, EvpnNextHopCount=None, HoldTimer=None, InterfaceStartIndex=None, InterfaceType=None, Interfaces=None, IpV4Mdt=None, IpV4Mpls=None, IpV4MplsVpn=None, IpV4Multicast=None, IpV4MulticastVpn=None, IpV4Unicast=None, IpV6Mpls=None, IpV6MplsVpn=None, IpV6Multicast=None, IpV6MulticastVpn=None, IpV6Unicast=None, IsAsbr=None, LinkFlapDownTime=None, LinkFlapUpTime=None, LocalAsNumber=None, LocalIpAddress=None, Md5Key=None, NextHop=None, NumUpdatesPerIteration=None, RangeCount=None, RemoteAsNumber=None, RestartTime=None, StaggeredStartPeriod=None, StaleTime=None, TcpWindowSize=None, TrafficGroupId=None, TtlValue=None, Type=None, UpdateInterval=None, Vpls=None):
# type: (str, str, str, str, str, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, int, int, int, str, str, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, int, int, str, str, str, str, int, int, int, int, int, int, int, str, int, str, int, bool) -> NeighborRange
"""Adds a new neighborRange resource on the server and adds it to the container.
Args
----
- AsNumMode (str(fixed | increment)): (External only) Indicates that each new session uses a different AS number.
- Authentication (str(null | md5)): Select the type of cryptographic authentication to be used for the BGP peers in this peer range.
- BfdModeOfOperation (str(multiHop | singleHop)): Indicates whether to use a single-hop or a multi-hop mode of operation for the BFD session being created with a BGP peer.
- BgpId (str): The BGP ID used in OPEN messages.
- DutIpAddress (str): The IP address of the DUT router.
- Enable4ByteAsNum (bool): Enables the 4-byte Autonomous System (AS) number of the DUT/SUT.
- EnableActAsRestarted (bool): Controls the operation of BGP Graceful Restart.
- EnableBfdRegistration (bool): Enables the BFD registration.
- EnableBgpId (bool): The BGP ID used in OPEN messages.
- EnableDiscardIxiaGeneratedRoutes (bool): If true, enables the discard of Ixia generated routes
- EnableGracefulRestart (bool): Controls the operation of BGP Graceful Restart.
- EnableLinkFlap (bool): If true, enables link flap
- EnableNextHop (bool): Used for IPv4 traffic. Controls the use of the NEXT_HOP attribute. (default = disabled)
- EnableOptionalParameters (bool): Controls how an OPEN is conducted in the presence of optional parameters.
- EnableSendIxiaSignatureWithRoutes (bool): If true, enables sending of Ixia signature with routes
- EnableStaggeredStart (bool): Controls the staggering and period of initial start messages.
- Enabled (bool): Enables or disables simulation of the router.
- Evpn (bool): If enabled, then this BGP peer range supports BGP MPLS Based Ethernet VPN per draft-ietf-l2vpn-evpn-03. Default value is false.
- EvpnNextHopCount (number): It is used to replicate the traffic among the available Next Hops in Ingress Replication mode. Default value is 1. Minimum value is 1 and maximum value is 255.
- HoldTimer (number): The period of time between KEEP-ALIVE messages sent to the DUT.
- InterfaceStartIndex (number): The assigned protocol interface ID for this SM interface.
- InterfaceType (str): The type of interface to be selected for this BGP interface. One of:Protocol Interface, DHCP, PPP
- Interfaces (str(None | /api/v1/sessions/1/ixnetwork/vport/.../interface | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range)): The interfaces that are associated with the selected interface type.
- IpV4Mdt (bool): Enables the use of this Data MDT range on the simulated interface.
- IpV4Mpls (bool): If enabled, this BGP router/peer supports the IPv4 MPLS address family.
- IpV4MplsVpn (bool): If enabled, this BGP router/peer supports the IPv4 MPLS/VPN address family.
- IpV4Multicast (bool): If enabled, this BGP router/peer supports the IPv4 multicast address family.
- IpV4MulticastVpn (bool): If true, this BGP router/peer supports the IPv4 Multicast/VPN address family.
- IpV4Unicast (bool): If enabled, this BGP router/peer supports the IPv4 unicast address family.
- IpV6Mpls (bool): If enabled, this BGP router/peer supports the IPv6 MPLS address family.
- IpV6MplsVpn (bool): If enabled, this BGP router/peer supports the IPv6 MPLS/VPN address family.
- IpV6Multicast (bool): If enabled, this BGP router/peer supports the IPv6 multicast address family.
- IpV6MulticastVpn (bool): If true, this BGP router/peer supports the IPv6 Multicast/VPN address family.
- IpV6Unicast (bool): If enabled, this BGP router/peer supports the IPv6 unicast address family.
- IsAsbr (bool): If true, it is ASBR
- LinkFlapDownTime (number): Signifies the link flap down time
- LinkFlapUpTime (number): Signifies the link flap up time
- LocalAsNumber (str): (External only) The first AS Num assigned to the simulated neighbor router. May be set for external neighbors on any port type, but only Linux-based ports may set this for internal neighbors.
- LocalIpAddress (str): The first IP address for the simulated neighbor routers and the number of routers.
- Md5Key (str): (Active only when MD5 is selected in the Authentication Type field.) (String) Enter a value to be used as a secret MD5 Key for authentication. The maximum length allowed is 255 characters.One MD5 key can be configured per BGP peer range. Sessions from all peers in this peer range will use this MD5 key if MD5 is enabled.
- NextHop (str): If enableNextHop is true, this is the IPv4 address used as the next hop. (default = 0.0.0.0)
- NumUpdatesPerIteration (number): When the protocol server operates on older ports that do not possess a local processor, this tuning parameter controls how many UPDATE messages will be sent at a time. When many routers are being simulated on such a port, changing this value may help to increase or decrease performance. (default = 1)
- RangeCount (number): The number of routers.
- RemoteAsNumber (number): The remote Autonomous System number associated with the routers.
- RestartTime (number): Controls the operation of BGP Graceful Restart.
- StaggeredStartPeriod (number): Controls the staggering and period of initial start messages.
- StaleTime (number): Controls the operation of BGP Graceful Restart.
- TcpWindowSize (number): (External neighbor only) The TCP window used for communications from the neighbor. (default = 8,192)
- TrafficGroupId (str(None | /api/v1/sessions/1/ixnetwork/traffic/.../trafficGroup)): The name of the group to which this port is assigned, for the purpose of creating traffic streams among source/destination members of the group.
- TtlValue (number): The limited number of iterations that a unit of data can experience before the data is discarded.
- Type (str(internal | external)): Indicates that the neighbor is either an internal or external router.
- UpdateInterval (number): The frequency with which UPDATE messages are sent to the DUT.
- Vpls (bool): If enabled, this BGP router/peer supports BGP VPLS per the Kompella draft.
Returns
-------
- self: This instance with all currently retrieved neighborRange resources using find and the newly added neighborRange resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
    def remove(self):
        """Deletes all the contained neighborRange resources in this instance from the server.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Issues a DELETE for every neighborRange resource currently held by
        # this container object; nothing is returned on success.
        self._delete()
    def find(self, AsNumMode=None, Authentication=None, BfdModeOfOperation=None, BgpId=None, DutIpAddress=None, Enable4ByteAsNum=None, EnableActAsRestarted=None, EnableBfdRegistration=None, EnableBgpId=None, EnableDiscardIxiaGeneratedRoutes=None, EnableGracefulRestart=None, EnableLinkFlap=None, EnableNextHop=None, EnableOptionalParameters=None, EnableSendIxiaSignatureWithRoutes=None, EnableStaggeredStart=None, Enabled=None, Evpn=None, EvpnNextHopCount=None, HoldTimer=None, InterfaceStartIndex=None, InterfaceType=None, Interfaces=None, IpV4Mdt=None, IpV4Mpls=None, IpV4MplsVpn=None, IpV4Multicast=None, IpV4MulticastVpn=None, IpV4Unicast=None, IpV6Mpls=None, IpV6MplsVpn=None, IpV6Multicast=None, IpV6MulticastVpn=None, IpV6Unicast=None, IsAsbr=None, IsInterfaceLearnedInfoAvailable=None, IsLearnedInfoRefreshed=None, LinkFlapDownTime=None, LinkFlapUpTime=None, LocalAsNumber=None, LocalIpAddress=None, Md5Key=None, NextHop=None, NumUpdatesPerIteration=None, RangeCount=None, RemoteAsNumber=None, RestartTime=None, StaggeredStartPeriod=None, StaleTime=None, TcpWindowSize=None, TrafficGroupId=None, TtlValue=None, Type=None, UpdateInterval=None, Vpls=None):
        # type: (str, str, str, str, str, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, int, int, int, str, str, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, int, int, str, str, str, str, int, int, int, int, int, int, int, str, int, str, int, bool) -> NeighborRange
        """Finds and retrieves neighborRange resources from the server.
        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve neighborRange resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all neighborRange resources from the server.
        Args
        ----
        - AsNumMode (str(fixed | increment)): (External only) Indicates that each new session uses a different AS number.
        - Authentication (str(null | md5)): Select the type of cryptographic authentication to be used for the BGP peers in this peer range.
        - BfdModeOfOperation (str(multiHop | singleHop)): Indicates whether to use a single-hop or a multi-hop mode of operation for the BFD session being created with a BGP peer.
        - BgpId (str): The BGP ID used in OPEN messages.
        - DutIpAddress (str): The IP address of the DUT router.
        - Enable4ByteAsNum (bool): Enables the 4-byte Autonomous System (AS) number of the DUT/SUT.
        - EnableActAsRestarted (bool): Controls the operation of BGP Graceful Restart.
        - EnableBfdRegistration (bool): Enables the BFD registration.
        - EnableBgpId (bool): The BGP ID used in OPEN messages.
        - EnableDiscardIxiaGeneratedRoutes (bool): If true, enables the discard of Ixia generated routes
        - EnableGracefulRestart (bool): Controls the operation of BGP Graceful Restart.
        - EnableLinkFlap (bool): If true, enables link flap
        - EnableNextHop (bool): Used for IPv4 traffic. Controls the use of the NEXT_HOP attribute. (default = disabled)
        - EnableOptionalParameters (bool): Controls how an OPEN is conducted in the presence of optional parameters.
        - EnableSendIxiaSignatureWithRoutes (bool): If true, enables sending of Ixia signature with routes
        - EnableStaggeredStart (bool): Controls the staggering and period of initial start messages.
        - Enabled (bool): Enables or disables simulation of the router.
        - Evpn (bool): If enabled, then this BGP peer range supports BGP MPLS Based Ethernet VPN per draft-ietf-l2vpn-evpn-03. Default value is false.
        - EvpnNextHopCount (number): It is used to replicate the traffic among the available Next Hops in Ingress Replication mode. Default value is 1. Minimum value is 1 and maximum value is 255.
        - HoldTimer (number): The period of time between KEEP-ALIVE messages sent to the DUT.
        - InterfaceStartIndex (number): The assigned protocol interface ID for this SM interface.
        - InterfaceType (str): The type of interface to be selected for this BGP interface. One of:Protocol Interface, DHCP, PPP
        - Interfaces (str(None | /api/v1/sessions/1/ixnetwork/vport/.../interface | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range | /api/v1/sessions/1/ixnetwork/vport/.../range)): The interfaces that are associated with the selected interface type.
        - IpV4Mdt (bool): Enables the use of this Data MDT range on the simulated interface.
        - IpV4Mpls (bool): If enabled, this BGP router/peer supports the IPv4 MPLS address family.
        - IpV4MplsVpn (bool): If enabled, this BGP router/peer supports the IPv4 MPLS/VPN address family.
        - IpV4Multicast (bool): If enabled, this BGP router/peer supports the IPv4 multicast address family.
        - IpV4MulticastVpn (bool): If true, this BGP router/peer supports the IPv4 Multicast/VPN address family.
        - IpV4Unicast (bool): If enabled, this BGP router/peer supports the IPv4 unicast address family.
        - IpV6Mpls (bool): If enabled, this BGP router/peer supports the IPv6 MPLS address family.
        - IpV6MplsVpn (bool): If enabled, this BGP router/peer supports the IPv6 MPLS/VPN address family.
        - IpV6Multicast (bool): If enabled, this BGP router/peer supports the IPv6 multicast address family.
        - IpV6MulticastVpn (bool): If true, this BGP router/peer supports the IPv6 Multicast/VPN address family.
        - IpV6Unicast (bool): If enabled, this BGP router/peer supports the IPv6 unicast address family.
        - IsAsbr (bool): If true, it is ASBR
        - IsInterfaceLearnedInfoAvailable (bool): If true, learned information is made avavilable.
        - IsLearnedInfoRefreshed (bool): If true, learned information is refreshed.
        - LinkFlapDownTime (number): Signifies the link flap down time
        - LinkFlapUpTime (number): Signifies the link flap up time
        - LocalAsNumber (str): (External only) The first AS Num assigned to the simulated neighbor router. May be set for external neighbors on any port type, but only Linux-based ports may set this for internal neighbors.
        - LocalIpAddress (str): The first IP address for the simulated neighbor routers and the number of routers.
        - Md5Key (str): (Active only when MD5 is selected in the Authentication Type field.) (String) Enter a value to be used as a secret MD5 Key for authentication. The maximum length allowed is 255 characters.One MD5 key can be configured per BGP peer range. Sessions from all peers in this peer range will use this MD5 key if MD5 is enabled.
        - NextHop (str): If enableNextHop is true, this is the IPv4 address used as the next hop. (default = 0.0.0.0)
        - NumUpdatesPerIteration (number): When the protocol server operates on older ports that do not possess a local processor, this tuning parameter controls how many UPDATE messages will be sent at a time. When many routers are being simulated on such a port, changing this value may help to increase or decrease performance. (default = 1)
        - RangeCount (number): The number of routers.
        - RemoteAsNumber (number): The remote Autonomous System number associated with the routers.
        - RestartTime (number): Controls the operation of BGP Graceful Restart.
        - StaggeredStartPeriod (number): Controls the staggering and period of initial start messages.
        - StaleTime (number): Controls the operation of BGP Graceful Restart.
        - TcpWindowSize (number): (External neighbor only) The TCP window used for communications from the neighbor. (default = 8,192)
        - TrafficGroupId (str(None | /api/v1/sessions/1/ixnetwork/traffic/.../trafficGroup)): The name of the group to which this port is assigned, for the purpose of creating traffic streams among source/destination members of the group.
        - TtlValue (number): The limited number of iterations that a unit of data can experience before the data is discarded.
        - Type (str(internal | external)): Indicates that the neighbor is either an internal or external router.
        - UpdateInterval (number): The frequency with which UPDATE messages are sent to the DUT.
        - Vpls (bool): If enabled, this BGP router/peer supports BGP VPLS per the Kompella draft.
        Returns
        -------
        - self: This instance with matching neighborRange resources retrieved from the server available through an iterator or index
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # NOTE(review): _map_locals presumably drops None-valued parameters
        # before the server-side select is issued — confirm in the SDK base class.
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
    def read(self, href):
        """Retrieves a single instance of neighborRange data from the server.

        Args
        ----
        - href (str): An href to the instance to be retrieved
        Returns
        -------
        - self: This instance with the neighborRange resources from the server available through an iterator or index
        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Direct fetch by href; no filtering is applied server-side.
        return self._read(href)
def GetInterfaceAccessorIfaceList(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the getInterfaceAccessorIfaceList operation on the server.
?
getInterfaceAccessorIfaceList(async_operation=bool)string
---------------------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: NOT DEFINED
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getInterfaceAccessorIfaceList', payload=payload, response_object=None)
def GetInterfaceLearnedInfo(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the getInterfaceLearnedInfo operation on the server.
This function allows to Get the interface learned information.
getInterfaceLearnedInfo(async_operation=bool)string
---------------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: NOT DEFINED
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getInterfaceLearnedInfo', payload=payload, response_object=None)
def RefreshLearnedInfo(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[bool, None]
"""Executes the refreshLearnedInfo operation on the server.
This function allows to refresh the BGP learned information from the DUT.
refreshLearnedInfo(async_operation=bool)bool
--------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns bool: NOT DEFINED
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('refreshLearnedInfo', payload=payload, response_object=None)
| 50.307075
| 1,162
| 0.669281
| 7,775
| 68,971
| 5.850289
| 0.063923
| 0.014774
| 0.022161
| 0.031724
| 0.784175
| 0.773557
| 0.736139
| 0.730884
| 0.697577
| 0.660577
| 0
| 0.017899
| 0.228835
| 68,971
| 1,370
| 1,163
| 50.343796
| 0.837294
| 0.530339
| 0
| 0.174603
| 0
| 0
| 0.129117
| 0.03155
| 0
| 0
| 0
| 0
| 0
| 1
| 0.255952
| false
| 0
| 0.037698
| 0
| 0.474206
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
863c65045dfb6f60abd825ced52dcad3ebb48bd1
| 3,648
|
py
|
Python
|
permissions/migrations/0004_add_view_users_permission.py
|
SuviVappula/tilavarauspalvelu-core
|
ad7dec36e392a7b2927e2f825c3b0eb29b700793
|
[
"MIT"
] | null | null | null |
permissions/migrations/0004_add_view_users_permission.py
|
SuviVappula/tilavarauspalvelu-core
|
ad7dec36e392a7b2927e2f825c3b0eb29b700793
|
[
"MIT"
] | 90
|
2020-11-13T07:42:32.000Z
|
2022-03-29T08:54:20.000Z
|
permissions/migrations/0004_add_view_users_permission.py
|
SuviVappula/tilavarauspalvelu-core
|
ad7dec36e392a7b2927e2f825c3b0eb29b700793
|
[
"MIT"
] | 8
|
2021-02-10T11:31:22.000Z
|
2022-01-28T14:33:47.000Z
|
# Generated by Django 3.1.7 on 2021-03-12 06:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Update role-permission choices, adding the view-users permission.

    Fixes in this revision of the migration:
    - The general-role choices list contained 'can_view_reservations' and
      'can_manage_reservations' twice each, with copy-pasted labels from the
      equipment permission ("Can create, edit and delete equipment ...").
      The duplicates with wrong labels are removed; the correctly-labelled
      entries are kept.
    - Typo "service sectorsfor" corrected to "service sectors for".
    """

    dependencies = [
        ('permissions', '0003_change_general_role_choices'),
    ]

    operations = [
        migrations.AlterField(
            model_name='generalrolepermission',
            name='permission',
            field=models.CharField(
                choices=[
                    ('can_manage_general_roles', 'Can manage general roles for the whole system'),
                    ('can_manage_service_sector_roles', 'Can manage roles for service sectors for the whole system'),
                    ('can_manage_unit_roles', 'Can manage roles for units in the whole system'),
                    ('can_manage_reservation_units', 'Can create, edit and delete reservation units in the whole system'),
                    ('can_manage_purposes', 'Can create, edit and delete purposes in the whole system'),
                    ('can_manage_age_groups', 'Can create, edit and delete age groups in the whole system'),
                    ('can_manage_districts', 'Can create, edit and delete districts in the whole system'),
                    ('can_manage_ability_groups', 'Can create, edit and delete ability groups in the whole system'),
                    ('can_manage_reservation_unit_types', 'Can create, edit and delete reservation unit types in the whole system'),
                    ('can_manage_equipment_categories', 'Can create, edit and delete equipment_categories in the whole system'),
                    ('can_manage_equipment', 'Can create, edit and delete equipment in the whole system'),
                    ('can_manage_reservations', 'Can create, edit and cancel reservations in the whole system'),
                    ('can_view_reservations', 'Can view details of all reservations in the whole system'),
                    ('can_manage_resources', 'Can create, edit and delete resources in the whole system'),
                    ('can_handle_applications', 'Can handle applications in the whole system'),
                    ('can_manage_application_rounds', 'Can create, edit and delete application rounds in the whole system'),
                    ('can_view_users', 'Can view users in the whole system'),
                ],
                max_length=255,
                verbose_name='Permission',
            ),
        ),
        migrations.AlterField(
            model_name='servicesectorrolepermission',
            name='permission',
            field=models.CharField(
                choices=[
                    ('can_manage_service_sector_roles', 'Can modify roles for the service sector'),
                    ('can_manage_unit_roles', 'Can modify roles for units in the service sector'),
                    ('can_manage_reservation_units', 'Can create, edit and delete reservation units in certain unit'),
                    ('can_manage_application_rounds', 'Can create, edit and delete application rounds in the service sector'),
                    ('can_handle_applications', 'Can handle applications in the service sector'),
                    ('can_manage_reservations', 'Can create, edit and cancel reservations in the service sector'),
                    ('can_view_reservations', 'Can view details of all reservations in the service sector'),
                    ('can_view_users', 'Can view users in the whole system'),
                ],
                max_length=255,
                verbose_name='Permission',
            ),
        ),
        migrations.AlterField(
            model_name='unitrolepermission',
            name='permission',
            field=models.CharField(
                choices=[
                    ('can_manage_unit_roles', 'Can modify roles for the unit'),
                    ('can_manage_reservation_units', 'Can create, edit and delete reservation units in the unit'),
                    ('can_manage_reservations', 'Can create, edit and cancel reservations in the unit'),
                    ('can_view_reservations', 'Can view details of all reservations in the unit'),
                    ('can_view_users', 'Can view users in the whole system'),
                ],
                max_length=255,
                verbose_name='Permission',
            ),
        ),
    ]
| 125.793103
| 1,795
| 0.744243
| 494
| 3,648
| 5.301619
| 0.161943
| 0.051546
| 0.112257
| 0.116075
| 0.832379
| 0.759832
| 0.636884
| 0.557465
| 0.435281
| 0.435281
| 0
| 0.009061
| 0.152961
| 3,648
| 28
| 1,796
| 130.285714
| 0.838511
| 0.012336
| 0
| 0.409091
| 1
| 0
| 0.72702
| 0.195224
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.045455
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
866b1b1b1a8f233c4cb8eee27ee901e92dabf184
| 49,228
|
py
|
Python
|
tests/components/hyperion/test_light.py
|
sunfirester/core
|
9a4f6d0c8018679f84508911d60c2cd1ec3609ce
|
[
"Apache-2.0"
] | 2
|
2020-03-29T05:32:57.000Z
|
2021-06-13T06:55:05.000Z
|
tests/components/hyperion/test_light.py
|
sunfirester/core
|
9a4f6d0c8018679f84508911d60c2cd1ec3609ce
|
[
"Apache-2.0"
] | 63
|
2020-12-21T08:17:27.000Z
|
2022-03-31T06:04:42.000Z
|
tests/components/hyperion/test_light.py
|
kmdm/home-assistant
|
4007430d7262ef035bb80affea13657fdc993b1d
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for the Hyperion integration."""
from __future__ import annotations
from datetime import timedelta
from unittest.mock import AsyncMock, Mock, call, patch
from hyperion import const
from homeassistant.components.hyperion import (
get_hyperion_device_id,
light as hyperion_light,
)
from homeassistant.components.hyperion.const import (
CONF_EFFECT_HIDE_LIST,
DEFAULT_ORIGIN,
DOMAIN,
HYPERION_MANUFACTURER_NAME,
HYPERION_MODEL_NAME,
TYPE_HYPERION_PRIORITY_LIGHT,
)
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_EFFECT,
ATTR_HS_COLOR,
DOMAIN as LIGHT_DOMAIN,
)
from homeassistant.config_entries import (
RELOAD_AFTER_UPDATE_DELAY,
SOURCE_REAUTH,
ConfigEntry,
ConfigEntryState,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
CONF_PORT,
CONF_SOURCE,
CONF_TOKEN,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.util import dt
import homeassistant.util.color as color_util
from . import (
TEST_AUTH_NOT_REQUIRED_RESP,
TEST_AUTH_REQUIRED_RESP,
TEST_CONFIG_ENTRY_ID,
TEST_ENTITY_ID_1,
TEST_ENTITY_ID_2,
TEST_ENTITY_ID_3,
TEST_HOST,
TEST_ID,
TEST_INSTANCE,
TEST_INSTANCE_1,
TEST_INSTANCE_2,
TEST_INSTANCE_3,
TEST_PORT,
TEST_PRIORITY,
TEST_PRIORITY_LIGHT_ENTITY_ID_1,
TEST_SYSINFO_ID,
add_test_config_entry,
call_registered_callback,
create_mock_client,
register_test_entity,
setup_test_config_entry,
)
from tests.common import async_fire_time_changed
# The RGB tuple that the HA color utility maps the name "black" to.
COLOR_BLACK = color_util.COLORS["black"]
def _get_config_entry_from_unique_id(
    hass: HomeAssistant, unique_id: str
) -> ConfigEntry | None:
    """Return the Hyperion config entry whose unique_id matches, or None.

    Bug fix: the original compared every entry against the fixed
    TEST_SYSINFO_ID constant, silently ignoring the unique_id parameter.
    Existing callers pass TEST_SYSINFO_ID, so behavior is unchanged for them.
    """
    for entry in hass.config_entries.async_entries(domain=DOMAIN):
        if entry.unique_id == unique_id:
            return entry
    return None
async def test_setup_config_entry(hass: HomeAssistant) -> None:
    """Verify the integration sets up from a config entry."""
    client = create_mock_client()
    await setup_test_config_entry(hass, hyperion_client=client)
    entity_state = hass.states.get(TEST_ENTITY_ID_1)
    assert entity_state is not None
async def test_setup_config_entry_not_ready_connect_fail(
    hass: HomeAssistant,
) -> None:
    """Verify setup does not complete when the client cannot connect."""
    client = create_mock_client()
    # Simulate a connection failure from the Hyperion client.
    client.async_client_connect = AsyncMock(return_value=False)
    await setup_test_config_entry(hass, hyperion_client=client)
    entity_state = hass.states.get(TEST_ENTITY_ID_1)
    assert entity_state is None
async def test_setup_config_entry_not_ready_switch_instance_fail(
    hass: HomeAssistant,
) -> None:
    """Verify setup does not complete when switching instances fails."""
    client = create_mock_client()
    # Simulate an instance-switch failure from the Hyperion client.
    client.async_client_switch_instance = AsyncMock(return_value=False)
    await setup_test_config_entry(hass, hyperion_client=client)
    # The client must be disconnected and no entity created.
    assert client.async_client_disconnect.called
    entity_state = hass.states.get(TEST_ENTITY_ID_1)
    assert entity_state is None
async def test_setup_config_entry_not_ready_load_state_fail(
    hass: HomeAssistant,
) -> None:
    """Verify setup does not complete when loading server state fails."""
    client = create_mock_client()
    # Simulate the server rejecting the serverinfo request.
    failed_response = {
        "command": "serverinfo",
        "success": False,
    }
    client.async_get_serverinfo = AsyncMock(return_value=failed_response)
    await setup_test_config_entry(hass, hyperion_client=client)
    # The client must be disconnected and no entity created.
    assert client.async_client_disconnect.called
    entity_state = hass.states.get(TEST_ENTITY_ID_1)
    assert entity_state is None
async def test_setup_config_entry_dynamic_instances(hass: HomeAssistant) -> None:
    """Test dynamic changes in the instance configuration.

    Drives the master client's instance-update callback through four
    transitions (stop, remove, re-add stopped, re-add running) and checks
    entity presence/registration after each one.
    """
    registry = er.async_get(hass)
    config_entry = add_test_config_entry(hass)
    master_client = create_mock_client()
    master_client.instances = [TEST_INSTANCE_1, TEST_INSTANCE_2]
    entity_client = create_mock_client()
    entity_client.instances = master_client.instances
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient",
        side_effect=[master_client, entity_client, entity_client],
    ):
        await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
    assert hass.states.get(TEST_ENTITY_ID_1) is not None
    assert hass.states.get(TEST_ENTITY_ID_2) is not None
    assert master_client.set_callbacks.called
    # == Inject a new instances update (stop instance 1, add instance 3)
    instance_callback = master_client.set_callbacks.call_args[0][0][
        f"{const.KEY_INSTANCE}-{const.KEY_UPDATE}"
    ]
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient",
        return_value=entity_client,
    ):
        await instance_callback(
            {
                const.KEY_SUCCESS: True,
                const.KEY_DATA: [
                    {**TEST_INSTANCE_1, "running": False},
                    TEST_INSTANCE_2,
                    TEST_INSTANCE_3,
                ],
            }
        )
        await hass.async_block_till_done()
    assert hass.states.get(TEST_ENTITY_ID_1) is None
    assert hass.states.get(TEST_ENTITY_ID_2) is not None
    assert hass.states.get(TEST_ENTITY_ID_3) is not None
    # Instance 1 is stopped, it should still be registered.
    assert registry.async_is_registered(TEST_ENTITY_ID_1)
    # == Inject a new instances update (remove instance 1)
    assert master_client.set_callbacks.called
    instance_callback = master_client.set_callbacks.call_args[0][0][
        f"{const.KEY_INSTANCE}-{const.KEY_UPDATE}"
    ]
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient",
        return_value=entity_client,
    ):
        await instance_callback(
            {
                const.KEY_SUCCESS: True,
                const.KEY_DATA: [TEST_INSTANCE_2, TEST_INSTANCE_3],
            }
        )
        await hass.async_block_till_done()
    assert hass.states.get(TEST_ENTITY_ID_1) is None
    assert hass.states.get(TEST_ENTITY_ID_2) is not None
    assert hass.states.get(TEST_ENTITY_ID_3) is not None
    # Instance 1 is removed, it should not still be registered.
    assert not registry.async_is_registered(TEST_ENTITY_ID_1)
    # == Inject a new instances update (re-add instance 1, but not running)
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient",
        return_value=entity_client,
    ):
        await instance_callback(
            {
                const.KEY_SUCCESS: True,
                const.KEY_DATA: [
                    {**TEST_INSTANCE_1, "running": False},
                    TEST_INSTANCE_2,
                    TEST_INSTANCE_3,
                ],
            }
        )
        await hass.async_block_till_done()
    # A non-running re-added instance must not create an entity.
    assert hass.states.get(TEST_ENTITY_ID_1) is None
    assert hass.states.get(TEST_ENTITY_ID_2) is not None
    assert hass.states.get(TEST_ENTITY_ID_3) is not None
    # == Inject a new instances update (re-add instance 1, running)
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient",
        return_value=entity_client,
    ):
        await instance_callback(
            {
                const.KEY_SUCCESS: True,
                const.KEY_DATA: [TEST_INSTANCE_1, TEST_INSTANCE_2, TEST_INSTANCE_3],
            }
        )
        await hass.async_block_till_done()
    assert hass.states.get(TEST_ENTITY_ID_1) is not None
    assert hass.states.get(TEST_ENTITY_ID_2) is not None
    assert hass.states.get(TEST_ENTITY_ID_3) is not None
async def test_light_basic_properies(hass: HomeAssistant) -> None:
    """Verify the default state and attributes of the Hyperion light."""
    client = create_mock_client()
    await setup_test_config_entry(hass, hyperion_client=client)
    entity_state = hass.states.get(TEST_ENTITY_ID_1)
    assert entity_state
    assert entity_state.state == "on"
    attributes = entity_state.attributes
    assert attributes["brightness"] == 255
    assert attributes["hs_color"] == (0.0, 0.0)
    assert attributes["icon"] == hyperion_light.ICON_LIGHTBULB
    assert attributes["effect"] == hyperion_light.KEY_EFFECT_SOLID
    # By default the effect list is the 3 external sources + 'Solid'.
    assert len(attributes["effect_list"]) == 4
    assert attributes["supported_features"] == hyperion_light.SUPPORT_HYPERION
async def test_light_async_turn_on(hass: HomeAssistant) -> None:
    """Test turning the light on.

    Walks through brightness, color, external-source and effect changes in a
    fixed sequence; the section comments use (=) for unchanged and (!) for
    the attribute being changed by that step.
    """
    client = create_mock_client()
    await setup_test_config_entry(hass, hyperion_client=client)
    # On (=), 100% (=), solid (=), [255,255,255] (=)
    client.async_send_set_color = AsyncMock(return_value=True)
    await hass.services.async_call(
        LIGHT_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: TEST_ENTITY_ID_1}, blocking=True
    )
    assert client.async_send_set_color.call_args == call(
        **{
            const.KEY_PRIORITY: TEST_PRIORITY,
            const.KEY_COLOR: [255, 255, 255],
            const.KEY_ORIGIN: DEFAULT_ORIGIN,
        }
    )
    # On (=), 50% (!), solid (=), [255,255,255] (=)
    # ===
    brightness = 128
    client.async_send_set_color = AsyncMock(return_value=True)
    client.async_send_set_adjustment = AsyncMock(return_value=True)
    client.adjustment = [{const.KEY_ID: TEST_ID}]
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_BRIGHTNESS: brightness},
        blocking=True,
    )
    # 128/255 maps to 50 on Hyperion's 0-100 brightness scale.
    assert client.async_send_set_adjustment.call_args == call(
        **{const.KEY_ADJUSTMENT: {const.KEY_BRIGHTNESS: 50, const.KEY_ID: TEST_ID}}
    )
    assert client.async_send_set_color.call_args == call(
        **{
            const.KEY_PRIORITY: TEST_PRIORITY,
            const.KEY_COLOR: [255, 255, 255],
            const.KEY_ORIGIN: DEFAULT_ORIGIN,
        }
    )
    # Simulate a false return of async_send_set_adjustment
    client.async_send_set_adjustment = AsyncMock(return_value=False)
    client.adjustment = [{const.KEY_ID: TEST_ID}]
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_BRIGHTNESS: brightness},
        blocking=True,
    )
    # Simulate a state callback from Hyperion.
    client.adjustment = [{const.KEY_BRIGHTNESS: 50}]
    call_registered_callback(client, "adjustment-update")
    entity_state = hass.states.get(TEST_ENTITY_ID_1)
    assert entity_state
    assert entity_state.state == "on"
    assert entity_state.attributes["brightness"] == brightness
    # On (=), 50% (=), solid (=), [0,255,255] (!)
    hs_color = (180.0, 100.0)
    client.async_send_set_color = AsyncMock(return_value=True)
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_HS_COLOR: hs_color},
        blocking=True,
    )
    assert client.async_send_set_color.call_args == call(
        **{
            const.KEY_PRIORITY: TEST_PRIORITY,
            const.KEY_COLOR: (0, 255, 255),
            const.KEY_ORIGIN: DEFAULT_ORIGIN,
        }
    )
    # Simulate a state callback from Hyperion.
    client.visible_priority = {
        const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
        const.KEY_VALUE: {const.KEY_RGB: (0, 255, 255)},
    }
    call_registered_callback(client, "priorities-update")
    entity_state = hass.states.get(TEST_ENTITY_ID_1)
    assert entity_state
    assert entity_state.attributes["hs_color"] == hs_color
    assert entity_state.attributes["icon"] == hyperion_light.ICON_LIGHTBULB
    # On (=), 100% (!), solid, [0,255,255] (=)
    brightness = 255
    client.async_send_set_color = AsyncMock(return_value=True)
    client.async_send_set_adjustment = AsyncMock(return_value=True)
    client.adjustment = [{const.KEY_ID: TEST_ID}]
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_BRIGHTNESS: brightness},
        blocking=True,
    )
    assert client.async_send_set_adjustment.call_args == call(
        **{const.KEY_ADJUSTMENT: {const.KEY_BRIGHTNESS: 100, const.KEY_ID: TEST_ID}}
    )
    assert client.async_send_set_color.call_args == call(
        **{
            const.KEY_PRIORITY: TEST_PRIORITY,
            const.KEY_COLOR: (0, 255, 255),
            const.KEY_ORIGIN: DEFAULT_ORIGIN,
        }
    )
    client.adjustment = [{const.KEY_BRIGHTNESS: 100}]
    call_registered_callback(client, "adjustment-update")
    entity_state = hass.states.get(TEST_ENTITY_ID_1)
    assert entity_state
    assert entity_state.attributes["brightness"] == brightness
    # On (=), 100% (=), "USB Capture" (!), [0,255,255] (=)
    component = "V4L"
    effect = const.KEY_COMPONENTID_TO_NAME[component]
    client.async_send_clear = AsyncMock(return_value=True)
    client.async_send_set_component = AsyncMock(return_value=True)
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_EFFECT: effect},
        blocking=True,
    )
    assert client.async_send_clear.call_args == call(
        **{const.KEY_PRIORITY: TEST_PRIORITY}
    )
    # Selecting an external source disables the other sources and enables
    # the requested one, in order.
    assert client.async_send_set_component.call_args_list == [
        call(
            **{
                const.KEY_COMPONENTSTATE: {
                    const.KEY_COMPONENT: const.KEY_COMPONENTID_EXTERNAL_SOURCES[0],
                    const.KEY_STATE: False,
                }
            }
        ),
        call(
            **{
                const.KEY_COMPONENTSTATE: {
                    const.KEY_COMPONENT: const.KEY_COMPONENTID_EXTERNAL_SOURCES[1],
                    const.KEY_STATE: False,
                }
            }
        ),
        call(
            **{
                const.KEY_COMPONENTSTATE: {
                    const.KEY_COMPONENT: const.KEY_COMPONENTID_EXTERNAL_SOURCES[2],
                    const.KEY_STATE: True,
                }
            }
        ),
    ]
    client.visible_priority = {const.KEY_COMPONENTID: component}
    call_registered_callback(client, "priorities-update")
    entity_state = hass.states.get(TEST_ENTITY_ID_1)
    assert entity_state
    assert entity_state.attributes["icon"] == hyperion_light.ICON_EXTERNAL_SOURCE
    assert entity_state.attributes["effect"] == effect
    # On (=), 100% (=), "Warm Blobs" (!), [0,255,255] (=)
    effect = "Warm Blobs"
    client.async_send_clear = AsyncMock(return_value=True)
    client.async_send_set_effect = AsyncMock(return_value=True)
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_EFFECT: effect},
        blocking=True,
    )
    assert client.async_send_clear.call_args == call(
        **{const.KEY_PRIORITY: TEST_PRIORITY}
    )
    assert client.async_send_set_effect.call_args == call(
        **{
            const.KEY_PRIORITY: TEST_PRIORITY,
            const.KEY_EFFECT: {const.KEY_NAME: effect},
            const.KEY_ORIGIN: DEFAULT_ORIGIN,
        }
    )
    client.visible_priority = {
        const.KEY_COMPONENTID: const.KEY_COMPONENTID_EFFECT,
        const.KEY_OWNER: effect,
    }
    call_registered_callback(client, "priorities-update")
    entity_state = hass.states.get(TEST_ENTITY_ID_1)
    assert entity_state
    assert entity_state.attributes["icon"] == hyperion_light.ICON_EFFECT
    assert entity_state.attributes["effect"] == effect
    # On (=), 100% (=), [0,0,255] (!)
    # Ensure changing the color will move the effect to 'Solid' automatically.
    hs_color = (240.0, 100.0)
    client.async_send_set_color = AsyncMock(return_value=True)
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_HS_COLOR: hs_color},
        blocking=True,
    )
    assert client.async_send_set_color.call_args == call(
        **{
            const.KEY_PRIORITY: TEST_PRIORITY,
            const.KEY_COLOR: (0, 0, 255),
            const.KEY_ORIGIN: DEFAULT_ORIGIN,
        }
    )
    # Simulate a state callback from Hyperion.
    client.visible_priority = {
        const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
        const.KEY_VALUE: {const.KEY_RGB: (0, 0, 255)},
    }
    call_registered_callback(client, "priorities-update")
    entity_state = hass.states.get(TEST_ENTITY_ID_1)
    assert entity_state
    assert entity_state.attributes["hs_color"] == hs_color
    assert entity_state.attributes["icon"] == hyperion_light.ICON_LIGHTBULB
    assert entity_state.attributes["effect"] == hyperion_light.KEY_EFFECT_SOLID
    # No calls if disconnected.
    client.has_loaded_state = False
    call_registered_callback(client, "client-update", {"loaded-state": False})
    client.async_send_clear = AsyncMock(return_value=True)
    client.async_send_set_effect = AsyncMock(return_value=True)
    await hass.services.async_call(
        LIGHT_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: TEST_ENTITY_ID_1}, blocking=True
    )
    assert not client.async_send_clear.called
    assert not client.async_send_set_effect.called
async def test_light_async_turn_on_fail_async_send_set_component(
hass: HomeAssistant,
) -> None:
"""Test set_component failure when turning the light on."""
client = create_mock_client()
client.async_send_set_component = AsyncMock(return_value=False)
client.is_on = Mock(return_value=False)
await setup_test_config_entry(hass, hyperion_client=client)
await hass.services.async_call(
LIGHT_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: TEST_ENTITY_ID_1}, blocking=True
)
assert client.method_calls[-1] == call.async_send_set_component(
componentstate={"component": "ALL", "state": True}
)
async def test_light_async_turn_on_fail_async_send_set_component_source(
hass: HomeAssistant,
) -> None:
"""Test async_send_set_component failure when selecting the source."""
client = create_mock_client()
client.async_send_clear = AsyncMock(return_value=True)
client.async_send_set_component = AsyncMock(return_value=False)
client.is_on = Mock(return_value=True)
await setup_test_config_entry(hass, hyperion_client=client)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: TEST_ENTITY_ID_1,
ATTR_EFFECT: const.KEY_COMPONENTID_TO_NAME["V4L"],
},
blocking=True,
)
assert client.method_calls[-1] == call.async_send_set_component(
componentstate={"component": "BOBLIGHTSERVER", "state": False}
)
async def test_light_async_turn_on_fail_async_send_clear_source(
hass: HomeAssistant,
) -> None:
"""Test async_send_clear failure when turning the light on."""
client = create_mock_client()
client.is_on = Mock(return_value=True)
client.async_send_clear = AsyncMock(return_value=False)
await setup_test_config_entry(hass, hyperion_client=client)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: TEST_ENTITY_ID_1,
ATTR_EFFECT: const.KEY_COMPONENTID_TO_NAME["V4L"],
},
blocking=True,
)
assert client.method_calls[-1] == call.async_send_clear(priority=180)
async def test_light_async_turn_on_fail_async_send_clear_effect(
hass: HomeAssistant,
) -> None:
"""Test async_send_clear failure when turning on an effect."""
client = create_mock_client()
client.is_on = Mock(return_value=True)
client.async_send_clear = AsyncMock(return_value=False)
await setup_test_config_entry(hass, hyperion_client=client)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_EFFECT: "Warm Mood Blobs"},
blocking=True,
)
assert client.method_calls[-1] == call.async_send_clear(priority=180)
async def test_light_async_turn_on_fail_async_send_set_effect(
hass: HomeAssistant,
) -> None:
"""Test async_send_set_effect failure when turning on the light."""
client = create_mock_client()
client.is_on = Mock(return_value=True)
client.async_send_clear = AsyncMock(return_value=True)
client.async_send_set_effect = AsyncMock(return_value=False)
await setup_test_config_entry(hass, hyperion_client=client)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_EFFECT: "Warm Mood Blobs"},
blocking=True,
)
assert client.method_calls[-1] == call.async_send_set_effect(
priority=180, effect={"name": "Warm Mood Blobs"}, origin="Home Assistant"
)
async def test_light_async_turn_on_fail_async_send_set_color(
hass: HomeAssistant,
) -> None:
"""Test async_send_set_color failure when turning on the light."""
client = create_mock_client()
client.is_on = Mock(return_value=True)
client.async_send_clear = AsyncMock(return_value=True)
client.async_send_set_color = AsyncMock(return_value=False)
await setup_test_config_entry(hass, hyperion_client=client)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_HS_COLOR: (240.0, 100.0)},
blocking=True,
)
assert client.method_calls[-1] == call.async_send_set_color(
priority=180, color=(0, 0, 255), origin="Home Assistant"
)
async def test_light_async_turn_off_fail_async_send_set_component(
hass: HomeAssistant,
) -> None:
"""Test async_send_set_component failure when turning off the light."""
client = create_mock_client()
client.async_send_set_component = AsyncMock(return_value=False)
await setup_test_config_entry(hass, hyperion_client=client)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1},
blocking=True,
)
assert client.method_calls[-1] == call.async_send_set_component(
componentstate={"component": "LEDDEVICE", "state": False}
)
async def test_priority_light_async_turn_off_fail_async_send_clear(
hass: HomeAssistant,
) -> None:
"""Test async_send_clear failure when turning off a priority light."""
client = create_mock_client()
client.async_send_clear = AsyncMock(return_value=False)
with patch(
"homeassistant.components.hyperion.light.HyperionPriorityLight.entity_registry_enabled_default"
) as enabled_by_default_mock:
enabled_by_default_mock.return_value = True
await setup_test_config_entry(hass, hyperion_client=client)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: TEST_PRIORITY_LIGHT_ENTITY_ID_1},
blocking=True,
)
assert client.method_calls[-1] == call.async_send_clear(priority=180)
async def test_light_async_turn_off(hass: HomeAssistant) -> None:
"""Test turning the light off."""
client = create_mock_client()
await setup_test_config_entry(hass, hyperion_client=client)
client.async_send_set_component = AsyncMock(return_value=True)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1},
blocking=True,
)
assert client.async_send_set_component.call_args == call(
**{
const.KEY_COMPONENTSTATE: {
const.KEY_COMPONENT: const.KEY_COMPONENTID_LEDDEVICE,
const.KEY_STATE: False,
}
}
)
call_registered_callback(client, "components-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["icon"] == hyperion_light.ICON_LIGHTBULB
# No calls if no state loaded.
client.has_loaded_state = False
client.async_send_set_component = AsyncMock(return_value=True)
call_registered_callback(client, "client-update", {"loaded-state": False})
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1},
blocking=True,
)
assert not client.async_send_set_component.called
async def test_light_async_updates_from_hyperion_client(
hass: HomeAssistant,
) -> None:
"""Test receiving a variety of Hyperion client callbacks."""
client = create_mock_client()
await setup_test_config_entry(hass, hyperion_client=client)
# Bright change gets accepted.
brightness = 10
client.adjustment = [{const.KEY_BRIGHTNESS: brightness}]
call_registered_callback(client, "adjustment-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["brightness"] == round(255 * (brightness / 100.0))
# Broken brightness value is ignored.
bad_brightness = -200
client.adjustment = [{const.KEY_BRIGHTNESS: bad_brightness}]
call_registered_callback(client, "adjustment-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["brightness"] == round(255 * (brightness / 100.0))
# Update components.
client.is_on.return_value = True
call_registered_callback(client, "components-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "on"
client.is_on.return_value = False
call_registered_callback(client, "components-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
# Update priorities (V4L)
client.is_on.return_value = True
client.visible_priority = {const.KEY_COMPONENTID: const.KEY_COMPONENTID_V4L}
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["icon"] == hyperion_light.ICON_EXTERNAL_SOURCE
assert entity_state.attributes["hs_color"] == (0.0, 0.0)
assert (
entity_state.attributes["effect"]
== const.KEY_COMPONENTID_TO_NAME[const.KEY_COMPONENTID_V4L]
)
# Update priorities (Effect)
effect = "foo"
client.visible_priority = {
const.KEY_COMPONENTID: const.KEY_COMPONENTID_EFFECT,
const.KEY_OWNER: effect,
}
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["effect"] == effect
assert entity_state.attributes["icon"] == hyperion_light.ICON_EFFECT
assert entity_state.attributes["hs_color"] == (0.0, 0.0)
# Update priorities (Color)
rgb = (0, 100, 100)
client.visible_priority = {
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: rgb},
}
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["effect"] == hyperion_light.KEY_EFFECT_SOLID
assert entity_state.attributes["icon"] == hyperion_light.ICON_LIGHTBULB
assert entity_state.attributes["hs_color"] == (180.0, 100.0)
# Update priorities (None)
client.visible_priority = None
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
# Update effect list
effects = [{const.KEY_NAME: "One"}, {const.KEY_NAME: "Two"}]
client.effects = effects
call_registered_callback(client, "effects-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["effect_list"] == [
hyperion_light.KEY_EFFECT_SOLID
] + [
const.KEY_COMPONENTID_TO_NAME[component]
for component in const.KEY_COMPONENTID_EXTERNAL_SOURCES
] + [
effect[const.KEY_NAME] for effect in effects
]
# Update connection status (e.g. disconnection).
# Turn on late, check state, disconnect, ensure it cannot be turned off.
client.has_loaded_state = False
call_registered_callback(client, "client-update", {"loaded-state": False})
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "unavailable"
# Update connection status (e.g. re-connection)
client.has_loaded_state = True
client.visible_priority = {
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: rgb},
}
call_registered_callback(client, "client-update", {"loaded-state": True})
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "on"
async def test_full_state_loaded_on_start(hass: HomeAssistant) -> None:
"""Test receiving a variety of Hyperion client callbacks."""
client = create_mock_client()
# Update full state (should call all update methods).
brightness = 25
client.adjustment = [{const.KEY_BRIGHTNESS: brightness}]
client.visible_priority = {
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: (0, 100, 100)},
}
client.effects = [{const.KEY_NAME: "One"}, {const.KEY_NAME: "Two"}]
await setup_test_config_entry(hass, hyperion_client=client)
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["brightness"] == round(255 * (brightness / 100.0))
assert entity_state.attributes["effect"] == hyperion_light.KEY_EFFECT_SOLID
assert entity_state.attributes["icon"] == hyperion_light.ICON_LIGHTBULB
assert entity_state.attributes["hs_color"] == (180.0, 100.0)
async def test_unload_entry(hass: HomeAssistant) -> None:
"""Test unload."""
client = create_mock_client()
await setup_test_config_entry(hass, hyperion_client=client)
assert hass.states.get(TEST_ENTITY_ID_1) is not None
assert client.async_client_connect.call_count == 2
assert not client.async_client_disconnect.called
entry = _get_config_entry_from_unique_id(hass, TEST_SYSINFO_ID)
assert entry
await hass.config_entries.async_unload(entry.entry_id)
assert client.async_client_disconnect.call_count == 2
async def test_version_log_warning(caplog, hass: HomeAssistant) -> None:
"""Test warning on old version."""
client = create_mock_client()
client.async_sysinfo_version = AsyncMock(return_value="2.0.0-alpha.7")
await setup_test_config_entry(hass, hyperion_client=client)
assert hass.states.get(TEST_ENTITY_ID_1) is not None
assert "Please consider upgrading" in caplog.text
async def test_version_no_log_warning(caplog, hass: HomeAssistant) -> None:
"""Test no warning on acceptable version."""
client = create_mock_client()
client.async_sysinfo_version = AsyncMock(return_value="2.0.0-alpha.9")
await setup_test_config_entry(hass, hyperion_client=client)
assert hass.states.get(TEST_ENTITY_ID_1) is not None
assert "Please consider upgrading" not in caplog.text
async def test_setup_entry_no_token_reauth(hass: HomeAssistant) -> None:
"""Verify a reauth flow when auth is required but no token provided."""
client = create_mock_client()
config_entry = add_test_config_entry(hass)
client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_REQUIRED_RESP)
with patch(
"homeassistant.components.hyperion.client.HyperionClient", return_value=client
), patch.object(hass.config_entries.flow, "async_init") as mock_flow_init:
assert not await hass.config_entries.async_setup(config_entry.entry_id)
assert client.async_client_disconnect.called
mock_flow_init.assert_called_once_with(
DOMAIN,
context={
CONF_SOURCE: SOURCE_REAUTH,
"entry_id": config_entry.entry_id,
"unique_id": config_entry.unique_id,
},
data=config_entry.data,
)
assert config_entry.state is ConfigEntryState.SETUP_ERROR
async def test_setup_entry_bad_token_reauth(hass: HomeAssistant) -> None:
"""Verify a reauth flow when a bad token is provided."""
client = create_mock_client()
config_entry = add_test_config_entry(
hass,
data={CONF_HOST: TEST_HOST, CONF_PORT: TEST_PORT, CONF_TOKEN: "expired_token"},
)
client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_NOT_REQUIRED_RESP)
# Fail to log in.
client.async_client_login = AsyncMock(return_value=False)
with patch(
"homeassistant.components.hyperion.client.HyperionClient", return_value=client
), patch.object(hass.config_entries.flow, "async_init") as mock_flow_init:
assert not await hass.config_entries.async_setup(config_entry.entry_id)
assert client.async_client_disconnect.called
mock_flow_init.assert_called_once_with(
DOMAIN,
context={
CONF_SOURCE: SOURCE_REAUTH,
"entry_id": config_entry.entry_id,
"unique_id": config_entry.unique_id,
},
data=config_entry.data,
)
assert config_entry.state is ConfigEntryState.SETUP_ERROR
async def test_priority_light_async_updates(
hass: HomeAssistant,
) -> None:
"""Test receiving a variety of Hyperion client callbacks to a HyperionPriorityLight."""
priority_template = {
const.KEY_ACTIVE: True,
const.KEY_VISIBLE: True,
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: (100, 100, 100)},
}
client = create_mock_client()
client.priorities = [{**priority_template}]
register_test_entity(
hass,
LIGHT_DOMAIN,
TYPE_HYPERION_PRIORITY_LIGHT,
TEST_PRIORITY_LIGHT_ENTITY_ID_1,
)
await setup_test_config_entry(hass, hyperion_client=client)
# == Scenario: Color at HA priority will show light as on.
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "on"
assert entity_state.attributes["hs_color"] == (0.0, 0.0)
# == Scenario: Color going to black shows the light as off.
client.priorities = [
{
**priority_template,
const.KEY_VALUE: {const.KEY_RGB: COLOR_BLACK},
}
]
client.visible_priority = client.priorities[0]
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
# == Scenario: Lower priority than HA priority should have no impact on what HA
# shows when the HA priority is present.
client.priorities = [
{**priority_template, const.KEY_PRIORITY: TEST_PRIORITY - 1},
{
**priority_template,
const.KEY_VALUE: {const.KEY_RGB: COLOR_BLACK},
},
]
client.visible_priority = client.priorities[0]
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
# == Scenario: Fresh color at HA priority should turn HA entity on (even though
# there's a lower priority enabled/visible in Hyperion).
client.priorities = [
{**priority_template, const.KEY_PRIORITY: TEST_PRIORITY - 1},
{
**priority_template,
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_VALUE: {const.KEY_RGB: (100, 100, 150)},
},
]
client.visible_priority = client.priorities[0]
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "on"
assert entity_state.attributes["hs_color"] == (240.0, 33.333)
# == Scenario: V4L at a higher priority, with no other HA priority at all, should
# have no effect.
# Emulate HA turning the light off with black at the HA priority.
client.priorities = []
client.visible_priority = None
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
# Emulate V4L turning on.
client.priorities = [
{
**priority_template,
const.KEY_PRIORITY: 240,
const.KEY_COMPONENTID: const.KEY_COMPONENTID_V4L,
const.KEY_VALUE: {const.KEY_RGB: (100, 100, 150)},
},
]
client.visible_priority = client.priorities[0]
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
# == Scenario: A lower priority input (lower priority than HA) should have no effect.
client.priorities = [
{
**priority_template,
const.KEY_VISIBLE: True,
const.KEY_PRIORITY: TEST_PRIORITY - 1,
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: (255, 0, 0)},
},
{
**priority_template,
const.KEY_PRIORITY: 240,
const.KEY_COMPONENTID: const.KEY_COMPONENTID_V4L,
const.KEY_VALUE: {const.KEY_RGB: (100, 100, 150)},
const.KEY_VISIBLE: False,
},
]
client.visible_priority = client.priorities[0]
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
# == Scenario: A non-active priority is ignored.
client.priorities = [
{
const.KEY_ACTIVE: False,
const.KEY_VISIBLE: False,
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: (100, 100, 100)},
}
]
client.visible_priority = None
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
# == Scenario: A priority with no ... priority ... is ignored.
client.priorities = [
{
const.KEY_ACTIVE: True,
const.KEY_VISIBLE: True,
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: (100, 100, 100)},
}
]
client.visible_priority = None
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
async def test_priority_light_async_updates_off_sets_black(
hass: HomeAssistant,
) -> None:
"""Test turning the HyperionPriorityLight off."""
client = create_mock_client()
client.priorities = [
{
const.KEY_ACTIVE: True,
const.KEY_VISIBLE: True,
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: (100, 100, 100)},
}
]
register_test_entity(
hass,
LIGHT_DOMAIN,
TYPE_HYPERION_PRIORITY_LIGHT,
TEST_PRIORITY_LIGHT_ENTITY_ID_1,
)
await setup_test_config_entry(hass, hyperion_client=client)
client.async_send_clear = AsyncMock(return_value=True)
client.async_send_set_color = AsyncMock(return_value=True)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: TEST_PRIORITY_LIGHT_ENTITY_ID_1},
blocking=True,
)
assert client.async_send_clear.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
}
)
assert client.async_send_set_color.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COLOR: COLOR_BLACK,
const.KEY_ORIGIN: DEFAULT_ORIGIN,
}
)
async def test_priority_light_prior_color_preserved_after_black(
hass: HomeAssistant,
) -> None:
"""Test that color is preserved in an on->off->on cycle for a HyperionPriorityLight.
For a HyperionPriorityLight the color black is used to indicate off. This test
ensures that a cycle through 'off' will preserve the original color.
"""
priority_template = {
const.KEY_ACTIVE: True,
const.KEY_VISIBLE: True,
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
}
client = create_mock_client()
client.async_send_set_color = AsyncMock(return_value=True)
client.async_send_clear = AsyncMock(return_value=True)
client.priorities = []
client.visible_priority = None
register_test_entity(
hass,
LIGHT_DOMAIN,
TYPE_HYPERION_PRIORITY_LIGHT,
TEST_PRIORITY_LIGHT_ENTITY_ID_1,
)
await setup_test_config_entry(hass, hyperion_client=client)
# Turn the light on full green...
# On (=), 100% (=), solid (=), [0,0,255] (=)
hs_color = (240.0, 100.0)
rgb_color = (0, 0, 255)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_PRIORITY_LIGHT_ENTITY_ID_1, ATTR_HS_COLOR: hs_color},
blocking=True,
)
assert client.async_send_set_color.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COLOR: rgb_color,
const.KEY_ORIGIN: DEFAULT_ORIGIN,
}
)
client.priorities = [
{
**priority_template,
const.KEY_VALUE: {const.KEY_RGB: rgb_color},
}
]
client.visible_priority = client.priorities[0]
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "on"
assert entity_state.attributes["hs_color"] == hs_color
# Then turn it off.
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: TEST_PRIORITY_LIGHT_ENTITY_ID_1},
blocking=True,
)
assert client.async_send_set_color.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COLOR: COLOR_BLACK,
const.KEY_ORIGIN: DEFAULT_ORIGIN,
}
)
client.priorities = [
{
**priority_template,
const.KEY_VALUE: {const.KEY_RGB: COLOR_BLACK},
}
]
client.visible_priority = client.priorities[0]
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
# Then turn it back on and ensure it's still green.
# On (=), 100% (=), solid (=), [0,0,255] (=)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_PRIORITY_LIGHT_ENTITY_ID_1},
blocking=True,
)
assert client.async_send_set_color.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COLOR: rgb_color,
const.KEY_ORIGIN: DEFAULT_ORIGIN,
}
)
client.priorities = [
{
**priority_template,
const.KEY_VALUE: {const.KEY_RGB: rgb_color},
}
]
client.visible_priority = client.priorities[0]
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "on"
assert entity_state.attributes["hs_color"] == hs_color
async def test_priority_light_has_no_external_sources(hass: HomeAssistant) -> None:
"""Ensure a HyperionPriorityLight does not list external sources."""
client = create_mock_client()
client.priorities = []
register_test_entity(
hass,
LIGHT_DOMAIN,
TYPE_HYPERION_PRIORITY_LIGHT,
TEST_PRIORITY_LIGHT_ENTITY_ID_1,
)
await setup_test_config_entry(hass, hyperion_client=client)
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["effect_list"] == [hyperion_light.KEY_EFFECT_SOLID]
async def test_light_option_effect_hide_list(hass: HomeAssistant) -> None:
"""Test the effect_hide_list option."""
client = create_mock_client()
client.effects = [{const.KEY_NAME: "One"}, {const.KEY_NAME: "Two"}]
await setup_test_config_entry(
hass,
hyperion_client=client,
options={CONF_EFFECT_HIDE_LIST: ["Two", "USB Capture"]},
)
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["effect_list"] == [
"Solid",
"Boblight Server",
"Platform Capture",
"One",
]
async def test_device_info(hass: HomeAssistant) -> None:
"""Verify device information includes expected details."""
client = create_mock_client()
register_test_entity(
hass,
LIGHT_DOMAIN,
TYPE_HYPERION_PRIORITY_LIGHT,
TEST_PRIORITY_LIGHT_ENTITY_ID_1,
)
await setup_test_config_entry(hass, hyperion_client=client)
device_id = get_hyperion_device_id(TEST_SYSINFO_ID, TEST_INSTANCE)
device_registry = dr.async_get(hass)
device = device_registry.async_get_device({(DOMAIN, device_id)})
assert device
assert device.config_entries == {TEST_CONFIG_ENTRY_ID}
assert device.identifiers == {(DOMAIN, device_id)}
assert device.manufacturer == HYPERION_MANUFACTURER_NAME
assert device.model == HYPERION_MODEL_NAME
assert device.name == TEST_INSTANCE_1["friendly_name"]
entity_registry = await er.async_get_registry(hass)
entities_from_device = [
entry.entity_id
for entry in er.async_entries_for_device(entity_registry, device.id)
]
assert TEST_PRIORITY_LIGHT_ENTITY_ID_1 in entities_from_device
assert TEST_ENTITY_ID_1 in entities_from_device
async def test_lights_can_be_enabled(hass: HomeAssistant) -> None:
"""Verify lights can be enabled."""
client = create_mock_client()
await setup_test_config_entry(hass, hyperion_client=client)
entity_registry = er.async_get(hass)
entry = entity_registry.async_get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entry
assert entry.disabled
assert entry.disabled_by == er.DISABLED_INTEGRATION
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert not entity_state
with patch(
"homeassistant.components.hyperion.client.HyperionClient",
return_value=client,
):
updated_entry = entity_registry.async_update_entity(
TEST_PRIORITY_LIGHT_ENTITY_ID_1, disabled_by=None
)
assert not updated_entry.disabled
await hass.async_block_till_done()
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(seconds=RELOAD_AFTER_UPDATE_DELAY + 1),
)
await hass.async_block_till_done()
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
async def test_deprecated_effect_names(caplog, hass: HomeAssistant) -> None:
"""Test deprecated effects function and issue a warning."""
client = create_mock_client()
client.async_send_clear = AsyncMock(return_value=True)
client.async_send_set_component = AsyncMock(return_value=True)
await setup_test_config_entry(hass, hyperion_client=client)
for component in const.KEY_COMPONENTID_EXTERNAL_SOURCES:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_EFFECT: component},
blocking=True,
)
assert "Use of Hyperion effect '%s' is deprecated" % component in caplog.text
# Simulate a state callback from Hyperion.
client.visible_priority = {
const.KEY_COMPONENTID: component,
}
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert (
entity_state.attributes["effect"]
== const.KEY_COMPONENTID_TO_NAME[component]
)
async def test_deprecated_effect_names_not_in_effect_list(
hass: HomeAssistant,
) -> None:
"""Test deprecated effects are not in shown effect list."""
await setup_test_config_entry(hass)
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
for component in const.KEY_COMPONENTID_EXTERNAL_SOURCES:
assert component not in entity_state.attributes["effect_list"]
| 34.864023
| 103
| 0.691253
| 6,174
| 49,228
| 5.162941
| 0.055556
| 0.046932
| 0.051198
| 0.031466
| 0.826233
| 0.800477
| 0.767694
| 0.742
| 0.725279
| 0.708935
| 0
| 0.013994
| 0.2176
| 49,228
| 1,411
| 104
| 34.888731
| 0.81361
| 0.050601
| 0
| 0.651786
| 0
| 0
| 0.045429
| 0.013647
| 0
| 0
| 0
| 0
| 0.161607
| 1
| 0.000893
| false
| 0
| 0.013393
| 0
| 0.016071
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d4ac38323f60a74a7333dc32dd045fb804eaf798
| 28
|
py
|
Python
|
tests/test_qualys_quart.py
|
criteo/quart
|
7d018837d7b2f372da3fa83b1b78e4067d89557a
|
[
"Apache-2.0"
] | 1
|
2017-12-07T11:37:51.000Z
|
2017-12-07T11:37:51.000Z
|
tests/test_qualys_quart.py
|
muneebirfan/quart
|
7d018837d7b2f372da3fa83b1b78e4067d89557a
|
[
"Apache-2.0"
] | null | null | null |
tests/test_qualys_quart.py
|
muneebirfan/quart
|
7d018837d7b2f372da3fa83b1b78e4067d89557a
|
[
"Apache-2.0"
] | 2
|
2018-06-26T15:28:10.000Z
|
2022-02-21T11:29:35.000Z
|
import pytest
#No tests
| 7
| 14
| 0.678571
| 4
| 28
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 28
| 3
| 15
| 9.333333
| 0.95
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d4afbb03e5ca739a4bf93b07361b88eb97a5acc8
| 182
|
py
|
Python
|
Server/app/docs/v2/admin/excel/extension.py
|
moreal/DMS-Backend
|
c0a3b28972739c58049a296570bb873f32c03eec
|
[
"MIT"
] | 27
|
2018-01-14T08:07:18.000Z
|
2020-01-20T14:21:17.000Z
|
Server/app/docs/v2/admin/excel/extension.py
|
moreal/DMS-Backend
|
c0a3b28972739c58049a296570bb873f32c03eec
|
[
"MIT"
] | 50
|
2018-02-12T12:51:33.000Z
|
2018-08-28T00:48:31.000Z
|
Server/app/docs/v2/admin/excel/extension.py
|
moreal/DMS-Backend
|
c0a3b28972739c58049a296570bb873f32c03eec
|
[
"MIT"
] | 10
|
2018-03-31T16:30:32.000Z
|
2021-03-02T10:30:31.000Z
|
from app.docs.v2.admin.excel import generate_excel_doc
EXTENSION_11_EXCEL_DOWNLOAD_GET = generate_excel_doc('11시 연장')
EXTENSION_12_EXCEL_DOWNLOAD_GET = generate_excel_doc('12시 연장')
| 36.4
| 62
| 0.851648
| 30
| 182
| 4.7
| 0.566667
| 0.276596
| 0.340426
| 0.340426
| 0.453901
| 0.453901
| 0
| 0
| 0
| 0
| 0
| 0.053254
| 0.071429
| 182
| 4
| 63
| 45.5
| 0.781065
| 0
| 0
| 0
| 1
| 0
| 0.065934
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d4e4502426c69a4d2468fe6b71c0e8a50d1fb62d
| 188
|
py
|
Python
|
util/func.py
|
LauBok/OnlinePokerGames
|
b12e114de8dc60d560058c0d560118912c034505
|
[
"MIT"
] | 1
|
2020-05-02T03:34:55.000Z
|
2020-05-02T03:34:55.000Z
|
util/func.py
|
LauBok/OnlinePokerGames
|
b12e114de8dc60d560058c0d560118912c034505
|
[
"MIT"
] | null | null | null |
util/func.py
|
LauBok/OnlinePokerGames
|
b12e114de8dc60d560058c0d560118912c034505
|
[
"MIT"
] | null | null | null |
from typing import Callable, Any
CaseLessThan: Callable[[int], Callable[[int], bool]] = lambda x: lambda n: x < n
Case: Callable[[Any], Callable[[Any], bool]] = lambda x: lambda n: x == n
| 47
| 80
| 0.675532
| 29
| 188
| 4.37931
| 0.413793
| 0.259843
| 0.173228
| 0.267717
| 0.314961
| 0.314961
| 0.314961
| 0
| 0
| 0
| 0
| 0
| 0.154255
| 188
| 4
| 81
| 47
| 0.798742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d4f2997db32821513aedbd9a3ed0057422f1f6a2
| 44,569
|
py
|
Python
|
nose/test_qdf.py
|
fardal/galpy
|
93a1b6fc8d138899922127086cc66184919c8cba
|
[
"BSD-3-Clause"
] | null | null | null |
nose/test_qdf.py
|
fardal/galpy
|
93a1b6fc8d138899922127086cc66184919c8cba
|
[
"BSD-3-Clause"
] | null | null | null |
nose/test_qdf.py
|
fardal/galpy
|
93a1b6fc8d138899922127086cc66184919c8cba
|
[
"BSD-3-Clause"
] | null | null | null |
# Tests of the quasiisothermaldf module
from __future__ import print_function, division
import numpy
#fiducial setup uses these
from galpy.potential import MWPotential, vcirc, omegac, epifreq, verticalfreq
from galpy.actionAngle import actionAngleAdiabatic, actionAngleStaeckel
from galpy.df import quasiisothermaldf
# Shared action-angle machinery reused by all tests below:
# aAA: adiabatic approximation (C implementation)
aAA= actionAngleAdiabatic(pot=MWPotential,c=True)
# aAS: Staeckel approximation with focal parameter delta=0.5
aAS= actionAngleStaeckel(pot=MWPotential,c=True,delta=0.5)
def test_meanvR_adiabatic_gl():
    """meanvR should vanish in and away from the mid-plane (adiabatic, Gauss-Legendre)."""
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAA,cutcounter=True)
    # mid-plane first, then above and below the plane
    for height in [0.,0.2,-0.25]:
        assert numpy.fabs(df.meanvR(0.9,height,gl=True)) < 0.01, "qdf's meanvr is not equal to zero for adiabatic approx."
    return None
def test_meanvR_adiabatic_mc():
    """meanvR should vanish everywhere (adiabatic, Monte Carlo integration)."""
    numpy.random.seed(1)
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAA,cutcounter=True)
    # looser tolerance away from the plane for the noisier MC estimate
    for height, tol in zip([0.,0.2,-0.25],[0.01,0.05,0.05]):
        assert numpy.fabs(df.meanvR(0.9,height,mc=True)) < tol, "qdf's meanvr is not equal to zero for adiabatic approx."
    return None
def test_meanvR_staeckel_gl():
    """meanvR should vanish in and away from the mid-plane (Staeckel, Gauss-Legendre)."""
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    # mid-plane first, then above and below the plane
    for height in [0.,0.2,-0.25]:
        assert numpy.fabs(df.meanvR(0.9,height,gl=True)) < 0.01, "qdf's meanvr is not equal to zero for staeckel approx."
    return None
def test_meanvR_staeckel_mc():
    """meanvR should vanish everywhere (Staeckel, Monte Carlo integration)."""
    numpy.random.seed(1)
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    # looser tolerance away from the plane for the noisier MC estimate
    for height, tol in zip([0.,0.2,-0.25],[0.01,0.05,0.05]):
        assert numpy.fabs(df.meanvR(0.9,height,mc=True)) < tol, "qdf's meanvr is not equal to zero for staeckel approx."
    return None
def test_meanvT_adiabatic_gl():
    """meanvT should match dehnendf in the plane and decrease away from it (adiabatic, GL)."""
    qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                           pot=MWPotential,aA=aAA,cutcounter=True)
    from galpy.df import dehnendf #baseline
    dfc= dehnendf(profileParams=(1./4.,1.0, 0.2),
                  beta=0.,correct=False)
    #In the mid-plane
    vtp9= qdf.meanvT(0.9,0.,gl=True)
    assert numpy.fabs(vtp9-dfc.meanvT(0.9)) < 0.05, "qdf's meanvT is not close to that of dehnendf"
    assert vtp9 < vcirc(MWPotential,0.9), "qdf's meanvT is not less than the circular velocity (which we expect)"
    #higher up
    #BUG FIX: these asserts previously called meanvR (which is ~0, making the
    #comparison trivially true); the message shows meanvT was intended
    assert qdf.meanvT(0.9,0.2,gl=True) < vtp9, "qdf's meanvT above the plane is not less than in the plane (which we expect)"
    assert qdf.meanvT(0.9,-0.25,gl=True) < vtp9, "qdf's meanvT above the plane is not less than in the plane (which we expect)"
    return None
def test_meanvT_adiabatic_mc():
    """meanvT should match dehnendf in the plane and decrease away from it (adiabatic, MC)."""
    numpy.random.seed(1)
    qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                           pot=MWPotential,aA=aAA,cutcounter=True)
    from galpy.df import dehnendf #baseline
    dfc= dehnendf(profileParams=(1./4.,1.0, 0.2),
                  beta=0.,correct=False)
    #In the mid-plane
    vtp9= qdf.meanvT(0.9,0.,mc=True)
    assert numpy.fabs(vtp9-dfc.meanvT(0.9)) < 0.05, "qdf's meanvT is not close to that of dehnendf"
    assert vtp9 < vcirc(MWPotential,0.9), "qdf's meanvT is not less than the circular velocity (which we expect)"
    #higher up
    #BUG FIX: these asserts previously called meanvR (which is ~0, making the
    #comparison trivially true); the message shows meanvT was intended
    assert qdf.meanvT(0.9,0.2,mc=True) < vtp9, "qdf's meanvT above the plane is not less than in the plane (which we expect)"
    assert qdf.meanvT(0.9,-0.25,mc=True) < vtp9, "qdf's meanvT above the plane is not less than in the plane (which we expect)"
    return None
def test_meanvT_staeckel_gl():
    """meanvT should match dehnendf in the plane and decrease away from it (Staeckel, GL)."""
    qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                           pot=MWPotential,aA=aAS,cutcounter=True)
    from galpy.df import dehnendf #baseline
    dfc= dehnendf(profileParams=(1./4.,1.0, 0.2),
                  beta=0.,correct=False)
    #In the mid-plane
    vtp9= qdf.meanvT(0.9,0.,gl=True)
    assert numpy.fabs(vtp9-dfc.meanvT(0.9)) < 0.05, "qdf's meanvT is not close to that of dehnendf"
    assert vtp9 < vcirc(MWPotential,0.9), "qdf's meanvT is not less than the circular velocity (which we expect)"
    #higher up
    #BUG FIX: these asserts previously called meanvR (which is ~0, making the
    #comparison trivially true); the message shows meanvT was intended
    assert qdf.meanvT(0.9,0.2,gl=True) < vtp9, "qdf's meanvT above the plane is not less than in the plane (which we expect)"
    assert qdf.meanvT(0.9,-0.25,gl=True) < vtp9, "qdf's meanvT above the plane is not less than in the plane (which we expect)"
    return None
def test_meanvT_staeckel_mc():
    """meanvT should match dehnendf in the plane and decrease away from it (Staeckel, MC)."""
    numpy.random.seed(1)
    qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                           pot=MWPotential,aA=aAS,cutcounter=True)
    from galpy.df import dehnendf #baseline
    dfc= dehnendf(profileParams=(1./4.,1.0, 0.2),
                  beta=0.,correct=False)
    #In the mid-plane
    vtp9= qdf.meanvT(0.9,0.,mc=True)
    assert numpy.fabs(vtp9-dfc.meanvT(0.9)) < 0.05, "qdf's meanvT is not close to that of dehnendf"
    assert vtp9 < vcirc(MWPotential,0.9), "qdf's meanvT is not less than the circular velocity (which we expect)"
    #higher up
    #BUG FIX: these asserts previously called meanvR (which is ~0, making the
    #comparison trivially true); the message shows meanvT was intended
    assert qdf.meanvT(0.9,0.2,mc=True) < vtp9, "qdf's meanvT above the plane is not less than in the plane (which we expect)"
    assert qdf.meanvT(0.9,-0.25,mc=True) < vtp9, "qdf's meanvT above the plane is not less than in the plane (which we expect)"
    return None
def test_meanvz_adiabatic_gl():
    """meanvz should vanish in and away from the mid-plane (adiabatic, GL)."""
    qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                           pot=MWPotential,aA=aAA,cutcounter=True)
    #In the mid-plane (message fixed: it previously said 'meanvr')
    assert numpy.fabs(qdf.meanvz(0.9,0.,gl=True)) < 0.01, "qdf's meanvz is not equal to zero for adiabatic approx."
    #higher up
    assert numpy.fabs(qdf.meanvz(0.9,0.2,gl=True)) < 0.01, "qdf's meanvz is not equal to zero for adiabatic approx."
    assert numpy.fabs(qdf.meanvz(0.9,-0.25,gl=True)) < 0.01, "qdf's meanvz is not equal to zero for adiabatic approx."
    return None
def test_meanvz_adiabatic_mc():
    """meanvz should vanish everywhere (adiabatic, MC)."""
    numpy.random.seed(1)
    qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                           pot=MWPotential,aA=aAA,cutcounter=True)
    #In the mid-plane (message fixed: it previously said 'meanvr')
    assert numpy.fabs(qdf.meanvz(0.9,0.,mc=True)) < 0.01, "qdf's meanvz is not equal to zero for adiabatic approx."
    #higher up
    assert numpy.fabs(qdf.meanvz(0.9,0.2,mc=True)) < 0.05, "qdf's meanvz is not equal to zero for adiabatic approx."
    assert numpy.fabs(qdf.meanvz(0.9,-0.25,mc=True)) < 0.05, "qdf's meanvz is not equal to zero for adiabatic approx."
    return None
def test_meanvz_staeckel_gl():
    """meanvz should vanish in and away from the mid-plane (Staeckel, GL)."""
    qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                           pot=MWPotential,aA=aAS,cutcounter=True)
    #In the mid-plane (message fixed: it previously said 'meanvr')
    assert numpy.fabs(qdf.meanvz(0.9,0.,gl=True)) < 0.01, "qdf's meanvz is not equal to zero for staeckel approx."
    #higher up
    assert numpy.fabs(qdf.meanvz(0.9,0.2,gl=True)) < 0.01, "qdf's meanvz is not equal to zero for staeckel approx."
    assert numpy.fabs(qdf.meanvz(0.9,-0.25,gl=True)) < 0.01, "qdf's meanvz is not equal to zero for staeckel approx."
    return None
def test_meanvz_staeckel_mc():
    """meanvz should vanish everywhere (Staeckel, MC)."""
    numpy.random.seed(1)
    qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                           pot=MWPotential,aA=aAS,cutcounter=True)
    #In the mid-plane (message fixed: it previously said 'meanvr')
    assert numpy.fabs(qdf.meanvz(0.9,0.,mc=True)) < 0.01, "qdf's meanvz is not equal to zero for staeckel approx."
    #higher up
    assert numpy.fabs(qdf.meanvz(0.9,0.2,mc=True)) < 0.05, "qdf's meanvz is not equal to zero for staeckel approx."
    assert numpy.fabs(qdf.meanvz(0.9,-0.25,mc=True)) < 0.05, "qdf's meanvz is not equal to zero for staeckel approx."
    return None
def test_sigmar_staeckel_gl():
    """ln sigmaR^2 at R=0.9 should be close to the input 2 ln(0.2)+0.2 (Staeckel, GL)."""
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    expec= 2.*numpy.log(0.2)+0.2  # expected ln sigmaR^2 at R=0.9
    #In the mid-plane
    assert numpy.fabs(numpy.log(df.sigmaR2(0.9,0.,gl=True))-expec) < 0.2, "qdf's sigmaR2 deviates more than expected from input for staeckel approx."
    #higher up, also w/ different ngl
    assert numpy.fabs(numpy.log(df.sigmaR2(0.9,0.2,gl=True,ngl=20))-expec) < 0.3, "qdf's sigmaR2 deviates more than expected from input for staeckel approx."
    assert numpy.fabs(numpy.log(df.sigmaR2(0.9,-0.25,gl=True,ngl=24))-expec) < 0.3, "qdf's sigmaR2 deviates more than expected from input for staeckel approx."
    return None
def test_sigmar_staeckel_mc():
    """ln sigmaR^2 at R=0.9 should be close to the input 2 ln(0.2)+0.2 (Staeckel, MC)."""
    numpy.random.seed(1)
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    expec= 2.*numpy.log(0.2)+0.2  # expected ln sigmaR^2 at R=0.9
    # mid-plane first, then above and below the plane (looser MC tolerances)
    for height, tol in zip([0.,0.2,-0.25],[0.2,0.4,0.3]):
        assert numpy.fabs(numpy.log(df.sigmaR2(0.9,height,mc=True))-expec) < tol, "qdf's sigmaR2 deviates more than expected from input for staeckel approx."
    return None
def test_sigmat_staeckel_gl():
    """sigmaT^2/sigmaR^2 should follow the epicycle expectation 1/gamma^2 (Staeckel, GL)."""
    #colder, st closer to epicycle expectation
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    gamma= 2.*omegac(MWPotential,0.9)/epifreq(MWPotential,0.9)
    # mid-plane, then higher up
    for height in [0.,0.2]:
        ratio= df.sigmaT2(0.9,height,gl=True)/df.sigmaR2(0.9,height,gl=True)
        assert numpy.fabs(numpy.log(ratio)+2.*numpy.log(gamma)) < 0.3, "qdf's sigmaT2/sigmaR2 deviates more than expected from input for staeckel approx."
    return None
def test_sigmat_staeckel_mc():
    """sigmaT^2/sigmaR^2 should follow the epicycle expectation 1/gamma^2 (Staeckel, MC)."""
    numpy.random.seed(2)
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    gamma= 2.*omegac(MWPotential,0.9)/epifreq(MWPotential,0.9)
    # mid-plane, then higher up
    for height in [0.,0.2]:
        ratio= df.sigmaT2(0.9,height,mc=True)/df.sigmaR2(0.9,height,mc=True)
        assert numpy.fabs(numpy.log(ratio)+2.*numpy.log(gamma)) < 0.3, "qdf's sigmaT2/sigmaR2 deviates more than expected from input for staeckel approx."
    return None
def test_sigmaz_staeckel_gl():
    """ln sigmaz^2 should be near, and below, the input 2 ln(0.1)+0.2 (Staeckel, GL)."""
    qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                           pot=MWPotential,aA=aAS,cutcounter=True)
    expec= 2.*numpy.log(0.1)+0.2  # input ln sigmaz^2 at R=0.9
    # mid-plane first, then above and below the plane
    for height in [0.,0.2,-0.25]:
        assert numpy.fabs(numpy.log(qdf.sigmaz2(0.9,height,gl=True))-expec) < 0.5, "qdf's sigmaz2 deviates more than expected from input for staeckel approx."
        #from Bovy & Rix 2013, we know that this has to be smaller
        #BUG FIX: the original chained '... < 0.5' after this comparison was dead
        #code (the right-hand side is ~ -4.4, always < 0.5); it has been removed
        assert numpy.log(qdf.sigmaz2(0.9,height,gl=True)) < expec, "qdf's sigmaz2 deviates more than expected from input for staeckel approx."
    return None
def test_sigmaz_staeckel_mc():
    """ln sigmaz^2 should be near, and below, the input 2 ln(0.1)+0.2 (Staeckel, MC)."""
    numpy.random.seed(1)
    qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                           pot=MWPotential,aA=aAS,cutcounter=True)
    expec= 2.*numpy.log(0.1)+0.2  # input ln sigmaz^2 at R=0.9
    # mid-plane first, then above and below the plane
    for height in [0.,0.2,-0.25]:
        assert numpy.fabs(numpy.log(qdf.sigmaz2(0.9,height,mc=True))-expec) < 0.5, "qdf's sigmaz2 deviates more than expected from input for staeckel approx."
        #from Bovy & Rix 2013, we know that this has to be smaller
        #BUG FIX: the original chained '... < 0.5' after this comparison was dead
        #code (the right-hand side is ~ -4.4, always < 0.5); it has been removed
        assert numpy.log(qdf.sigmaz2(0.9,height,mc=True)) < expec, "qdf's sigmaz2 deviates more than expected from input for staeckel approx."
    return None
def test_sigmarz_adiabatic_gl():
    """The R-z velocity correlation should vanish in the mid-plane (adiabatic, GL)."""
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAA,cutcounter=True)
    #In the mid-plane, should be zero
    srz= df.sigmaRz(0.9,0.,gl=True)
    assert numpy.fabs(srz) < 0.05, "qdf's sigmaRz deviates more than expected from zero in the mid-plane for adiabatic approx."
    return None
def test_sigmarz_adiabatic_mc():
    """The R-z velocity correlation should vanish in the mid-plane (adiabatic, MC)."""
    numpy.random.seed(1)
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAA,cutcounter=True)
    #In the mid-plane, should be zero
    srz= df.sigmaRz(0.9,0.,mc=True)
    assert numpy.fabs(srz) < 0.05, "qdf's sigmaRz deviates more than expected from zero in the mid-plane for adiabatic approx."
    return None
def test_sigmarz_staeckel_gl():
    """The R-z velocity correlation should vanish in the mid-plane (Staeckel, GL)."""
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    #In the mid-plane, should be zero
    srz= df.sigmaRz(0.9,0.,gl=True)
    assert numpy.fabs(srz) < 0.05, "qdf's sigmaRz deviates more than expected from zero in the mid-plane for staeckel approx."
    return None
def test_sigmarz_staeckel_mc():
    """The R-z velocity correlation should vanish in the mid-plane (Staeckel, MC)."""
    numpy.random.seed(1)
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    #In the mid-plane, should be zero
    srz= df.sigmaRz(0.9,0.,mc=True)
    assert numpy.fabs(srz) < 0.05, "qdf's sigmaRz deviates more than expected from zero in the mid-plane for staeckel approx."
    return None
def test_tilt_adiabatic_gl():
    """The tilt of the velocity ellipsoid should vanish everywhere (adiabatic, GL)."""
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAA,cutcounter=True)
    #should be zero everywhere
    for height in [0.,0.2,-0.25]:
        assert numpy.fabs(df.tilt(0.9,height,gl=True)) < 0.05, "qdf's tilt deviates more than expected from zero for adiabatic approx."
    return None
def test_tilt_adiabatic_mc():
    """The tilt of the velocity ellipsoid should vanish everywhere (adiabatic, MC)."""
    numpy.random.seed(1)
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAA,cutcounter=True)
    #should be zero everywhere
    for height in [0.,0.2,-0.25]:
        assert numpy.fabs(df.tilt(0.9,height,mc=True)) < 0.05, "qdf's tilt deviates more than expected from zero for adiabatic approx."
    return None
def test_tilt_staeckel_gl():
    """Tilt should be ~0 in the plane and point roughly toward the GC away from it (Staeckel, GL)."""
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    #should be zero in the mid-plane and roughly toward the GC elsewhere
    assert numpy.fabs(df.tilt(0.9,0.,gl=True)) < 0.05, "qdf's tilt deviates more than expected from zero in the mid-plane for staeckel approx."
    for height, tol in zip([0.1,-0.15,-0.25],[2.,2.5,4.]):
        towardGC= numpy.arctan(height/0.9)/numpy.pi*180.  # angle toward the GC in degrees
        assert numpy.fabs(df.tilt(0.9,height,gl=True)-towardGC) < tol, "qdf's tilt deviates more than expected from expected for staeckel approx."
    return None
def test_tilt_staeckel_mc():
    """Tilt should be ~0 in the plane and point roughly toward the GC above it (Staeckel, MC)."""
    numpy.random.seed(1)
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    #should be zero in the mid-plane and roughly toward the GC elsewhere
    #this is tough
    assert numpy.fabs(df.tilt(0.9,0.,mc=True)) < 1., "qdf's tilt deviates more than expected from zero in the mid-plane for staeckel approx."
    towardGC= numpy.arctan(0.1/0.9)/numpy.pi*180.  # angle toward the GC in degrees
    assert numpy.fabs(df.tilt(0.9,0.1,mc=True)-towardGC) < 3., "qdf's tilt deviates more than expected from expected for staeckel approx."
    return None
def test_estimate_hr():
    """estimate_hr should recover the input radial scale length for several setups."""
    emsg= 'estimated scale length deviates more from input scale length than expected'
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    assert numpy.fabs((df.estimate_hr(0.9,z=0.)-0.25)/0.25) < 0.1, emsg
    #Another one
    df= quasiisothermaldf(1./2.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    assert numpy.fabs((df.estimate_hr(0.9,z=None)-0.5)/0.5) < 0.15, emsg
    #Another one
    df= quasiisothermaldf(1.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    assert numpy.fabs((df.estimate_hr(0.9,z=None,fixed_quad=False)-1.0)/1.0) < 0.3, emsg
    return None
def test_estimate_hz():
    """estimate_hz should recover the scale height implied by sigma_z^2 and the local density."""
    from scipy import integrate
    from galpy.potential import evaluateDensities
    def expected_hz(sz):
        # sigma_z^2 / (2 x mass within |z|<0.125) / (2 pi), isothermal-sheet style estimate
        halfsurf= integrate.quad(lambda x: evaluateDensities(0.9,x,MWPotential),
                                 0.,0.125)[0]
        return sz**2./2./halfsurf/2./numpy.pi
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    expec_hz= expected_hz(0.1)
    assert numpy.fabs((df.estimate_hz(0.9,z=0.125)-expec_hz)/expec_hz) < 0.1, 'estimated scale height not as expected'
    assert df.estimate_hz(0.9,z=0.) > 1., 'estimated scale height at z=0 not very large'
    #Another one
    df= quasiisothermaldf(1./4.,0.3,0.2,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    expec_hz= expected_hz(0.2)
    assert numpy.fabs((df.estimate_hz(0.9,z=0.125)-expec_hz)/expec_hz) < 0.15, 'estimated scale height not as expected'
    return None
def test_estimate_hsr():
    """estimate_hsr should recover the input radial-dispersion scale length."""
    emsg= 'estimated radial-dispersion scale length deviates more from input scale length than expected'
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    assert numpy.fabs((df.estimate_hsr(0.9,z=0.)-1.0)/1.0) < 0.25, emsg
    #Another one
    df= quasiisothermaldf(1./2.,0.2,0.1,2.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    assert numpy.fabs((df.estimate_hsr(0.9,z=0.05)-2.0)/2.0) < 0.25, emsg
    return None
def test_estimate_hsz():
    """estimate_hsz should recover the input vertical-dispersion scale length."""
    emsg= 'estimated vertical-dispersion scale length deviates more from input scale length than expected'
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    assert numpy.fabs((df.estimate_hsz(0.9,z=0.)-1.0)/1.0) < 0.25, emsg
    #Another one
    df= quasiisothermaldf(1./2.,0.2,0.1,1.,2.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    assert numpy.fabs((df.estimate_hsz(0.9,z=0.05)-2.0)/2.0) < 0.25, emsg
    return None
def test_meanjr():
    """Very rough check of mean J_R against sigma_R^2(R)/kappa(R)."""
    #This is a *very* rough test against a rough estimate of the mean
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    for R, lnsR2 in [(0.9,2.*numpy.log(0.2)+0.2),(0.5,2.*numpy.log(0.2)+1.)]:
        ldiff= numpy.log(df.meanjr(R,0.,mc=True))-lnsR2\
            +numpy.log(epifreq(MWPotential,R))
        assert numpy.fabs(ldiff) < 0.4, 'meanjr is not what is expected'
    return None
def test_meanlz():
    """Very rough check of mean L_z against R x meanvT from a dehnendf baseline."""
    #This is a *very* rough test against a rough estimate of the mean
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    from galpy.df import dehnendf #baseline
    dfc= dehnendf(profileParams=(1./4.,1.0, 0.2),
                  beta=0.,correct=False)
    for R, tol in [(0.9,0.1),(0.5,0.2)]:
        ldiff= numpy.log(df.meanlz(R,0.,mc=True))-numpy.log(R*dfc.meanvT(R))
        assert numpy.fabs(ldiff) < tol, 'meanlz is not what is expected'
    return None
def test_meanjz():
    """Very rough check of mean J_z against sigma_z^2(R)/nu(R)."""
    #This is a *very* rough test against a rough estimate of the mean
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    for R, lnsz2 in [(0.9,2.*numpy.log(0.1)+0.2),(0.5,2.*numpy.log(0.1)+1.0)]:
        ldiff= numpy.log(df.meanjz(R,0.,mc=True))-lnsz2\
            +numpy.log(verticalfreq(MWPotential,R))
        #expect this to be smaller than the rough estimate, but not by more than an order of magnitude
        assert -1. < ldiff < 0., 'meanjz is not what is expected'
    return None
def test_sampleV():
    """Velocity samples should reproduce the qdf's first and second moments at (R,z)=(0.8,0.1)."""
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    numpy.random.seed(1)
    vs= df.sampleV(0.8,0.1,n=1000)
    vR, vT, vz= vs[:,0], vs[:,1], vs[:,2]
    #test vR
    assert numpy.fabs(numpy.mean(vR)) < 0.02, 'sampleV vR mean is not zero'
    assert numpy.fabs(numpy.log(numpy.std(vR))-0.5*numpy.log(df.sigmaR2(0.8,0.1))) < 0.05, 'sampleV vR stddev is not equal to sigmaR'
    #test vT
    assert numpy.fabs(numpy.mean(vT-df.meanvT(0.8,0.1))) < 0.015, 'sampleV vT mean is not equal to meanvT'
    assert numpy.fabs(numpy.log(numpy.std(vT))-0.5*numpy.log(df.sigmaT2(0.8,0.1))) < 0.05, 'sampleV vT stddev is not equal to sigmaT'
    #test vz
    assert numpy.fabs(numpy.mean(vz)) < 0.01, 'sampleV vz mean is not zero'
    assert numpy.fabs(numpy.log(numpy.std(vz))-0.5*numpy.log(df.sigmaz2(0.8,0.1))) < 0.05, 'sampleV vz stddev is not equal to sigmaz'
    return None
def test_pvR_adiabatic():
    """pvR's Riemann-sum mean and dispersion should match 0 and sigmaR (adiabatic)."""
    # Test pvR by calculating its mean and stddev by Riemann sum
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAA,cutcounter=True)
    R,z= 0.8, 0.1
    vRs= numpy.linspace(-1.,1.,51)
    pvR= numpy.array([df.pvR(vr,R,z) for vr in vRs])
    norm= numpy.sum(pvR)
    mvR= numpy.sum(vRs*pvR)/norm
    svR= numpy.sqrt(numpy.sum(vRs**2.*pvR)/norm-mvR**2.)
    assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvR not equal to zero for adiabatic actions'
    assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(df.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvR not equal to that from sigmaR2 for adiabatic actions'
    return None
def test_pvR_staeckel():
    """pvR's Riemann-sum mean and dispersion should match 0 and sigmaR (Staeckel)."""
    # Test pvR by calculating its mean and stddev by Riemann sum
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    R,z= 0.8, 0.1
    vRs= numpy.linspace(-1.,1.,51)
    pvR= numpy.array([df.pvR(vr,R,z) for vr in vRs])
    norm= numpy.sum(pvR)
    mvR= numpy.sum(vRs*pvR)/norm
    svR= numpy.sqrt(numpy.sum(vRs**2.*pvR)/norm-mvR**2.)
    assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvR not equal to zero for staeckel actions'
    assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(df.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvR not equal to that from sigmaR2 for staeckel actions'
    return None
def test_pvR_staeckel_diffngl():
    """pvR should be insensitive to the (even) GL order and reject odd ngl."""
    # Test pvR by calculating its mean and stddev by Riemann sum
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    R,z= 0.8, 0.1
    vRs= numpy.linspace(-1.,1.,51)
    def check(order):
        # Riemann-sum mean and dispersion of pvR at this GL order
        pvR= numpy.array([df.pvR(vr,R,z,ngl=order) for vr in vRs])
        mvR= numpy.sum(vRs*pvR)/numpy.sum(pvR)
        svR= numpy.sqrt(numpy.sum(vRs**2.*pvR)/numpy.sum(pvR)-mvR**2.)
        assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvR not equal to zero for staeckel actions'
        assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(df.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvR not equal to that from sigmaR2 for staeckel actions'
    check(10)  #ngl=10
    check(40)  #ngl=40
    #ngl=11, shouldn't work
    try:
        df.pvR(vRs[0],R,z,ngl=11)
    except ValueError: pass
    else: raise AssertionError('pvR w/ ngl=odd did not raise ValueError')
    return None
def test_pvT_adiabatic():
    """pvT's Riemann-sum mean and dispersion should match meanvT and sigmaT (adiabatic)."""
    # Test pvT by calculating its mean and stddev by Riemann sum
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAA,cutcounter=True)
    R,z= 0.8, 0.1
    vTs= numpy.linspace(0.,1.5,101)
    pvT= numpy.array([df.pvT(vt,R,z) for vt in vTs])
    norm= numpy.sum(pvT)
    mvT= numpy.sum(vTs*pvT)/norm
    svT= numpy.sqrt(numpy.sum(vTs**2.*pvT)/norm-mvT**2.)
    assert numpy.fabs(mvT-df.meanvT(R,z)) < 0.01, 'mean vT calculated from pvT not equal to zero for adiabatic actions'
    assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(df.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvT not equal to that from sigmaT2 for adiabatic actions'
    return None
def test_pvT_staeckel():
    """pvT's Riemann-sum mean and dispersion should match meanvT and sigmaT (Staeckel)."""
    # Test pvT by calculating its mean and stddev by Riemann sum
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    R,z= 0.8, 0.1
    vTs= numpy.linspace(0.,1.5,101)
    pvT= numpy.array([df.pvT(vt,R,z) for vt in vTs])
    norm= numpy.sum(pvT)
    mvT= numpy.sum(vTs*pvT)/norm
    svT= numpy.sqrt(numpy.sum(vTs**2.*pvT)/norm-mvT**2.)
    assert numpy.fabs(mvT-df.meanvT(R,z)) < 0.01, 'mean vT calculated from pvT not equal to zero for staeckel actions'
    assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(df.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvT not equal to that from sigmaT2 for staeckel actions'
    return None
def test_pvT_staeckel_diffngl():
    """pvT should be insensitive to the (even) GL order and reject odd ngl."""
    # Test pvT by calculating its mean and stddev by Riemann sum
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    R,z= 0.8, 0.1
    vTs= numpy.linspace(0.,1.5,101)
    def check(order):
        # Riemann-sum mean and dispersion of pvT at this GL order
        pvT= numpy.array([df.pvT(vt,R,z,ngl=order) for vt in vTs])
        mvT= numpy.sum(vTs*pvT)/numpy.sum(pvT)
        svT= numpy.sqrt(numpy.sum(vTs**2.*pvT)/numpy.sum(pvT)-mvT**2.)
        assert numpy.fabs(mvT-df.meanvT(R,z)) < 0.01, 'mean vT calculated from pvT not equal to zero for staeckel actions'
        assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(df.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvT not equal to that from sigmaT2 for staeckel actions'
    check(10)  #ngl=10
    check(40)  #ngl=40
    #ngl=11, shouldn't work
    try:
        df.pvT(vTs[0],R,z,ngl=11)
    except ValueError: pass
    else: raise AssertionError('pvT w/ ngl=odd did not raise ValueError')
    return None
def test_pvz_adiabatic():
    """pvz's Riemann-sum mean and dispersion should match 0 and sigmaz (adiabatic)."""
    # Test pvz by calculating its mean and stddev by Riemann sum
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAA,cutcounter=True)
    R,z= 0.8, 0.1
    vzs= numpy.linspace(-1.,1.,51)
    pvz= numpy.array([df.pvz(vz,R,z) for vz in vzs])
    norm= numpy.sum(pvz)
    mvz= numpy.sum(vzs*pvz)/norm
    svz= numpy.sqrt(numpy.sum(vzs**2.*pvz)/norm-mvz**2.)
    assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvz not equal to zero for adiabatic actions'
    assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(df.sigmaz2(R,z))) < 0.01, 'sigma vz calculated from pvz not equal to that from sigmaz2 for adiabatic actions'
    return None
def test_pvz_staeckel():
    """pvz's Riemann-sum moments should match 0 and sigmaz, also with explicit _sigmaR1."""
    # Test pvz by calculating its mean and stddev by Riemann sum
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    R,z= 0.8, 0.1
    vzs= numpy.linspace(-1.,1.,51)
    def check(pvz):
        # Riemann-sum mean and dispersion of the supplied pvz samples
        mvz= numpy.sum(vzs*pvz)/numpy.sum(pvz)
        svz= numpy.sqrt(numpy.sum(vzs**2.*pvz)/numpy.sum(pvz)-mvz**2.)
        assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvz not equal to zero for staeckel actions'
        assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(df.sigmaz2(R,z))) < 0.01, 'sigma vz calculated from pvz not equal to that from sigmaz2 for staeckel actions'
    check(numpy.array([df.pvz(vz,R,z) for vz in vzs]))
    #same w/ explicit sigmaR input
    sR1= 0.95*numpy.sqrt(df.sigmaR2(R,z))
    check(numpy.array([df.pvz(vz,R,z,_sigmaR1=sR1) for vz in vzs]))
    return None
def test_pvz_staeckel_diffngl():
    """pvz should be insensitive to the (even) GL order and reject odd ngl."""
    # Test pvz by calculating its mean and stddev by Riemann sum
    df= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                          pot=MWPotential,aA=aAS,cutcounter=True)
    R,z= 0.8, 0.1
    vzs= numpy.linspace(-1.,1.,51)
    def check(order):
        # Riemann-sum mean and dispersion of pvz at this GL order
        pvz= numpy.array([df.pvz(vz,R,z,ngl=order) for vz in vzs])
        mvz= numpy.sum(vzs*pvz)/numpy.sum(pvz)
        svz= numpy.sqrt(numpy.sum(vzs**2.*pvz)/numpy.sum(pvz)-mvz**2.)
        assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvz not equal to zero for staeckel actions'
        assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(df.sigmaz2(R,z))) < 0.01, 'sigma vz calculated from pvz not equal to that from sigmaz2 for staeckel actions'
    check(10)  #ngl=10
    check(40)  #ngl=40
    #ngl=11, shouldn't work
    try:
        df.pvz(vzs[0],R,z,ngl=11)
    except ValueError: pass
    else: raise AssertionError('pvz w/ ngl=odd did not raise ValueError')
    return None
def test_pvz_staeckel_arrayin():
    """pvz with array R,z input should match the scalar-input result elementwise."""
    qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                           pot=MWPotential,aA=aAS,cutcounter=True)
    R,z= 0.8, 0.1
    pvz= qdf.pvz(0.05,R*numpy.ones(2),z*numpy.ones(2))
    #BUG FIX: the '< 10.**-10.' used to sit outside numpy.all(), so the assert
    #compared the reduced boolean instead of the elementwise log-differences;
    #compare |dlog| < 1e-10 elementwise inside numpy.all()
    assert numpy.all(numpy.fabs(numpy.log(pvz)-numpy.log(qdf.pvz(0.05,R,z))) < 10.**-10.), 'pvz calculated with R and z array input does not equal to calculated with scalar input'
    return None
def test_setup_diffsetups():
    """Exercise the different ways to set up a qdf object, including the error paths."""
    #Test errors
    try:
        qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                               aA=aAS,cutcounter=True)
    except IOError: pass
    else: raise AssertionError("qdf setup w/o pot set did not raise exception")
    try:
        qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                               pot=MWPotential,cutcounter=True)
    except IOError: pass
    else: raise AssertionError("qdf setup w/o aA set did not raise exception")
    from galpy.potential import LogarithmicHaloPotential
    try:
        qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                               pot=LogarithmicHaloPotential(),
                               aA=aAS,cutcounter=True)
    except IOError: pass
    else: raise AssertionError("qdf setup w/ aA potential different from pot= did not raise exception")
    #qdf setup with an actionAngleIsochrone instance (issue #190)
    from galpy.potential import IsochronePotential
    from galpy.actionAngle import actionAngleIsochrone
    ip= IsochronePotential(normalize=1.,b=2.)
    try:
        qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                               pot=ip,
                               aA=actionAngleIsochrone(ip=ip),cutcounter=True)
    except Exception as e:
        #BUG FIX: the original 'except: raise' made the intended AssertionError
        #below unreachable; chain the intended message onto the actual failure
        raise AssertionError('quasi-isothermaldf setup w/ an actionAngleIsochrone instance failed') from e
    #qdf setup with an actionAngleIsochrone instance should raise error if potentials are not the same
    ip= IsochronePotential(normalize=1.,b=2.)
    try:
        qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                               pot=ip,
                               aA=actionAngleIsochrone(ip=IsochronePotential(normalize=1.,b=2.5)),
                               cutcounter=True)
    except IOError: pass
    else: raise AssertionError("qdf setup w/ aA potential different from pot= did not raise exception")
    #precompute
    qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                           pot=MWPotential,aA=aAS,cutcounter=True,
                           _precomputerg=True)
    qdfnpc= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                              pot=MWPotential,aA=aAS,cutcounter=True,
                              _precomputerg=False)
    assert numpy.fabs(qdf.rg(1.1)-qdfnpc.rg(1.1)) < 10.**-5., 'rg calculated from qdf instance w/ precomputerg set is not the same as that computed from an instance w/o it set'
    return None  # added for consistency with the other tests in this module
def test_call_diffinoutputs():
    """qdf.__call__ must agree across its different input/output modes:
    re-used frequencies, re-used actions, unbound orbits, negative Lz,
    and a user-supplied integrand function.

    Fixes: two assertion messages read "do not agrees" (grammar typo) and
    the comment introducing the unbound-orbit check was truncated.
    """
    qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                           pot=MWPotential,aA=aAS,cutcounter=True)
    # when specifying rg etc., first get these from a previous output
    val, trg, tkappa, tnu, tOmega= qdf((0.03,0.9,0.02),_return_freqs=True)
    # First check that just supplying these again works
    assert numpy.fabs(val-qdf((0.03,0.9,0.02),rg=trg,kappa=tkappa,nu=tnu,
                              Omega=tOmega)) < 10.**-8., 'qdf calls w/ rg, and frequencies specified and w/ not specified do not agree'
    # Also calculate the frequencies explicitly from the potential
    assert numpy.fabs(val-qdf((0.03,0.9,0.02),rg=trg,
                              kappa=epifreq(MWPotential,trg),
                              nu=verticalfreq(MWPotential,trg),
                              Omega=omegac(MWPotential,trg))) < 10.**-8., 'qdf calls w/ rg, and frequencies specified and w/ not specified do not agree'
    # Also test _return_actions
    val, jr,lz,jz= qdf(0.9,0.1,0.95,0.1,0.08,_return_actions=True)
    assert numpy.fabs(val-qdf((jr,lz,jz))) < 10.**-8., 'qdf call w/ R,vR,... and actions specified do not agree'
    acs= aAS(0.9,0.1,0.95,0.1,0.08)
    assert numpy.fabs(acs[0]-jr) < 10.**-8., 'direct calculation of jr and that returned from qdf.__call__ does not agree'
    assert numpy.fabs(acs[1]-lz) < 10.**-8., 'direct calculation of lz and that returned from qdf.__call__ does not agree'
    assert numpy.fabs(acs[2]-jz) < 10.**-8., 'direct calculation of jz and that returned from qdf.__call__ does not agree'
    # Test unbound orbits: build a new qdf around a c=False Staeckel
    # actionAngle instance so that an UnboundError can actually be raised
    taAS= actionAngleStaeckel(pot=MWPotential,c=False,delta=0.5)
    qdfnc= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                             pot=MWPotential,
                             aA=taAS,
                             cutcounter=True)
    from galpy.actionAngle import UnboundError
    try: acs= taAS(0.9,10.,-20.,0.1,10.)
    except UnboundError: pass
    else:
        print(acs)
        raise AssertionError('Test orbit in qdf that is supposed to be unbound is not')
    assert qdfnc(0.9,10.,-20.,0.1,10.) < 10.**-10., 'unbound orbit does not return qdf equal to zero'
    # Test negative lz: cutcounter=True must zero out counter-rotating orbits
    assert qdf((0.03,-0.1,0.02)) < 10.**-8., 'qdf w/ cutcounter=True and negative lz does not return 0'
    assert qdf((0.03,-0.1,0.02),log=True) <= numpy.finfo(numpy.dtype(numpy.float64)).min+1., 'qdf w/ cutcounter=True and negative lz does not return 0'
    # Test func= : the DF is multiplied by the given function of (jr,lz,jz)
    val= qdf((0.03,0.9,0.02))
    fval= qdf((0.03,0.9,0.02),func=lambda x,y,z: numpy.sin(x)*numpy.cos(y)
              *numpy.exp(z))
    assert numpy.fabs(val*numpy.sin(0.03)*numpy.cos(0.9)*numpy.exp(0.02)-
                      fval) < 10.**-8, 'qdf __call__ w/ func does not work as expected'
    lfval= qdf((0.03,0.9,0.02),func=lambda x,y,z: numpy.sin(x)*numpy.cos(y)
               *numpy.exp(z),log=True)
    assert numpy.fabs(numpy.log(val)+numpy.log(numpy.sin(0.03)
                                               *numpy.cos(0.9)
                                               *numpy.exp(0.02))-
                      lfval) < 10.**-8, 'qdf __call__ w/ func does not work as expected'
    return None
def test_vmomentdensity_diffinoutputs():
    """vmomentdensity: exercise the optional input/output channels
    (explicit dispersions, ngl validation, re-used Gauss-Legendre grids,
    re-used actions/frequencies, and re-used Monte Carlo samples)."""
    qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                           pot=MWPotential,aA=aAS,cutcounter=True)
    #Test that we can input use different ngl
    R, z= 0.8, 0.1
    sigmar2= qdf.sigmaR2(R,z,gl=True)
    # Supplying (slightly rescaled) dispersions explicitly should give
    # nearly the same answer as letting the df compute them internally
    assert numpy.fabs(numpy.log(qdf.sigmaR2(R,z,gl=True,_sigmaR1=0.95*numpy.sqrt(qdf.sigmaR2(R,z)),_sigmaz1=0.95*numpy.sqrt(qdf.sigmaz2(R,z))))-numpy.log(sigmar2)) < 0.01, 'sigmaR2 calculated w/ explicit sigmaR1 and sigmaz1 do not agree'
    #Test ngl inputs further: odd ngl is rejected
    try:
        qdf.vmomentdensity(R,z,0,0,0,gl=True,ngl=11)
    except ValueError: pass
    else: raise AssertionError('qdf.vmomentdensity w/ ngl == odd does not raise ValueError')
    surfmass, glqeval= qdf.vmomentdensity(R,z,0.,0.,0.,
                                          gl=True,
                                          _returngl=True)
    #This shouldn't reuse gleval, but should work nonetheless
    # (ngl=30 mismatches the grid, so the code must recompute it)
    assert numpy.fabs(numpy.log(surfmass)\
                          -numpy.log(qdf.vmomentdensity(R,z,0.,0.,0.,
                                                        gl=True,
                                                        _glqeval=glqeval,
                                                        ngl=30))) < 0.05, 'vmomentsurfmass w/ wrong glqeval input does not work'
    #Test that we can re-use jr, etc.
    surfmass, jr,lz,jz= qdf.vmomentdensity(R,z,0.,0.,0.,gl=True,
                                           _return_actions=True)
    assert numpy.fabs(numpy.log(surfmass)\
                          -numpy.log(qdf.vmomentdensity(R,z,0.,0.,0.,gl=True,
                                                        _jr=jr,_lz=lz,_jz=jz))) < 0.01, 'surfacemass calculated from re-used actions does not agree with that before'
    # ... and re-use the actions plus the guiding-center frequencies
    surfmass, jr,lz,jz, rg, kappa, nu, Omega=\
        qdf.vmomentdensity(R,z,0.,0.,0.,gl=True,
                           _return_actions=True,
                           _return_freqs=True)
    assert numpy.fabs(numpy.log(surfmass)\
                          -numpy.log(qdf.vmomentdensity(R,z,0.,0.,0.,gl=True,
                                                        _jr=jr,_lz=lz,_jz=jz,
                                                        _rg=rg,_kappa=kappa,
                                                        _nu=nu,_Omega=Omega))) < 0.01, 'surfacemass calculated from re-used actions does not agree with that before'
    #Some tests of mc=True: re-using the raw Gaussian velocity samples
    #must reproduce the Monte Carlo estimate exactly (same sample set)
    surfmass, vrs, vts, vzs= qdf.vmomentdensity(R,z,0.,0.,0.,mc=True,gl=False,
                                                _rawgausssamples=True,
                                                _returnmc=True)
    assert numpy.fabs(numpy.log(surfmass)-numpy.log(qdf.vmomentdensity(R,z,0.,0.,0.,
                                                                       mc=True,gl=False,
                                                                       _rawgausssamples=True,
                                                                       _vrs=vrs,
                                                                       _vts=vts,
                                                                       _vzs=vzs))) < 0.0001, 'qdf.vmomentdensity w/ rawgausssamples and mc=True does not agree with that w/o rawgausssamples'
    return None
def test_jmomentdensity_diffinoutputs():
    """jmomentdensity: re-using the Monte Carlo velocity samples returned
    via _returnmc=True must reproduce the original moment exactly.

    Fix: the assertion message mentioned "rawgausssamples" — copy-pasted
    from the vmomentdensity test; this test re-uses _vrs/_vts/_vzs instead.
    """
    qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
                           pot=MWPotential,aA=aAS,cutcounter=True)
    #Some tests of mc=True
    R,z= 0.8, 0.1
    jr2surfmass, vrs, vts, vzs= qdf.jmomentdensity(R,z,2.,0.,0.,mc=True,
                                                   _returnmc=True)
    assert numpy.fabs(numpy.log(jr2surfmass)-numpy.log(qdf.jmomentdensity(R,z,2.,0.,0.,
                                                                          mc=True,
                                                                          _vrs=vrs,
                                                                          _vts=vts,
                                                                          _vzs=vzs))) < 0.0001, 'qdf.jmomentdensity w/ re-used MC samples and mc=True does not agree with that w/o re-used samples'
    return None
def test_pvz_diffinoutput():
    """pvz must give identical results when the actions and/or frequencies
    it computed on a first call are fed back in on a later call."""
    qdf = quasiisothermaldf(1. / 4., 0.2, 0.1, 1., 1.,
                            pot=MWPotential, aA=aAS, cutcounter=True)
    R, z = 0.8, 0.1
    # re-use the actions
    expected, t_jr, t_lz, t_jz = qdf.pvz(0.1, R, z, _return_actions=True)
    reused = qdf.pvz(0.1, R, z, _jr=t_jr, _lz=t_lz, _jz=t_jz)
    assert numpy.fabs(numpy.log(reused) - numpy.log(expected)) < 0.001, \
        'qdf.pvz does not return the same result when re-using the actions'
    # re-use the frequencies
    expected, t_rg, t_kappa, t_nu, t_Omega = qdf.pvz(0.1, R, z,
                                                     _return_freqs=True)
    reused = qdf.pvz(0.1, R, z, _rg=t_rg, _kappa=t_kappa,
                     _nu=t_nu, _Omega=t_Omega)
    assert numpy.fabs(numpy.log(reused) - numpy.log(expected)) < 0.001, \
        'qdf.pvz does not return the same result when re-using the frequencies'
    # re-use both the actions and the frequencies
    (expected, t_jr, t_lz, t_jz,
     t_rg, t_kappa, t_nu, t_Omega) = qdf.pvz(0.1, R, z, _return_actions=True,
                                             _return_freqs=True)
    reused = qdf.pvz(0.1, R, z, _jr=t_jr, _lz=t_lz, _jz=t_jz,
                     _rg=t_rg, _kappa=t_kappa, _nu=t_nu, _Omega=t_Omega)
    assert numpy.fabs(numpy.log(reused) - numpy.log(expected)) < 0.001, \
        'qdf.pvz does not return the same result when re-using the actions and the frequencies'
    return None
| 59.031788
| 238
| 0.631919
| 7,373
| 44,569
| 3.789095
| 0.049776
| 0.051187
| 0.066041
| 0.008591
| 0.888678
| 0.867022
| 0.840606
| 0.822708
| 0.810824
| 0.786341
| 0
| 0.060151
| 0.22675
| 44,569
| 754
| 239
| 59.11008
| 0.750486
| 0.066055
| 0
| 0.556478
| 0
| 0.001661
| 0.249121
| 0
| 0
| 0
| 0
| 0
| 0.262458
| 1
| 0.081395
| false
| 0.01495
| 0.026578
| 0
| 0.187708
| 0.003322
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
be0c125feceb939ed03c2f0461f0402c59cb20b6
| 2,329
|
py
|
Python
|
lutin_cms.py
|
generic-library/openjpeg-lutin
|
e8a68276307bf90167300dba1366e929fc96a25b
|
[
"BSD-2-Clause"
] | null | null | null |
lutin_cms.py
|
generic-library/openjpeg-lutin
|
e8a68276307bf90167300dba1366e929fc96a25b
|
[
"BSD-2-Clause"
] | null | null | null |
lutin_cms.py
|
generic-library/openjpeg-lutin
|
e8a68276307bf90167300dba1366e929fc96a25b
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python
import realog.debug as debug
import lutin.tools as tools
import os
def get_type():
    """Return the lutin module type: this module builds a library."""
    return "LIBRARY"
def get_desc():
    """Return a short human-readable description of the module."""
    # Fixed typo in the description string: "reade writer" -> "reader/writer".
    return "JPEG 2000 reader/writer"
def get_licence():
    """Return the licence identifier of the wrapped sources (BSD 2-clause)."""
    return "BSD-2"
def get_compagny_type():
    """Return the upstream provider's organisation type."""
    return "org"
def get_compagny_name():
    """Return the name of the upstream organisation."""
    return "openjpeg"
def get_maintainer():
    """Return the list of upstream maintainers of the wrapped code."""
    return [
        "David Janssens",
        "Kaori Hagihara",
        "Jerome Fimes",
        "Giuseppe Baruffa",
        "Mickael Savinaud",
        "Mathieu Malaterre"
        ]
def get_version():
    """Return the wrapped upstream version as [major, minor, patch]."""
    return [2,1,2]
def configure(target, my_module):
    """Register the bundled liblcms2 sources, flags, dependencies and
    public headers on *my_module*; always reports success."""
    lcms_src = 'openjpeg/thirdparty/liblcms2/src/'
    basenames = [
        'cmsalpha.c', 'cmscam02.c', 'cmscgats.c', 'cmscnvrt.c',
        'cmserr.c', 'cmsgamma.c', 'cmsgmt.c', 'cmshalf.c',
        'cmsintrp.c', 'cmsio0.c', 'cmsio1.c', 'cmslut.c',
        'cmsmd5.c', 'cmsmtrx.c', 'cmsnamed.c', 'cmsopt.c',
        'cmspack.c', 'cmspcs.c', 'cmsplugin.c', 'cmsps2.c',
        'cmssamp.c', 'cmssm.c', 'cmstypes.c', 'cmsvirt.c',
        'cmswtpnt.c', 'cmsxform.c',
        ]
    my_module.add_src_file([lcms_src + name for name in basenames])
    my_module.add_flag('c', [
        '-DMUTEX_pthread',
        '-Dopenjp2_EXPORTS'
        ])
    # lcms2 is written against C99.
    my_module.compile_version("c", 1999)
    my_module.add_depend([
        'z',
        'm',
        ])
    my_module.add_path("openjpeg/thirdparty/liblcms2/src/")
    my_module.add_header_file([
        'openjpeg/thirdparty/liblcms2/include/lcms2.h',
        'openjpeg/thirdparty/liblcms2/include/lcms2_plugin.h',
        ],
        destination_path="")
    return True
| 28.060241
| 59
| 0.701159
| 286
| 2,329
| 5.611888
| 0.335664
| 0.325234
| 0.469782
| 0.48785
| 0.514642
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025278
| 0.150708
| 2,329
| 82
| 60
| 28.402439
| 0.786148
| 0.00687
| 0
| 0.043478
| 0
| 0
| 0.602425
| 0.5288
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115942
| false
| 0
| 0.043478
| 0.101449
| 0.275362
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
be176c5802cb5f929128e201b9fb374e4b8a6b9d
| 3,027
|
py
|
Python
|
examples/ccsd_d1.py
|
maxscheurer/pdaggerq
|
e9fef3466e0d0170afc3094ab79e603200e78dfb
|
[
"Apache-2.0"
] | 37
|
2020-09-17T19:29:18.000Z
|
2022-03-03T16:29:16.000Z
|
examples/ccsd_d1.py
|
maxscheurer/pdaggerq
|
e9fef3466e0d0170afc3094ab79e603200e78dfb
|
[
"Apache-2.0"
] | 7
|
2021-02-28T19:22:12.000Z
|
2022-02-22T15:17:47.000Z
|
examples/ccsd_d1.py
|
maxscheurer/pdaggerq
|
e9fef3466e0d0170afc3094ab79e603200e78dfb
|
[
"Apache-2.0"
] | 6
|
2021-02-16T22:34:29.000Z
|
2021-12-04T19:37:23.000Z
|
# elements of the one-electron reduced density matrix
# at the CCSD level of theory: D(pq) = <psi|(1+lambda) e(-T) p*q e(T) |psi>
import pdaggerq
from pdaggerq.parser import contracted_strings_to_tensor_terms
def _print_d1_block(pq, update_val, output_variables, comment_prefix="#\t"):
    """Generate, print and clear one block of 1-RDM (D1) equations.

    Parameters
    ----------
    pq : pdaggerq.pq_helper
        Helper accumulating operator strings; cleared before returning.
    update_val : str
        Target einsum block, e.g. 'opdm[o, o]'.
    output_variables : tuple of str
        The two free indices of the block, e.g. ('m', 'n').
    comment_prefix : str
        Prefix used when echoing each term as a comment line.
    """
    idx1, idx2 = output_variables
    print('')
    print('# D1(%s,%s):' % (idx1, idx2))
    print('')
    # D(pq) = <psi|(1 + l1 + l2) e(-T) e(p,q) e(T)|psi>
    pq.set_left_operators(['1', 'l1', 'l2'])
    pq.add_st_operator(1.0, ['e1(%s,%s)' % (idx1, idx2)], ['t1', 't2'])
    pq.simplify()
    # grab list of fully-contracted strings, then print
    terms_deprince = pq.fully_contracted_strings()
    terms_ncr = contracted_strings_to_tensor_terms(terms_deprince)
    for my_term, deprince_term in zip(terms_ncr, terms_deprince):
        print(comment_prefix, my_term)
        print(comment_prefix, deprince_term)
        print(my_term.einsum_string(update_val=update_val,
                                    output_variables=output_variables))
        print()
    pq.clear()


def main():
    """Print the CCSD 1-RDM equations block by block: oo, vv, vo, ov.

    The four blocks previously repeated ~17 near-identical lines each;
    they are now produced by the shared _print_d1_block helper, with
    byte-identical output.
    """
    pq = pdaggerq.pq_helper("fermi")
    # The original script used "# \t" (extra space) for this block only;
    # preserved for output compatibility.
    _print_d1_block(pq, 'opdm[o, o]', ('m', 'n'), comment_prefix="# \t")
    _print_d1_block(pq, 'opdm[v, v]', ('e', 'f'))
    _print_d1_block(pq, 'opdm[v, o]', ('e', 'm'))
    # note: this block is emitted in sorted (e, m) order, so the user must
    # transpose to obtain D1(m, e)
    _print_d1_block(pq, 'opdm[o, v]', ('m', 'e'))
# Standard script entry point: run the generator only when executed directly.
if __name__ == "__main__":
    main()
| 32.548387
| 76
| 0.60555
| 434
| 3,027
| 3.935484
| 0.195853
| 0.081967
| 0.105386
| 0.073185
| 0.795667
| 0.778103
| 0.764052
| 0.734778
| 0.734778
| 0.734778
| 0
| 0.025851
| 0.233234
| 3,027
| 93
| 77
| 32.548387
| 0.710039
| 0.144037
| 0
| 0.666667
| 0
| 0
| 0.078591
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015152
| false
| 0
| 0.030303
| 0
| 0.045455
| 0.424242
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
078f65df672ca6204b49564ff719361d18db5c03
| 1,154
|
py
|
Python
|
flumine/exceptions.py
|
mberk/flumine
|
6216bcc233326cf07852fca9c7d39a18cee265ad
|
[
"MIT"
] | 77
|
2017-12-09T07:10:18.000Z
|
2022-03-03T09:50:35.000Z
|
flumine/exceptions.py
|
mberk/flumine
|
6216bcc233326cf07852fca9c7d39a18cee265ad
|
[
"MIT"
] | 423
|
2017-01-21T07:26:51.000Z
|
2022-03-04T11:13:54.000Z
|
flumine/exceptions.py
|
mberk/flumine
|
6216bcc233326cf07852fca9c7d39a18cee265ad
|
[
"MIT"
] | 47
|
2017-04-22T17:31:41.000Z
|
2022-01-11T08:52:37.000Z
|
class FlumineException(Exception):
    """Root of the flumine exception hierarchy.

    Every framework-specific error derives from this class, so callers
    can catch all flumine failures with a single except clause.
    """
class RunError(FlumineException):
    """Raised when an error occurs inside ``Flumine.run()``."""

    def __init__(self, message):
        # Zero-argument super() is equivalent under Python 3.
        super().__init__(message)
class ListenerError(FlumineException):
    """Raised when an error occurs inside a Listener.

    Bug fix: the constructor was previously (mis)spelled ``__int__`` --
    Python's integer-conversion hook -- so it was never invoked on
    instantiation.
    """

    def __init__(self, message):
        super(ListenerError, self).__init__(message)
class OrderError(FlumineException):
    """Raised when an incorrect order/order_type is requested."""

    def __init__(self, message):
        # Zero-argument super() is equivalent under Python 3.
        super().__init__(message)
class OrderUpdateError(FlumineException):
    """Raised when an order update is incorrect."""

    def __init__(self, message):
        # Zero-argument super() is equivalent under Python 3.
        super().__init__(message)
class OrderExecutionError(FlumineException):
    """Raised when the package hits an error during order execution."""
class ControlError(FlumineException):
    """Exception raised if order violates
    a control.
    """
    def __init__(self, message):
        super(ControlError, self).__init__(message)
| 20.607143
| 55
| 0.681109
| 114
| 1,154
| 6.535088
| 0.324561
| 0.201342
| 0.208054
| 0.177181
| 0.225503
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211438
| 1,154
| 55
| 56
| 20.981818
| 0.818681
| 0.267764
| 0
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.263158
| false
| 0.105263
| 0
| 0
| 0.631579
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
07c059aef44692ff8b71c52ac5b27657dcd482e7
| 47,225
|
py
|
Python
|
neutron/tests/unit/cisco/n1kv/test_n1kv_db.py
|
yagosys/neutron
|
005fec677c3bf8b2aa0df68c4aedc2b708ec7caf
|
[
"Apache-2.0"
] | 1
|
2016-01-13T14:29:07.000Z
|
2016-01-13T14:29:07.000Z
|
neutron/tests/unit/cisco/n1kv/test_n1kv_db.py
|
yagosys/neutron
|
005fec677c3bf8b2aa0df68c4aedc2b708ec7caf
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/cisco/n1kv/test_n1kv_db.py
|
yagosys/neutron
|
005fec677c3bf8b2aa0df68c4aedc2b708ec7caf
|
[
"Apache-2.0"
] | 3
|
2015-04-03T08:47:02.000Z
|
2020-02-05T10:40:45.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Juergen Brendel, Cisco Systems Inc.
# @author: Abhishek Raut, Cisco Systems Inc.
# @author: Rudrajit Tapadar, Cisco Systems Inc.
from sqlalchemy.orm import exc as s_exc
from testtools import matchers
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.plugins.cisco.common import cisco_constants
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
from neutron.plugins.cisco.db import n1kv_db_v2
from neutron.plugins.cisco.db import n1kv_models_v2
from neutron.tests import base
from neutron.tests.unit import test_db_plugin as test_plugin
# Physical network names used throughout the tests.
PHYS_NET = 'physnet1'
PHYS_NET_2 = 'physnet2'
# VLAN id window synced into the allocation table, plus shifted windows
# used to exercise re-syncing.
VLAN_MIN = 10
VLAN_MAX = 19
VLAN_RANGES = {PHYS_NET: [(VLAN_MIN, VLAN_MAX)]}
UPDATED_VLAN_RANGES = {PHYS_NET: [(VLAN_MIN + 20, VLAN_MAX + 20)],
                       PHYS_NET_2: [(VLAN_MIN + 40, VLAN_MAX + 40)]}
# VXLAN (overlay) id window and its shifted counterpart.
VXLAN_MIN = 5000
VXLAN_MAX = 5009
VXLAN_RANGES = [(VXLAN_MIN, VXLAN_MAX)]
UPDATED_VXLAN_RANGES = [(VXLAN_MIN + 20, VXLAN_MAX + 20)]
# Segment ranges as 'min-max' strings, including overlapping variants.
SEGMENT_RANGE = '200-220'
SEGMENT_RANGE_MIN_OVERLAP = '210-230'
SEGMENT_RANGE_MAX_OVERLAP = '190-209'
SEGMENT_RANGE_OVERLAP = '190-230'
# Fake network ids (26-char placeholders).
TEST_NETWORK_ID = 'abcdefghijklmnopqrstuvwxyz'
TEST_NETWORK_ID2 = 'abcdefghijklmnopqrstuvwxy2'
TEST_NETWORK_ID3 = 'abcdefghijklmnopqrstuvwxy3'
# Canned network/policy profiles covering the supported segment types.
TEST_NETWORK_PROFILE = {'name': 'test_profile',
                        'segment_type': 'vlan',
                        'physical_network': 'physnet1',
                        'segment_range': '10-19'}
TEST_NETWORK_PROFILE_2 = {'name': 'test_profile_2',
                          'segment_type': 'vlan',
                          'physical_network': 'physnet1',
                          'segment_range': SEGMENT_RANGE}
TEST_NETWORK_PROFILE_VXLAN = {'name': 'test_profile',
                              'segment_type': 'overlay',
                              'sub_type': 'native_vxlan',
                              'segment_range': '5000-5009',
                              'multicast_ip_range': '239.0.0.70-239.0.0.80'}
TEST_POLICY_PROFILE = {'id': '4a417990-76fb-11e2-bcfd-0800200c9a66',
                       'name': 'test_policy_profile'}
TEST_NETWORK_PROFILE_MULTI_SEGMENT = {'name': 'test_profile',
                                      'segment_type': 'multi-segment'}
TEST_NETWORK_PROFILE_VLAN_TRUNK = {'name': 'test_profile',
                                   'segment_type': 'trunk',
                                   'sub_type': 'vlan'}
TEST_NETWORK_PROFILE_VXLAN_TRUNK = {'name': 'test_profile',
                                    'segment_type': 'trunk',
                                    'sub_type': 'overlay'}
def _create_test_network_profile_if_not_there(session,
                                              profile=TEST_NETWORK_PROFILE):
    """Return the network profile named in *profile*, creating it on demand."""
    query = session.query(n1kv_models_v2.NetworkProfile)
    try:
        found = query.filter_by(name=profile['name']).one()
    except s_exc.NoResultFound:
        found = n1kv_db_v2.create_network_profile(session, profile)
    return found
def _create_test_policy_profile_if_not_there(session,
                                             profile=TEST_POLICY_PROFILE):
    """Return the policy profile named in *profile*, creating it on demand."""
    query = session.query(n1kv_models_v2.PolicyProfile)
    try:
        found = query.filter_by(name=profile['name']).one()
    except s_exc.NoResultFound:
        found = n1kv_db_v2.create_policy_profile(profile)
    return found
class VlanAllocationsTest(base.BaseTestCase):
    """Exercise VLAN allocation bookkeeping in the N1KV database layer."""

    def setUp(self):
        super(VlanAllocationsTest, self).setUp()
        db.configure_db()
        self.session = db.get_session()
        n1kv_db_v2.sync_vlan_allocations(self.session, VLAN_RANGES)
        self.addCleanup(db.clear_db)

    def _assert_vlan_id_not_found(self, physical_network, vlan_id):
        # Looking up an id outside every synced range must raise.
        self.assertRaises(c_exc.VlanIDNotFound,
                          n1kv_db_v2.get_vlan_allocation,
                          self.session,
                          physical_network,
                          vlan_id)

    def _assert_unallocated(self, physical_network, vlan_id):
        # The id is tracked but must not be marked allocated.
        self.assertFalse(n1kv_db_v2.get_vlan_allocation(
            self.session, physical_network, vlan_id).allocated)

    def test_sync_vlan_allocations_outside_segment_range(self):
        """Ids just outside the active ranges are never tracked."""
        for vid in (VLAN_MIN - 1, VLAN_MAX + 1):
            self._assert_vlan_id_not_found(PHYS_NET, vid)
        n1kv_db_v2.sync_vlan_allocations(self.session, UPDATED_VLAN_RANGES)
        for vid in (VLAN_MIN + 20 - 1, VLAN_MAX + 20 + 1):
            self._assert_vlan_id_not_found(PHYS_NET, vid)
        for vid in (VLAN_MIN + 40 - 1, VLAN_MAX + 40 + 1):
            self._assert_vlan_id_not_found(PHYS_NET_2, vid)
        # Re-syncing the original ranges drops the updated windows again.
        n1kv_db_v2.sync_vlan_allocations(self.session, VLAN_RANGES)
        for vid in (VLAN_MIN + 20, VLAN_MIN + 20, VLAN_MAX + 20):
            self._assert_vlan_id_not_found(PHYS_NET_2, vid)

    def test_sync_vlan_allocations_unallocated_vlans(self):
        """Freshly synced ids exist in the table but are unallocated."""
        for vid in (VLAN_MIN, VLAN_MIN + 1, VLAN_MAX - 1, VLAN_MAX):
            self._assert_unallocated(PHYS_NET, vid)
        n1kv_db_v2.sync_vlan_allocations(self.session, UPDATED_VLAN_RANGES)
        for vid in (VLAN_MIN + 20, VLAN_MIN + 20 + 1,
                    VLAN_MAX + 20 - 1, VLAN_MAX + 20):
            self._assert_unallocated(PHYS_NET, vid)
        for vid in (VLAN_MIN + 40, VLAN_MIN + 40 + 1,
                    VLAN_MAX + 40 - 1, VLAN_MAX + 40):
            self._assert_unallocated(PHYS_NET_2, vid)

    def test_vlan_pool(self):
        """Drain the pool, verify exhaustion, then release and re-reserve."""
        vlan_ids = set()
        profile = _create_test_network_profile_if_not_there(self.session)
        for _ in xrange(VLAN_MIN, VLAN_MAX + 1):
            (physical_network, seg_type,
             vlan_id, m_ip) = n1kv_db_v2.reserve_vlan(self.session, profile)
            self.assertEqual(physical_network, PHYS_NET)
            self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1))
            self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1))
            vlan_ids.add(vlan_id)
        # Pool exhausted: a further reservation must fail.
        self.assertRaises(n_exc.NoNetworkAvailable,
                          n1kv_db_v2.reserve_vlan,
                          self.session,
                          profile)
        n1kv_db_v2.release_vlan(self.session, PHYS_NET, vlan_ids.pop(),
                                VLAN_RANGES)
        physical_network, seg_type, vlan_id, m_ip = (
            n1kv_db_v2.reserve_vlan(self.session, profile))
        self.assertEqual(physical_network, PHYS_NET)
        self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1))
        self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1))
        vlan_ids.add(vlan_id)
        for vlan_id in vlan_ids:
            n1kv_db_v2.release_vlan(self.session, PHYS_NET, vlan_id,
                                    VLAN_RANGES)

    def test_specific_vlan_inside_pool(self):
        """Reserve/release a specific id that lies inside the synced range."""
        vlan_id = VLAN_MIN + 5
        self._assert_unallocated(PHYS_NET, vlan_id)
        n1kv_db_v2.reserve_specific_vlan(self.session, PHYS_NET, vlan_id)
        self.assertTrue(n1kv_db_v2.get_vlan_allocation(
            self.session, PHYS_NET, vlan_id).allocated)
        # Double reservation of the same id must fail.
        self.assertRaises(n_exc.VlanIdInUse,
                          n1kv_db_v2.reserve_specific_vlan,
                          self.session,
                          PHYS_NET,
                          vlan_id)
        n1kv_db_v2.release_vlan(self.session, PHYS_NET, vlan_id, VLAN_RANGES)
        self._assert_unallocated(PHYS_NET, vlan_id)

    def test_specific_vlan_outside_pool(self):
        """Reserve/release a specific id outside the synced range; releasing
        it removes the row entirely."""
        vlan_id = VLAN_MAX + 5
        self._assert_vlan_id_not_found(PHYS_NET, vlan_id)
        n1kv_db_v2.reserve_specific_vlan(self.session, PHYS_NET, vlan_id)
        self.assertTrue(n1kv_db_v2.get_vlan_allocation(
            self.session, PHYS_NET, vlan_id).allocated)
        self.assertRaises(n_exc.VlanIdInUse,
                          n1kv_db_v2.reserve_specific_vlan,
                          self.session,
                          PHYS_NET,
                          vlan_id)
        n1kv_db_v2.release_vlan(self.session, PHYS_NET, vlan_id, VLAN_RANGES)
        self._assert_vlan_id_not_found(PHYS_NET, vlan_id)
class VxlanAllocationsTest(base.BaseTestCase,
                           n1kv_db_v2.NetworkProfile_db_mixin):
    """Exercise VXLAN allocation bookkeeping in the N1KV database layer."""

    def setUp(self):
        super(VxlanAllocationsTest, self).setUp()
        db.configure_db()
        self.session = db.get_session()
        n1kv_db_v2.sync_vxlan_allocations(self.session, VXLAN_RANGES)
        self.addCleanup(db.clear_db)

    def _assert_no_allocation(self, vxlan_id):
        # Ids outside every synced range have no row at all.
        self.assertIsNone(n1kv_db_v2.get_vxlan_allocation(self.session,
                                                          vxlan_id))

    def _assert_unallocated(self, vxlan_id):
        # The id is tracked but must not be marked allocated.
        self.assertFalse(n1kv_db_v2.get_vxlan_allocation(
            self.session, vxlan_id).allocated)

    def test_sync_vxlan_allocations_outside_segment_range(self):
        """Ids just outside the active ranges are never tracked."""
        for vid in (VXLAN_MIN - 1, VXLAN_MAX + 1):
            self._assert_no_allocation(vid)
        n1kv_db_v2.sync_vxlan_allocations(self.session, UPDATED_VXLAN_RANGES)
        for vid in (VXLAN_MIN + 20 - 1, VXLAN_MAX + 20 + 1):
            self._assert_no_allocation(vid)

    def test_sync_vxlan_allocations_unallocated_vxlans(self):
        """Freshly synced ids exist in the table but are unallocated."""
        for vid in (VXLAN_MIN, VXLAN_MIN + 1, VXLAN_MAX - 1, VXLAN_MAX):
            self._assert_unallocated(vid)
        n1kv_db_v2.sync_vxlan_allocations(self.session, UPDATED_VXLAN_RANGES)
        for vid in (VXLAN_MIN + 20, VXLAN_MIN + 20 + 1,
                    VXLAN_MAX + 20 - 1, VXLAN_MAX + 20):
            self._assert_unallocated(vid)

    def test_vxlan_pool(self):
        """Drain the pool, verify exhaustion, then release and re-reserve."""
        vxlan_ids = set()
        profile = n1kv_db_v2.create_network_profile(self.session,
                                                    TEST_NETWORK_PROFILE_VXLAN)
        for _ in xrange(VXLAN_MIN, VXLAN_MAX + 1):
            # reserve_vxlan returns a tuple; index 2 is the segment id.
            vxlan_id = n1kv_db_v2.reserve_vxlan(self.session, profile)[2]
            self.assertThat(vxlan_id, matchers.GreaterThan(VXLAN_MIN - 1))
            self.assertThat(vxlan_id, matchers.LessThan(VXLAN_MAX + 1))
            vxlan_ids.add(vxlan_id)
        # Pool exhausted: a further reservation must fail.
        self.assertRaises(n_exc.NoNetworkAvailable,
                          n1kv_db_v2.reserve_vxlan,
                          self.session,
                          profile)
        n1kv_db_v2.release_vxlan(self.session, vxlan_ids.pop(), VXLAN_RANGES)
        vxlan_id = n1kv_db_v2.reserve_vxlan(self.session, profile)[2]
        self.assertThat(vxlan_id, matchers.GreaterThan(VXLAN_MIN - 1))
        self.assertThat(vxlan_id, matchers.LessThan(VXLAN_MAX + 1))
        vxlan_ids.add(vxlan_id)
        for vxlan_id in vxlan_ids:
            n1kv_db_v2.release_vxlan(self.session, vxlan_id, VXLAN_RANGES)
        n1kv_db_v2.delete_network_profile(self.session, profile.id)

    def test_specific_vxlan_inside_pool(self):
        """Reserve/release a specific id that lies inside the synced range."""
        vxlan_id = VXLAN_MIN + 5
        self._assert_unallocated(vxlan_id)
        n1kv_db_v2.reserve_specific_vxlan(self.session, vxlan_id)
        self.assertTrue(n1kv_db_v2.get_vxlan_allocation(
            self.session, vxlan_id).allocated)
        # Double reservation of the same id must fail.
        self.assertRaises(c_exc.VxlanIdInUse,
                          n1kv_db_v2.reserve_specific_vxlan,
                          self.session,
                          vxlan_id)
        n1kv_db_v2.release_vxlan(self.session, vxlan_id, VXLAN_RANGES)
        self._assert_unallocated(vxlan_id)

    def test_specific_vxlan_outside_pool(self):
        """Reserve/release a specific id outside the synced range; releasing
        it removes the row entirely."""
        vxlan_id = VXLAN_MAX + 5
        self._assert_no_allocation(vxlan_id)
        n1kv_db_v2.reserve_specific_vxlan(self.session, vxlan_id)
        self.assertTrue(n1kv_db_v2.get_vxlan_allocation(
            self.session, vxlan_id).allocated)
        self.assertRaises(c_exc.VxlanIdInUse,
                          n1kv_db_v2.reserve_specific_vxlan,
                          self.session,
                          vxlan_id)
        n1kv_db_v2.release_vxlan(self.session, vxlan_id, VXLAN_RANGES)
        self._assert_no_allocation(vxlan_id)
class NetworkBindingsTest(test_plugin.NeutronDbPluginV2TestCase):
def setUp(self):
super(NetworkBindingsTest, self).setUp()
db.configure_db()
self.session = db.get_session()
self.addCleanup(db.clear_db)
def test_add_network_binding(self):
with self.network() as network:
TEST_NETWORK_ID = network['network']['id']
self.assertRaises(c_exc.NetworkBindingNotFound,
n1kv_db_v2.get_network_binding,
self.session,
TEST_NETWORK_ID)
p = _create_test_network_profile_if_not_there(self.session)
n1kv_db_v2.add_network_binding(
self.session, TEST_NETWORK_ID, 'vlan',
PHYS_NET, 1234, '0.0.0.0', p.id, None)
binding = n1kv_db_v2.get_network_binding(
self.session, TEST_NETWORK_ID)
self.assertIsNotNone(binding)
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
self.assertEqual(binding.network_type, 'vlan')
self.assertEqual(binding.physical_network, PHYS_NET)
self.assertEqual(binding.segmentation_id, 1234)
def test_create_multi_segment_network(self):
with self.network() as network:
TEST_NETWORK_ID = network['network']['id']
self.assertRaises(c_exc.NetworkBindingNotFound,
n1kv_db_v2.get_network_binding,
self.session,
TEST_NETWORK_ID)
p = _create_test_network_profile_if_not_there(
self.session,
TEST_NETWORK_PROFILE_MULTI_SEGMENT)
n1kv_db_v2.add_network_binding(
self.session, TEST_NETWORK_ID, 'multi-segment',
None, 0, '0.0.0.0', p.id, None)
binding = n1kv_db_v2.get_network_binding(
self.session, TEST_NETWORK_ID)
self.assertIsNotNone(binding)
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
self.assertEqual(binding.network_type, 'multi-segment')
self.assertIsNone(binding.physical_network)
self.assertEqual(binding.segmentation_id, 0)
def test_add_multi_segment_binding(self):
with self.network() as network:
TEST_NETWORK_ID = network['network']['id']
self.assertRaises(c_exc.NetworkBindingNotFound,
n1kv_db_v2.get_network_binding,
self.session,
TEST_NETWORK_ID)
p = _create_test_network_profile_if_not_there(
self.session,
TEST_NETWORK_PROFILE_MULTI_SEGMENT)
n1kv_db_v2.add_network_binding(
self.session, TEST_NETWORK_ID, 'multi-segment',
None, 0, '0.0.0.0', p.id,
[(TEST_NETWORK_ID2, TEST_NETWORK_ID3)])
binding = n1kv_db_v2.get_network_binding(
self.session, TEST_NETWORK_ID)
self.assertIsNotNone(binding)
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
self.assertEqual(binding.network_type, 'multi-segment')
self.assertIsNone(binding.physical_network)
self.assertEqual(binding.segmentation_id, 0)
ms_binding = (n1kv_db_v2.get_multi_segment_network_binding(
self.session, TEST_NETWORK_ID,
(TEST_NETWORK_ID2, TEST_NETWORK_ID3)))
self.assertIsNotNone(ms_binding)
self.assertEqual(ms_binding.multi_segment_id, TEST_NETWORK_ID)
self.assertEqual(ms_binding.segment1_id, TEST_NETWORK_ID2)
self.assertEqual(ms_binding.segment2_id, TEST_NETWORK_ID3)
ms_members = (n1kv_db_v2.get_multi_segment_members(
self.session, TEST_NETWORK_ID))
self.assertEqual(ms_members,
[(TEST_NETWORK_ID2, TEST_NETWORK_ID3)])
self.assertTrue(n1kv_db_v2.is_multi_segment_member(
self.session, TEST_NETWORK_ID2))
self.assertTrue(n1kv_db_v2.is_multi_segment_member(
self.session, TEST_NETWORK_ID3))
n1kv_db_v2.del_multi_segment_binding(
self.session, TEST_NETWORK_ID,
[(TEST_NETWORK_ID2, TEST_NETWORK_ID3)])
ms_members = (n1kv_db_v2.get_multi_segment_members(
self.session, TEST_NETWORK_ID))
self.assertEqual(ms_members, [])
def test_create_vlan_trunk_network(self):
with self.network() as network:
TEST_NETWORK_ID = network['network']['id']
self.assertRaises(c_exc.NetworkBindingNotFound,
n1kv_db_v2.get_network_binding,
self.session,
TEST_NETWORK_ID)
p = _create_test_network_profile_if_not_there(
self.session,
TEST_NETWORK_PROFILE_VLAN_TRUNK)
n1kv_db_v2.add_network_binding(
self.session, TEST_NETWORK_ID, 'trunk',
None, 0, '0.0.0.0', p.id, None)
binding = n1kv_db_v2.get_network_binding(
self.session, TEST_NETWORK_ID)
self.assertIsNotNone(binding)
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
self.assertEqual(binding.network_type, 'trunk')
self.assertIsNone(binding.physical_network)
self.assertEqual(binding.segmentation_id, 0)
def test_create_vxlan_trunk_network(self):
with self.network() as network:
TEST_NETWORK_ID = network['network']['id']
self.assertRaises(c_exc.NetworkBindingNotFound,
n1kv_db_v2.get_network_binding,
self.session,
TEST_NETWORK_ID)
p = _create_test_network_profile_if_not_there(
self.session,
TEST_NETWORK_PROFILE_VXLAN_TRUNK)
n1kv_db_v2.add_network_binding(
self.session, TEST_NETWORK_ID, 'trunk',
None, 0, '0.0.0.0', p.id, None)
binding = n1kv_db_v2.get_network_binding(
self.session, TEST_NETWORK_ID)
self.assertIsNotNone(binding)
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
self.assertEqual(binding.network_type, 'trunk')
self.assertIsNone(binding.physical_network)
self.assertEqual(binding.segmentation_id, 0)
def test_add_vlan_trunk_binding(self):
with self.network() as network1:
with self.network() as network2:
TEST_NETWORK_ID = network1['network']['id']
self.assertRaises(c_exc.NetworkBindingNotFound,
n1kv_db_v2.get_network_binding,
self.session,
TEST_NETWORK_ID)
TEST_NETWORK_ID2 = network2['network']['id']
self.assertRaises(c_exc.NetworkBindingNotFound,
n1kv_db_v2.get_network_binding,
self.session,
TEST_NETWORK_ID2)
p_v = _create_test_network_profile_if_not_there(self.session)
n1kv_db_v2.add_network_binding(
self.session, TEST_NETWORK_ID2, 'vlan',
PHYS_NET, 1234, '0.0.0.0', p_v.id, None)
p = _create_test_network_profile_if_not_there(
self.session,
TEST_NETWORK_PROFILE_VLAN_TRUNK)
n1kv_db_v2.add_network_binding(
self.session, TEST_NETWORK_ID, 'trunk',
None, 0, '0.0.0.0', p.id, [(TEST_NETWORK_ID2, 0)])
binding = n1kv_db_v2.get_network_binding(
self.session, TEST_NETWORK_ID)
self.assertIsNotNone(binding)
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
self.assertEqual(binding.network_type, 'trunk')
self.assertEqual(binding.physical_network, PHYS_NET)
self.assertEqual(binding.segmentation_id, 0)
t_binding = (n1kv_db_v2.get_trunk_network_binding(
self.session, TEST_NETWORK_ID,
(TEST_NETWORK_ID2, 0)))
self.assertIsNotNone(t_binding)
self.assertEqual(t_binding.trunk_segment_id, TEST_NETWORK_ID)
self.assertEqual(t_binding.segment_id, TEST_NETWORK_ID2)
self.assertEqual(t_binding.dot1qtag, '0')
t_members = (n1kv_db_v2.get_trunk_members(
self.session, TEST_NETWORK_ID))
self.assertEqual(t_members,
[(TEST_NETWORK_ID2, '0')])
self.assertTrue(n1kv_db_v2.is_trunk_member(
self.session, TEST_NETWORK_ID2))
n1kv_db_v2.del_trunk_segment_binding(
self.session, TEST_NETWORK_ID,
[(TEST_NETWORK_ID2, '0')])
t_members = (n1kv_db_v2.get_multi_segment_members(
self.session, TEST_NETWORK_ID))
self.assertEqual(t_members, [])
def test_add_vxlan_trunk_binding(self):
with self.network() as network1:
with self.network() as network2:
TEST_NETWORK_ID = network1['network']['id']
self.assertRaises(c_exc.NetworkBindingNotFound,
n1kv_db_v2.get_network_binding,
self.session,
TEST_NETWORK_ID)
TEST_NETWORK_ID2 = network2['network']['id']
self.assertRaises(c_exc.NetworkBindingNotFound,
n1kv_db_v2.get_network_binding,
self.session,
TEST_NETWORK_ID2)
p_v = _create_test_network_profile_if_not_there(
self.session, TEST_NETWORK_PROFILE_VXLAN_TRUNK)
n1kv_db_v2.add_network_binding(
self.session, TEST_NETWORK_ID2, 'overlay',
None, 5100, '224.10.10.10', p_v.id, None)
p = _create_test_network_profile_if_not_there(
self.session,
TEST_NETWORK_PROFILE_VXLAN_TRUNK)
n1kv_db_v2.add_network_binding(
self.session, TEST_NETWORK_ID, 'trunk',
None, 0, '0.0.0.0', p.id,
[(TEST_NETWORK_ID2, 5)])
binding = n1kv_db_v2.get_network_binding(
self.session, TEST_NETWORK_ID)
self.assertIsNotNone(binding)
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
self.assertEqual(binding.network_type, 'trunk')
self.assertIsNone(binding.physical_network)
self.assertEqual(binding.segmentation_id, 0)
t_binding = (n1kv_db_v2.get_trunk_network_binding(
self.session, TEST_NETWORK_ID,
(TEST_NETWORK_ID2, '5')))
self.assertIsNotNone(t_binding)
self.assertEqual(t_binding.trunk_segment_id, TEST_NETWORK_ID)
self.assertEqual(t_binding.segment_id, TEST_NETWORK_ID2)
self.assertEqual(t_binding.dot1qtag, '5')
t_members = (n1kv_db_v2.get_trunk_members(
self.session, TEST_NETWORK_ID))
self.assertEqual(t_members,
[(TEST_NETWORK_ID2, '5')])
self.assertTrue(n1kv_db_v2.is_trunk_member(
self.session, TEST_NETWORK_ID2))
n1kv_db_v2.del_trunk_segment_binding(
self.session, TEST_NETWORK_ID,
[(TEST_NETWORK_ID2, '5')])
t_members = (n1kv_db_v2.get_multi_segment_members(
self.session, TEST_NETWORK_ID))
self.assertEqual(t_members, [])
class NetworkProfileTests(base.BaseTestCase,
                          n1kv_db_v2.NetworkProfile_db_mixin):
    """CRUD tests for network profiles in the n1kv database layer."""

    def setUp(self):
        # Fresh DB and session per test; clear_db runs on teardown.
        super(NetworkProfileTests, self).setUp()
        db.configure_db()
        self.session = db.get_session()
        self.addCleanup(db.clear_db)

    def test_create_network_profile(self):
        """Create a profile and verify every persisted column round-trips."""
        _db_profile = n1kv_db_v2.create_network_profile(self.session,
                                                        TEST_NETWORK_PROFILE)
        self.assertIsNotNone(_db_profile)
        db_profile = (self.session.query(n1kv_models_v2.NetworkProfile).
                      filter_by(name=TEST_NETWORK_PROFILE['name']).one())
        self.assertIsNotNone(db_profile)
        self.assertEqual(_db_profile.id, db_profile.id)
        self.assertEqual(_db_profile.name, db_profile.name)
        self.assertEqual(_db_profile.segment_type, db_profile.segment_type)
        self.assertEqual(_db_profile.segment_range, db_profile.segment_range)
        self.assertEqual(_db_profile.multicast_ip_index,
                         db_profile.multicast_ip_index)
        self.assertEqual(_db_profile.multicast_ip_range,
                         db_profile.multicast_ip_range)
        n1kv_db_v2.delete_network_profile(self.session, _db_profile.id)

    def test_create_multi_segment_network_profile(self):
        """Create a multi-segment profile and verify the persisted columns."""
        _db_profile = (n1kv_db_v2.create_network_profile(
            self.session, TEST_NETWORK_PROFILE_MULTI_SEGMENT))
        self.assertIsNotNone(_db_profile)
        db_profile = (
            self.session.query(
                n1kv_models_v2.NetworkProfile).filter_by(
                    name=TEST_NETWORK_PROFILE_MULTI_SEGMENT['name'])
            .one())
        self.assertIsNotNone(db_profile)
        self.assertEqual(_db_profile.id, db_profile.id)
        self.assertEqual(_db_profile.name, db_profile.name)
        self.assertEqual(_db_profile.segment_type, db_profile.segment_type)
        self.assertEqual(_db_profile.segment_range, db_profile.segment_range)
        self.assertEqual(_db_profile.multicast_ip_index,
                         db_profile.multicast_ip_index)
        self.assertEqual(_db_profile.multicast_ip_range,
                         db_profile.multicast_ip_range)
        n1kv_db_v2.delete_network_profile(self.session, _db_profile.id)

    def test_create_vlan_trunk_network_profile(self):
        """Create a VLAN trunk profile; also checks the sub_type column."""
        _db_profile = (n1kv_db_v2.create_network_profile(
            self.session, TEST_NETWORK_PROFILE_VLAN_TRUNK))
        self.assertIsNotNone(_db_profile)
        db_profile = (self.session.query(n1kv_models_v2.NetworkProfile).
                      filter_by(name=TEST_NETWORK_PROFILE_VLAN_TRUNK['name']).
                      one())
        self.assertIsNotNone(db_profile)
        self.assertEqual(_db_profile.id, db_profile.id)
        self.assertEqual(_db_profile.name, db_profile.name)
        self.assertEqual(_db_profile.segment_type, db_profile.segment_type)
        self.assertEqual(_db_profile.segment_range, db_profile.segment_range)
        self.assertEqual(_db_profile.multicast_ip_index,
                         db_profile.multicast_ip_index)
        self.assertEqual(_db_profile.multicast_ip_range,
                         db_profile.multicast_ip_range)
        self.assertEqual(_db_profile.sub_type, db_profile.sub_type)
        n1kv_db_v2.delete_network_profile(self.session, _db_profile.id)

    def test_create_vxlan_trunk_network_profile(self):
        """Create a VXLAN trunk profile; also checks the sub_type column."""
        _db_profile = (n1kv_db_v2.create_network_profile(
            self.session, TEST_NETWORK_PROFILE_VXLAN_TRUNK))
        self.assertIsNotNone(_db_profile)
        db_profile = (self.session.query(n1kv_models_v2.NetworkProfile).
                      filter_by(name=TEST_NETWORK_PROFILE_VXLAN_TRUNK['name']).
                      one())
        self.assertIsNotNone(db_profile)
        self.assertEqual(_db_profile.id, db_profile.id)
        self.assertEqual(_db_profile.name, db_profile.name)
        self.assertEqual(_db_profile.segment_type, db_profile.segment_type)
        self.assertEqual(_db_profile.segment_range, db_profile.segment_range)
        self.assertEqual(_db_profile.multicast_ip_index,
                         db_profile.multicast_ip_index)
        self.assertEqual(_db_profile.multicast_ip_range,
                         db_profile.multicast_ip_range)
        self.assertEqual(_db_profile.sub_type, db_profile.sub_type)
        n1kv_db_v2.delete_network_profile(self.session, _db_profile.id)

    def test_create_network_profile_overlap(self):
        """Profiles whose segment ranges overlap must raise InvalidInput."""
        _db_profile = n1kv_db_v2.create_network_profile(self.session,
                                                        TEST_NETWORK_PROFILE_2)
        ctx = context.get_admin_context()
        # NOTE(review): the assignments below mutate the module-level
        # TEST_NETWORK_PROFILE_2 dict in place and never restore it, so
        # other tests sharing that constant can observe the changes.
        # Overlap at the range minimum.
        TEST_NETWORK_PROFILE_2['name'] = 'net-profile-min-overlap'
        TEST_NETWORK_PROFILE_2['segment_range'] = SEGMENT_RANGE_MIN_OVERLAP
        test_net_profile = {'network_profile': TEST_NETWORK_PROFILE_2}
        self.assertRaises(n_exc.InvalidInput,
                          self.create_network_profile,
                          ctx,
                          test_net_profile)
        # Overlap at the range maximum.
        TEST_NETWORK_PROFILE_2['name'] = 'net-profile-max-overlap'
        TEST_NETWORK_PROFILE_2['segment_range'] = SEGMENT_RANGE_MAX_OVERLAP
        test_net_profile = {'network_profile': TEST_NETWORK_PROFILE_2}
        self.assertRaises(n_exc.InvalidInput,
                          self.create_network_profile,
                          ctx,
                          test_net_profile)
        # Full overlap.
        TEST_NETWORK_PROFILE_2['name'] = 'net-profile-overlap'
        TEST_NETWORK_PROFILE_2['segment_range'] = SEGMENT_RANGE_OVERLAP
        test_net_profile = {'network_profile': TEST_NETWORK_PROFILE_2}
        self.assertRaises(n_exc.InvalidInput,
                          self.create_network_profile,
                          ctx,
                          test_net_profile)
        n1kv_db_v2.delete_network_profile(self.session, _db_profile.id)

    def test_delete_network_profile(self):
        """A deleted profile must no longer be queryable by name."""
        try:
            profile = (self.session.query(n1kv_models_v2.NetworkProfile).
                       filter_by(name=TEST_NETWORK_PROFILE['name']).one())
        except s_exc.NoResultFound:
            profile = n1kv_db_v2.create_network_profile(self.session,
                                                        TEST_NETWORK_PROFILE)
        n1kv_db_v2.delete_network_profile(self.session, profile.id)
        try:
            self.session.query(n1kv_models_v2.NetworkProfile).filter_by(
                name=TEST_NETWORK_PROFILE['name']).one()
        except s_exc.NoResultFound:
            # Expected: the row is gone.
            pass
        else:
            self.fail("Network Profile (%s) was not deleted" %
                      TEST_NETWORK_PROFILE['name'])

    def test_update_network_profile(self):
        """Updating a profile must persist the new name."""
        TEST_PROFILE_1 = {'name': 'test_profile_1'}
        profile = _create_test_network_profile_if_not_there(self.session)
        updated_profile = n1kv_db_v2.update_network_profile(self.session,
                                                            profile.id,
                                                            TEST_PROFILE_1)
        self.assertEqual(updated_profile.name, TEST_PROFILE_1['name'])
        n1kv_db_v2.delete_network_profile(self.session, profile.id)

    def test_get_network_profile(self):
        """get_network_profile must return the created profile by id."""
        profile = n1kv_db_v2.create_network_profile(self.session,
                                                    TEST_NETWORK_PROFILE)
        got_profile = n1kv_db_v2.get_network_profile(self.session, profile.id)
        self.assertEqual(profile.id, got_profile.id)
        self.assertEqual(profile.name, got_profile.name)
        n1kv_db_v2.delete_network_profile(self.session, profile.id)

    def test_get_network_profiles(self):
        """_get_network_profiles must list all created profiles."""
        # Seven non-overlapping VLAN ranges on the same physical network.
        test_profiles = [{'name': 'test_profile1',
                          'segment_type': 'vlan',
                          'physical_network': 'phys1',
                          'segment_range': '200-210'},
                         {'name': 'test_profile2',
                          'segment_type': 'vlan',
                          'physical_network': 'phys1',
                          'segment_range': '211-220'},
                         {'name': 'test_profile3',
                          'segment_type': 'vlan',
                          'physical_network': 'phys1',
                          'segment_range': '221-230'},
                         {'name': 'test_profile4',
                          'segment_type': 'vlan',
                          'physical_network': 'phys1',
                          'segment_range': '231-240'},
                         {'name': 'test_profile5',
                          'segment_type': 'vlan',
                          'physical_network': 'phys1',
                          'segment_range': '241-250'},
                         {'name': 'test_profile6',
                          'segment_type': 'vlan',
                          'physical_network': 'phys1',
                          'segment_range': '251-260'},
                         {'name': 'test_profile7',
                          'segment_type': 'vlan',
                          'physical_network': 'phys1',
                          'segment_range': '261-270'}]
        [n1kv_db_v2.create_network_profile(self.session, p)
         for p in test_profiles]
        # TODO(abhraut): Fix this test to work with real tenant_td
        profiles = n1kv_db_v2._get_network_profiles(db_session=self.session)
        self.assertEqual(len(test_profiles), len(list(profiles)))
class PolicyProfileTests(base.BaseTestCase):
    """CRUD tests for policy profiles in the n1kv database layer."""

    def setUp(self):
        # Fresh DB and session per test; clear_db runs on teardown.
        super(PolicyProfileTests, self).setUp()
        db.configure_db()
        self.session = db.get_session()
        self.addCleanup(db.clear_db)

    def test_create_policy_profile(self):
        """A created profile must be queryable by name with matching fields."""
        _db_profile = n1kv_db_v2.create_policy_profile(TEST_POLICY_PROFILE)
        self.assertIsNotNone(_db_profile)
        # Fix: the original called the query as `...one)()` -- a
        # parenthesized attribute lookup invoked afterwards. Equivalent
        # at runtime but clearly a typo; use a plain method call.
        db_profile = (self.session.query(n1kv_models_v2.PolicyProfile).
                      filter_by(name=TEST_POLICY_PROFILE['name']).one())
        self.assertIsNotNone(db_profile)
        # assertEqual reports the differing values on failure, unlike
        # assertTrue(a == b).
        self.assertEqual(_db_profile.id, db_profile.id)
        self.assertEqual(_db_profile.name, db_profile.name)

    def test_delete_policy_profile(self):
        """A deleted profile must no longer be queryable by name."""
        profile = _create_test_policy_profile_if_not_there(self.session)
        n1kv_db_v2.delete_policy_profile(profile.id)
        try:
            self.session.query(n1kv_models_v2.PolicyProfile).filter_by(
                name=TEST_POLICY_PROFILE['name']).one()
        except s_exc.NoResultFound:
            # Expected: the row is gone.
            pass
        else:
            self.fail("Policy Profile (%s) was not deleted" %
                      TEST_POLICY_PROFILE['name'])

    def test_update_policy_profile(self):
        """Updating a profile must persist the new name."""
        TEST_PROFILE_1 = {'name': 'test_profile_1'}
        profile = _create_test_policy_profile_if_not_there(self.session)
        updated_profile = n1kv_db_v2.update_policy_profile(self.session,
                                                           profile.id,
                                                           TEST_PROFILE_1)
        self.assertEqual(updated_profile.name, TEST_PROFILE_1['name'])

    def test_get_policy_profile(self):
        """get_policy_profile must return the created profile by id."""
        profile = _create_test_policy_profile_if_not_there(self.session)
        got_profile = n1kv_db_v2.get_policy_profile(self.session, profile.id)
        self.assertEqual(profile.id, got_profile.id)
        self.assertEqual(profile.name, got_profile.name)
class ProfileBindingTests(base.BaseTestCase,
                          n1kv_db_v2.NetworkProfile_db_mixin,
                          db_base_plugin_v2.CommonDbMixin):
    """Tests for tenant <-> profile binding records."""

    def setUp(self):
        # Fresh DB and session per test; clear_db runs on teardown.
        super(ProfileBindingTests, self).setUp()
        db.configure_db()
        self.session = db.get_session()
        self.addCleanup(db.clear_db)

    def _create_test_binding_if_not_there(self, tenant_id, profile_id,
                                          profile_type):
        """Return the existing binding row, creating it if absent."""
        try:
            _binding = (self.session.query(n1kv_models_v2.ProfileBinding).
                        filter_by(profile_type=profile_type,
                                  tenant_id=tenant_id,
                                  profile_id=profile_id).one())
        except s_exc.NoResultFound:
            _binding = n1kv_db_v2.create_profile_binding(self.session,
                                                         tenant_id,
                                                         profile_id,
                                                         profile_type)
        return _binding

    def test_create_profile_binding(self):
        """Creating a binding must yield exactly one matching row."""
        test_tenant_id = "d434dd90-76ec-11e2-bcfd-0800200c9a66"
        test_profile_id = "dd7b9741-76ec-11e2-bcfd-0800200c9a66"
        test_profile_type = "network"
        n1kv_db_v2.create_profile_binding(self.session,
                                          test_tenant_id,
                                          test_profile_id,
                                          test_profile_type)
        try:
            self.session.query(n1kv_models_v2.ProfileBinding).filter_by(
                profile_type=test_profile_type,
                tenant_id=test_tenant_id,
                profile_id=test_profile_id).one()
        except s_exc.MultipleResultsFound:
            self.fail("Bindings must be unique")
        except s_exc.NoResultFound:
            self.fail("Could not create Profile Binding")

    def test_get_profile_binding(self):
        """get_profile_binding must return the row keyed by tenant+profile."""
        test_tenant_id = "d434dd90-76ec-11e2-bcfd-0800200c9a66"
        test_profile_id = "dd7b9741-76ec-11e2-bcfd-0800200c9a66"
        test_profile_type = "network"
        self._create_test_binding_if_not_there(test_tenant_id,
                                               test_profile_id,
                                               test_profile_type)
        binding = n1kv_db_v2.get_profile_binding(self.session,
                                                 test_tenant_id,
                                                 test_profile_id)
        self.assertEqual(binding.tenant_id, test_tenant_id)
        self.assertEqual(binding.profile_id, test_profile_id)
        self.assertEqual(binding.profile_type, test_profile_type)

    def test_delete_profile_binding(self):
        """Deleting a binding must leave no matching rows."""
        test_tenant_id = "d434dd90-76ec-11e2-bcfd-0800200c9a66"
        test_profile_id = "dd7b9741-76ec-11e2-bcfd-0800200c9a66"
        test_profile_type = "network"
        self._create_test_binding_if_not_there(test_tenant_id,
                                               test_profile_id,
                                               test_profile_type)
        n1kv_db_v2.delete_profile_binding(self.session,
                                          test_tenant_id,
                                          test_profile_id)
        q = (self.session.query(n1kv_models_v2.ProfileBinding).filter_by(
             profile_type=test_profile_type,
             tenant_id=test_tenant_id,
             profile_id=test_profile_id))
        self.assertFalse(q.count())

    def test_default_tenant_replace(self):
        """Creating a profile must rebind TENANT_ID_NOT_SET placeholders
        to the requesting tenant."""
        ctx = context.get_admin_context()
        ctx.tenant_id = "d434dd90-76ec-11e2-bcfd-0800200c9a66"
        test_profile_id = "AAAAAAAA-76ec-11e2-bcfd-0800200c9a66"
        test_profile_type = "policy"
        n1kv_db_v2.create_profile_binding(self.session,
                                          cisco_constants.TENANT_ID_NOT_SET,
                                          test_profile_id,
                                          test_profile_type)
        network_profile = {"network_profile": TEST_NETWORK_PROFILE}
        test_network_profile = self.create_network_profile(ctx,
                                                           network_profile)
        binding = n1kv_db_v2.get_profile_binding(self.session,
                                                 ctx.tenant_id,
                                                 test_profile_id)
        # The placeholder binding must be gone...
        self.assertIsNone(n1kv_db_v2.get_profile_binding(
            self.session,
            cisco_constants.TENANT_ID_NOT_SET,
            test_profile_id))
        # ...and the surviving binding must belong to the real tenant.
        self.assertNotEqual(binding.tenant_id,
                            cisco_constants.TENANT_ID_NOT_SET)
        n1kv_db_v2.delete_network_profile(self.session,
                                          test_network_profile['id'])
| 49.295407
| 79
| 0.561355
| 4,923
| 47,225
| 4.978672
| 0.059923
| 0.079437
| 0.050265
| 0.034109
| 0.868135
| 0.845124
| 0.810771
| 0.787679
| 0.753407
| 0.73215
| 0
| 0.029818
| 0.364426
| 47,225
| 957
| 80
| 49.346917
| 0.786773
| 0.0173
| 0
| 0.67217
| 0
| 0
| 0.04471
| 0.01011
| 0
| 0
| 0
| 0.001045
| 0.213443
| 1
| 0.050708
| false
| 0.002358
| 0.014151
| 0
| 0.075472
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
07d79b550ef49e33f02e413c3d066a4b79600f4e
| 205
|
py
|
Python
|
bootpeg/__init__.py
|
eileen-kuehn/bootpeg
|
dffa7af380262d002433da70cab4a5fe00dee028
|
[
"MIT"
] | 5
|
2021-03-27T18:30:40.000Z
|
2022-03-20T21:58:30.000Z
|
bootpeg/__init__.py
|
eileen-kuehn/bootpeg
|
dffa7af380262d002433da70cab4a5fe00dee028
|
[
"MIT"
] | 36
|
2021-03-15T20:48:26.000Z
|
2022-01-19T15:20:52.000Z
|
bootpeg/__init__.py
|
eileen-kuehn/bootpeg
|
dffa7af380262d002433da70cab4a5fe00dee028
|
[
"MIT"
] | 2
|
2021-05-13T05:43:42.000Z
|
2021-09-01T18:36:25.000Z
|
"""bootpeg – the bootstrapping PEG parser"""
from .api import create_parser, import_parser, bootpeg_actions as actions
__all__ = ["create_parser", "import_parser", "actions"]
__version__ = "0.7.0-alpha"
| 29.285714
| 73
| 0.75122
| 28
| 205
| 5.071429
| 0.607143
| 0.169014
| 0.253521
| 0.338028
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016575
| 0.117073
| 205
| 6
| 74
| 34.166667
| 0.762431
| 0.185366
| 0
| 0
| 0
| 0
| 0.273292
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
07fe0b7b957dbfe1889d6931d9974e7e66c0da63
| 37
|
py
|
Python
|
PlayingWith/BooleanTests.py
|
Insave/Python
|
ee671b63d1068d0458aae88730a3f9b582c0da85
|
[
"MIT"
] | null | null | null |
PlayingWith/BooleanTests.py
|
Insave/Python
|
ee671b63d1068d0458aae88730a3f9b582c0da85
|
[
"MIT"
] | null | null | null |
PlayingWith/BooleanTests.py
|
Insave/Python
|
ee671b63d1068d0458aae88730a3f9b582c0da85
|
[
"MIT"
] | null | null | null |
# Demonstrate boolean operators: (5 > 4) is True, so the `or` is True
# and `not` flips the whole expression to False.
# print() already stringifies its argument, so the redundant str()
# wrapper from the original is dropped; output is identical ("False").
print(not ((5 > 4) or (3 == 5)))
| 37
| 37
| 0.459459
| 8
| 37
| 2.125
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 0.189189
| 37
| 1
| 37
| 37
| 0.433333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
ed5cd91f50c17a15ada1e9342834a98f1c67b542
| 10,073
|
py
|
Python
|
tests/unit/modules/test_vmc_security_groups.py
|
joechainz/salt-ext-modules-vmware
|
559d08318225076aa5cdbc0f960f3fcf1b04330f
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/modules/test_vmc_security_groups.py
|
joechainz/salt-ext-modules-vmware
|
559d08318225076aa5cdbc0f960f3fcf1b04330f
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/modules/test_vmc_security_groups.py
|
joechainz/salt-ext-modules-vmware
|
559d08318225076aa5cdbc0f960f3fcf1b04330f
|
[
"Apache-2.0"
] | null | null | null |
"""
Unit tests for vmc_security_groups execution module
"""
from unittest.mock import patch
import pytest
import saltext.vmware.modules.vmc_security_groups as vmc_security_groups
from saltext.vmware.utils import vmc_constants
@pytest.fixture
def security_groups_data_by_id(mock_vmc_request_call_api):
    """Fixture: a single security-group record as the VMC policy API
    returns it, installed as the mocked call_api return value.
    """
    data = {
        "expression": [
            {
                "member_type": "VirtualMachine",
                "key": "OSName",
                "operator": "EQUALS",
                "value": "Centos",
                "resource_type": "Condition",
                "id": "306e22a9-0060-4c11-8557-2ed927887e40",
                "path": "/infra/domains/cgw/groups/TEST_GROUP/condition-expressions/306e22a9-0060-4c11-8557-2ed927887e40",
                "relative_path": "306e22a9-0060-4c11-8557-2ed927887e40",
                "parent_path": "/infra/domains/cgw/groups/TEST_GROUP",
                "marked_for_delete": False,
                "overridden": False,
                "_protection": "NOT_PROTECTED",
            }
        ],
        "extended_expression": [],
        "reference": False,
        "resource_type": "Group",
        "id": "security_group_id",
        "display_name": "security_group_id",
        "description": "TEST Secority group",
        "path": "/infra/domains/cgw/groups/TEST_GROUP",
        "relative_path": "TEST_GROUP",
        "parent_path": "/infra/domains/cgw",
        "unique_id": "a6722585-da81-4609-be25-25cd7f7a89f2",
        "marked_for_delete": False,
        "overridden": False,
        "_create_time": 1618809345031,
        "_create_user": "pnaval@vmware.com",
        "_last_modified_time": 1618809345041,
        "_last_modified_user": "pnaval@vmware.com",
        "_system_owned": False,
        "_protection": "NOT_PROTECTED",
        "_revision": 0,
    }
    mock_vmc_request_call_api.return_value = data
    yield data
@pytest.fixture
def security_groups_data(mock_vmc_request_call_api, security_groups_data_by_id):
    """Fixture: a list-style API payload wrapping the single-group record."""
    payload = {"result_count": 1, "results": [security_groups_data_by_id]}
    mock_vmc_request_call_api.return_value = payload
    yield payload
def test_get_security_groups_should_return_api_response(security_groups_data):
    """get() must pass the API payload through unchanged."""
    response = vmc_security_groups.get(
        hostname="hostname",
        refresh_key="refresh_key",
        authorization_host="authorization_host",
        org_id="org_id",
        sddc_id="sddc_id",
        domain_id="domain_id",
        verify_ssl=False,
    )
    assert response == security_groups_data
def test_get_security_groups_called_with_url():
    """get() must issue a GET against the groups collection URL."""
    expected_url = (
        "https://hostname/vmc/reverse-proxy/api/orgs/org_id/sddcs/sddc_id/policy/api/"
        "v1/infra/domains/domain_id/groups"
    )
    with patch("saltext.vmware.utils.vmc_request.call_api", autospec=True) as vmc_call_api:
        # The return value is irrelevant here; only the outgoing call
        # matters, so the original's unused `result =` binding is dropped.
        vmc_security_groups.get(
            hostname="hostname",
            refresh_key="refresh_key",
            authorization_host="authorization_host",
            org_id="org_id",
            sddc_id="sddc_id",
            domain_id="domain_id",
            verify_ssl=False,
        )
        call_kwargs = vmc_call_api.mock_calls[0][-1]
        assert call_kwargs["url"] == expected_url
        assert call_kwargs["method"] == vmc_constants.GET_REQUEST_METHOD
def test_get_security_group_by_id_should_return_single_security_group(security_groups_data_by_id):
    """get_by_id() must return the single-group API payload unchanged."""
    response = vmc_security_groups.get_by_id(
        hostname="hostname",
        refresh_key="refresh_key",
        authorization_host="authorization_host",
        org_id="org_id",
        sddc_id="sddc_id",
        domain_id="domain_id",
        security_group_id="security_group_id",
        verify_ssl=False,
    )
    assert response == security_groups_data_by_id
def test_get_security_groups_by_id_called_with_url():
    """get_by_id() must issue a GET against the single-group URL."""
    expected_url = (
        "https://hostname/vmc/reverse-proxy/api/orgs/org_id/sddcs/sddc_id/policy/api/"
        "v1/infra/domains/domain_id/groups/security_group_id"
    )
    with patch("saltext.vmware.utils.vmc_request.call_api", autospec=True) as vmc_call_api:
        # The return value is irrelevant here; only the outgoing call
        # matters, so the original's unused `result =` binding is dropped.
        vmc_security_groups.get_by_id(
            hostname="hostname",
            refresh_key="refresh_key",
            authorization_host="authorization_host",
            org_id="org_id",
            sddc_id="sddc_id",
            domain_id="domain_id",
            security_group_id="security_group_id",
            verify_ssl=False,
        )
        call_kwargs = vmc_call_api.mock_calls[0][-1]
        assert call_kwargs["url"] == expected_url
        assert call_kwargs["method"] == vmc_constants.GET_REQUEST_METHOD
def test_delete_security_group_when_api_should_return_api_response(mock_vmc_request_call_api):
    """delete() must return the API payload unchanged."""
    expected = {"message": "Security group deleted successfully"}
    mock_vmc_request_call_api.return_value = expected
    response = vmc_security_groups.delete(
        hostname="hostname",
        refresh_key="refresh_key",
        authorization_host="authorization_host",
        org_id="org_id",
        sddc_id="sddc_id",
        domain_id="domain_id",
        security_group_id="security_group_id",
        verify_ssl=False,
    )
    assert response == expected
def test_delete_security_groups_by_id_called_with_url():
    """delete() must issue a DELETE against the single-group URL."""
    expected_url = (
        "https://hostname/vmc/reverse-proxy/api/orgs/org_id/sddcs/sddc_id/policy/api/"
        "v1/infra/domains/domain_id/groups/security_group_id"
    )
    with patch("saltext.vmware.utils.vmc_request.call_api", autospec=True) as vmc_call_api:
        # The return value is irrelevant here; only the outgoing call
        # matters, so the original's unused `result =` binding is dropped.
        vmc_security_groups.delete(
            hostname="hostname",
            refresh_key="refresh_key",
            authorization_host="authorization_host",
            org_id="org_id",
            sddc_id="sddc_id",
            domain_id="domain_id",
            security_group_id="security_group_id",
            verify_ssl=False,
        )
        call_kwargs = vmc_call_api.mock_calls[0][-1]
        assert call_kwargs["url"] == expected_url
        assert call_kwargs["method"] == vmc_constants.DELETE_REQUEST_METHOD
def test_create_security_group_when_api_should_return_api_response(mock_vmc_request_call_api):
    """create() must return the API payload unchanged."""
    expected = {"message": "Security group created successfully"}
    mock_vmc_request_call_api.return_value = expected
    response = vmc_security_groups.create(
        hostname="hostname",
        refresh_key="refresh_key",
        authorization_host="authorization_host",
        org_id="org_id",
        sddc_id="sddc_id",
        domain_id="domain_id",
        security_group_id="security_group_id",
        verify_ssl=False,
    )
    assert response == expected
def test_create_security_groups_by_id_called_with_url():
    """create() must issue a PUT against the single-group URL."""
    expected_url = (
        "https://hostname/vmc/reverse-proxy/api/orgs/org_id/sddcs/sddc_id/policy/api/"
        "v1/infra/domains/domain_id/groups/security_group_id"
    )
    with patch("saltext.vmware.utils.vmc_request.call_api", autospec=True) as vmc_call_api:
        # The return value is irrelevant here; only the outgoing call
        # matters, so the original's unused `result =` binding is dropped.
        vmc_security_groups.create(
            hostname="hostname",
            refresh_key="refresh_key",
            authorization_host="authorization_host",
            org_id="org_id",
            sddc_id="sddc_id",
            domain_id="domain_id",
            security_group_id="security_group_id",
            verify_ssl=False,
        )
        call_kwargs = vmc_call_api.mock_calls[0][-1]
        assert call_kwargs["url"] == expected_url
        assert call_kwargs["method"] == vmc_constants.PUT_REQUEST_METHOD
@pytest.mark.parametrize(
    "actual_args, expected_payload",
    [
        # all actual args are None
        (
            {},
            {
                "id": "security_group_id",
                "display_name": "security_group_id",
                "description": "",
                "expression": [],
                "tags": [],
            },
        ),
        # allow none have values
        (
            {"tags": [{"tag": "tag1", "scope": "scope1"}]},
            {
                "id": "security_group_id",
                "display_name": "security_group_id",
                "description": "",
                "expression": [],
                "tags": [{"tag": "tag1", "scope": "scope1"}],
            },
        ),
        # all args have values
        (
            {
                "description": "VMs Security groups",
                "expression": [
                    {
                        "member_type": "VirtualMachine",
                        "key": "OSName",
                        "operator": "EQUALS",
                        "value": "Centos",
                        "resource_type": "Condition",
                    }
                ],
                "tags": [{"tag": "tag1", "scope": "scope1"}],
            },
            {
                "id": "security_group_id",
                "display_name": "security_group_id",
                "description": "VMs Security groups",
                "expression": [
                    {
                        "member_type": "VirtualMachine",
                        "key": "OSName",
                        "operator": "EQUALS",
                        "value": "Centos",
                        "resource_type": "Condition",
                    }
                ],
                "tags": [{"tag": "tag1", "scope": "scope1"}],
            },
        ),
    ],
)
def test_assert_security_groups_create_should_correctly_filter_args(actual_args, expected_payload):
    """create() must build the request body from the optional args only,
    defaulting description/expression/tags when they are omitted.
    """
    # Mandatory identity/connection arguments shared by every case.
    common_actual_args = {
        "hostname": "hostname",
        "refresh_key": "refresh_key",
        "authorization_host": "authorization_host",
        "org_id": "org_id",
        "sddc_id": "sddc_id",
        "domain_id": "domain_id",
        "security_group_id": "security_group_id",
        "verify_ssl": False,
    }
    with patch("saltext.vmware.utils.vmc_request.call_api", autospec=True) as vmc_call_api:
        actual_args.update(common_actual_args)
        vmc_security_groups.create(**actual_args)
        call_kwargs = vmc_call_api.mock_calls[0][-1]
        # Only the request body is checked; URL/method are covered elsewhere.
        assert call_kwargs["data"] == expected_payload
| 35.34386
| 122
| 0.592078
| 1,093
| 10,073
| 5.046661
| 0.136322
| 0.07306
| 0.067984
| 0.055475
| 0.815083
| 0.768492
| 0.736584
| 0.718093
| 0.718093
| 0.718093
| 0
| 0.021053
| 0.292664
| 10,073
| 284
| 123
| 35.46831
| 0.753123
| 0.012012
| 0
| 0.600791
| 0
| 0.019763
| 0.294627
| 0.067016
| 0
| 0
| 0
| 0
| 0.055336
| 1
| 0.043478
| false
| 0
| 0.01581
| 0
| 0.059289
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ed633771a0dcf96096590223c8408989d8dc4e56
| 174
|
py
|
Python
|
utest/start-all.py
|
Spiritdude/zencad
|
4e63b1a6306dd235f4daa2791b10249f7546c95b
|
[
"MIT"
] | 5
|
2018-04-11T14:11:40.000Z
|
2018-09-12T19:03:36.000Z
|
utest/start-all.py
|
Spiritdude/zencad
|
4e63b1a6306dd235f4daa2791b10249f7546c95b
|
[
"MIT"
] | null | null | null |
utest/start-all.py
|
Spiritdude/zencad
|
4e63b1a6306dd235f4daa2791b10249f7546c95b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import sys
os.system(sys.executable + " widgets_test.py")
os.system(sys.executable + " api.py")
os.system(sys.executable + " examples.py")
| 19.333333
| 46
| 0.718391
| 27
| 174
| 4.592593
| 0.518519
| 0.193548
| 0.266129
| 0.508065
| 0.370968
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006494
| 0.114943
| 174
| 8
| 47
| 21.75
| 0.798701
| 0.12069
| 0
| 0
| 0
| 0
| 0.230263
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
ed772f6b3f9dbc89b7f6d62bb0946cfe313f69d9
| 8,354
|
py
|
Python
|
services/app/src/tests/test_groups.py
|
chimailo/algorice
|
98da1077561980091b6f167077edeb200c778957
|
[
"MIT"
] | null | null | null |
services/app/src/tests/test_groups.py
|
chimailo/algorice
|
98da1077561980091b6f167077edeb200c778957
|
[
"MIT"
] | 1
|
2020-09-21T08:52:36.000Z
|
2020-09-21T08:52:36.000Z
|
services/app/src/tests/test_groups.py
|
chimailo/algorice
|
98da1077561980091b6f167077edeb200c778957
|
[
"MIT"
] | null | null | null |
import json
from src import create_app
from src.config import TestingConfig
from src.blueprints.auth.models import User
from src.blueprints.admin.models import Group, Permission
# Module-level app built with the testing config; the tests below read
# config values from it (e.g. ITEMS_PER_PAGE).
app = create_app(config=TestingConfig)
def test_get_group(client, groups):
    """Fetching an existing group by id returns 200 and its name."""
    group = Group.find_by_name('test group 1')
    res = client.get(f'/api/admin/groups/{group.id}')
    payload = json.loads(res.data.decode())
    assert res.status_code == 200
    assert payload.get('name') == 'test group 1'
def test_get_group_invalid_id(client, groups):
    """Fetching a non-existent group id returns 404 with an error body."""
    res = client.get('/api/admin/groups/66853')
    payload = json.loads(res.data.decode())
    assert res.status_code == 404
    assert 'Group not found' in payload.get('message')
    assert 'Not Found' in payload.get('error')
def test_get_all_groups(client, groups):
    """Listing groups returns one full page of items."""
    res = client.get('/api/admin/groups')
    payload = json.loads(res.data.decode())
    assert res.status_code == 200
    assert len(payload.get('items')) == app.config['ITEMS_PER_PAGE']
def test_all_groups_with_pagination_first_page(client, groups):
    """Page 1 links forward to page 2 and has no previous page."""
    res = client.get('/api/admin/groups/page/1')
    payload = json.loads(res.data.decode())
    assert res.status_code == 200
    assert len(payload.get('items')) <= app.config['ITEMS_PER_PAGE']
    assert payload.get('next_url') == '/api/admin/groups/page/2'
    assert payload.get('prev_url') is None
def test_all_groups_with_pagination_last_page(client, groups):
    """Page 2 links back to page 1 and has no next page."""
    res = client.get('/api/admin/groups/page/2')
    payload = json.loads(res.data.decode())
    assert res.status_code == 200
    assert len(payload.get('items')) <= app.config['ITEMS_PER_PAGE']
    assert payload.get('prev_url') == '/api/admin/groups/page/1'
    assert payload.get('next_url') is None
def test_add_group_no_data(client):
    """POSTing an empty JSON body is rejected with 400."""
    res = client.post(
        '/api/admin/groups',
        data=json.dumps({}),
        content_type='application/json'
    )
    payload = json.loads(res.data.decode())
    assert res.status_code == 400
    assert 'No input data provided' in payload.get('message')
def test_add_group_invalid_data(client):
    """A group name with illegal characters fails validation (422)."""
    body = {
        'name': 'co/d^mmon',
        'description': 'just a common group',
    }
    res = client.post(
        '/api/admin/groups',
        data=json.dumps(body),
        content_type='application/json'
    )
    payload = json.loads(res.data.decode())
    assert res.status_code == 422
    assert payload.get('message') is not None
def test_add_group_duplicate_name(client, groups):
    """Creating a group with an existing name is rejected with 400."""
    body = {
        'name': 'test group 1',
        'description': 'Another common group',
    }
    res = client.post(
        '/api/admin/groups',
        data=json.dumps(body),
        content_type='application/json'
    )
    payload = json.loads(res.data.decode())
    assert res.status_code == 400
    assert 'Group already exist.' in payload.get('message')
def test_add_group_valid(client, groups):
    """A valid group creation returns 201, a Location header and the body."""
    response = client.post(
        '/api/admin/groups',
        data=json.dumps({
            'name': 'test group 4',
            'description': 'just a test group',
        }),
        content_type='application/json'
    )
    data = json.loads(response.data.decode())
    assert response.status_code == 201
    assert response.headers['Location'] is not None
    # Idiom fix: `assert isinstance(...)` -- the original compared the
    # boolean to True with `is`, which adds nothing.
    assert isinstance(data, dict)
    assert data.get('name') == 'test group 4'
def test_update_group_duplicate_name(client, groups):
    """Renaming a group to another group's name is rejected with HTTP 400."""
    group = Group.find_by_name('test group 2')
    resp = client.put(
        f'/api/admin/groups/{group.id}',
        data=json.dumps({
            'name': 'test group 1',
            'description': 'just a common group',
        }),
        content_type='application/json',
    )
    payload = json.loads(resp.data.decode())
    assert resp.status_code == 400
    assert 'Group already exists.' in payload.get('message')
def test_update_group_no_data(client, groups):
    """Updating with an empty JSON body is rejected with HTTP 400."""
    group = Group.find_by_name('test group 2')
    resp = client.put(
        f'/api/admin/groups/{group.id}',
        data=json.dumps({}),
        content_type='application/json',
    )
    payload = json.loads(resp.data.decode())
    assert resp.status_code == 400
    assert 'No input data provided' in payload.get('message')
def test_update_group_invalid_data(client, groups):
    """An invalid replacement name is rejected as unprocessable (422)."""
    group = Group.find_by_name('test group 2')
    resp = client.put(
        f'/api/admin/groups/{group.id}',
        data=json.dumps({'name': 'tr*st1'}),
        content_type='application/json',
    )
    payload = json.loads(resp.data.decode())
    assert resp.status_code == 422
    assert payload.get('message') is not None
def test_update_group(client, groups):
    """A valid update returns 200 and echoes the new group name."""
    group = Group.find_by_name('test group 2')
    resp = client.put(
        f'/api/admin/groups/{group.id}',
        data=json.dumps({
            'description': 'test group',
            'name': 'test group',
        }),
        content_type='application/json',
    )
    payload = json.loads(resp.data.decode())
    assert resp.status_code == 200
    assert isinstance(payload, dict) is True
    assert payload.get('name') == 'test group'
def test_delete_group(client, groups):
    """Deleting an existing group returns 200 with a confirmation message."""
    group = Group.find_by_name('test group 2')
    resp = client.delete(f'/api/admin/groups/{group.id}')
    payload = json.loads(resp.data.decode())
    assert resp.status_code == 200
    assert 'deleted group' in payload.get('message')
def test_delete_group_invalid_id(client, groups):
    """Deleting a non-existent group id returns 404."""
    resp = client.delete('/api/admin/groups/333')
    payload = json.loads(resp.data.decode())
    assert resp.status_code == 404
    assert 'Group does not exist.' in payload.get('message')
def test_add_group_members(client, users, groups):
    """PUT on the members endpoint attaches both users to the group."""
    admin = User.find_by_identity('adminuser@test.com')
    regular = User.find_by_identity('regularuser@test.com')
    group = Group.find_by_name('test group 1')
    resp = client.put(
        f'/api/admin/groups/{group.id}/members',
        content_type='application/json',
        data=json.dumps({'users': [admin.id, regular.id]}),
    )
    payload = json.loads(resp.data.decode())
    assert resp.status_code == 200
    members = payload.get('members')
    assert len(members) == 2
    assert members[0]['username'] == 'adminuser'
    assert members[1]['username'] == 'regularuser'
def test_remove_group_members(client, users, groups):
    """DELETE on the members endpoint detaches the listed users."""
    admin = User.find_by_identity('adminuser@test.com')
    regular = User.find_by_identity('regularuser@test.com')
    group = Group.find_by_name('test group 3')
    group.add_members([regular, admin])
    assert len(group.members.all()) == 2
    resp = client.delete(
        f'/api/admin/groups/{group.id}/members',
        content_type='application/json',
        data=json.dumps({'users': [admin.id, regular.id]}),
    )
    payload = json.loads(resp.data.decode())
    assert resp.status_code == 200
    assert len(payload.get('members')) == 0
def test_add_group_perms(client, groups):
    """PUT on the permissions endpoint attaches both permissions."""
    view_code = Permission.set_code_name('can view groups')
    delete_code = Permission.set_code_name('can delete users')
    view_perm = Permission.find_by_name(view_code)
    delete_perm = Permission.find_by_name(delete_code)
    group = Group.find_by_name('test group 1')
    resp = client.put(
        f'/api/admin/groups/{group.id}/permissions',
        content_type='application/json',
        data=json.dumps({'perms': [view_perm.id, delete_perm.id]}),
    )
    payload = json.loads(resp.data.decode())
    assert resp.status_code == 200
    perms = payload.get('permissions')
    assert len(perms) == 2
    assert perms[1]['name'] == 'can view groups'
    assert perms[0]['name'] == 'can delete users'
def test_remove_group_perms(client, groups):
    """DELETE on the permissions endpoint detaches the listed permissions."""
    view_code = Permission.set_code_name('can view groups')
    delete_code = Permission.set_code_name('can delete users')
    view_perm = Permission.find_by_name(view_code)
    delete_perm = Permission.find_by_name(delete_code)
    group = Group.find_by_name('test group 3')
    group.add_permissions([delete_perm, view_perm])
    assert len(group.permissions.all()) == 2
    resp = client.delete(
        f'/api/admin/groups/{group.id}/permissions',
        content_type='application/json',
        data=json.dumps({'perms': [view_perm.id, delete_perm.id]}),
    )
    payload = json.loads(resp.data.decode())
    assert resp.status_code == 200
    assert len(payload.get('permissions')) == 0
| 33.821862
| 67
| 0.660881
| 1,115
| 8,354
| 4.810762
| 0.10583
| 0.046234
| 0.05481
| 0.074385
| 0.859433
| 0.823266
| 0.804064
| 0.793997
| 0.763982
| 0.751491
| 0
| 0.018285
| 0.194757
| 8,354
| 246
| 68
| 33.95935
| 0.779099
| 0
| 0
| 0.608911
| 0
| 0
| 0.208403
| 0.055063
| 0
| 0
| 0
| 0
| 0.257426
| 1
| 0.094059
| false
| 0
| 0.024752
| 0
| 0.118812
| 0.009901
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ed8365930ffcfb6014bd512d3aed964c66db6fbc
| 15,872
|
py
|
Python
|
test/functional/docker/test_scenarios.py
|
pngmbh/ansible-molecule
|
37bd3bd63a78c187930acb44a798831c42160e16
|
[
"MIT"
] | null | null | null |
test/functional/docker/test_scenarios.py
|
pngmbh/ansible-molecule
|
37bd3bd63a78c187930acb44a798831c42160e16
|
[
"MIT"
] | null | null | null |
test/functional/docker/test_scenarios.py
|
pngmbh/ansible-molecule
|
37bd3bd63a78c187930acb44a798831c42160e16
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import re
import pytest
import sh
import shutil
from molecule import util
from ..conftest import (
change_dir_to,
needs_inspec,
needs_rubocop,
)
@pytest.fixture
def scenario_to_test(request):
    """Scenario directory under test, supplied via indirect parametrization."""
    return request.param
@pytest.fixture
def scenario_name(request):
    """Scenario name via indirect parametrization; None when no param is set."""
    return getattr(request, 'param', None)
@pytest.fixture
def driver_name(request):
    """Driver name under test, supplied via indirect parametrization."""
    return request.param
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('side_effect', 'docker', 'default'),
    ],
    indirect=['scenario_to_test', 'driver_name', 'scenario_name'])
def test_command_side_effect(scenario_to_test, with_scenario, scenario_name):
    """Full test sequence for the side_effect scenario runs cleanly."""
    cmd = sh.molecule.bake('test', driver_name='docker', all=True)
    pytest.helpers.run_command(cmd)
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('cleanup', 'docker', 'default'),
    ],
    indirect=['scenario_to_test', 'driver_name', 'scenario_name'])
def test_command_cleanup(scenario_to_test, with_scenario, scenario_name):
    """Full test sequence for the cleanup scenario runs cleanly."""
    cmd = sh.molecule.bake('test', driver_name='docker', all=True)
    pytest.helpers.run_command(cmd)
@needs_inspec
@needs_rubocop
def test_command_init_role_inspec(temp_dir):
    """Scaffold a role with the inspec verifier and run `molecule test` in it."""
    role_directory = os.path.join(temp_dir.strpath, 'test-init')
    init_cmd = sh.molecule.bake(
        'init', 'role', role_name='test-init', verifier_name='inspec')
    pytest.helpers.run_command(init_cmd)
    pytest.helpers.metadata_lint_update(role_directory)
    with change_dir_to(role_directory):
        pytest.helpers.run_command(sh.molecule.bake('test'))
def test_command_init_scenario_inspec(temp_dir):
    """Add an inspec-verified scenario to a freshly scaffolded role."""
    role_directory = os.path.join(temp_dir.strpath, 'test-init')
    init_cmd = sh.molecule.bake('init', 'role', role_name='test-init')
    pytest.helpers.run_command(init_cmd)
    pytest.helpers.metadata_lint_update(role_directory)
    with change_dir_to(role_directory):
        molecule_directory = pytest.helpers.molecule_directory()
        scenario_directory = os.path.join(molecule_directory, 'test-scenario')
        scenario_cmd = sh.molecule.bake(
            'init', 'scenario',
            scenario_name='test-scenario', role_name='test-init',
            verifier_name='inspec')
        pytest.helpers.run_command(scenario_cmd)
        assert os.path.isdir(scenario_directory)
def test_command_init_role_goss(temp_dir):
    """Scaffold a role with the goss verifier and run `molecule test` in it."""
    role_directory = os.path.join(temp_dir.strpath, 'test-init')
    init_cmd = sh.molecule.bake(
        'init', 'role', role_name='test-init', verifier_name='goss')
    pytest.helpers.run_command(init_cmd)
    pytest.helpers.metadata_lint_update(role_directory)
    with change_dir_to(role_directory):
        pytest.helpers.run_command(sh.molecule.bake('test'))
def test_command_init_scenario_goss(temp_dir):
    """Add a goss-verified scenario to a freshly scaffolded role."""
    role_directory = os.path.join(temp_dir.strpath, 'test-init')
    init_cmd = sh.molecule.bake('init', 'role', role_name='test-init')
    pytest.helpers.run_command(init_cmd)
    pytest.helpers.metadata_lint_update(role_directory)
    with change_dir_to(role_directory):
        molecule_directory = pytest.helpers.molecule_directory()
        scenario_directory = os.path.join(molecule_directory, 'test-scenario')
        scenario_cmd = sh.molecule.bake(
            'init', 'scenario',
            scenario_name='test-scenario', role_name='test-init',
            verifier_name='goss')
        pytest.helpers.run_command(scenario_cmd)
        assert os.path.isdir(scenario_directory)
def test_command_init_scenario_with_invalid_role_raises(temp_dir):
    """`init scenario` for a role that does not exist fails with a clear error."""
    role_directory = os.path.join(temp_dir.strpath, 'test-role')
    init_cmd = sh.molecule.bake('init', 'role', role_name='test-role')
    pytest.helpers.run_command(init_cmd)
    pytest.helpers.metadata_lint_update(role_directory)
    with change_dir_to(role_directory):
        with pytest.raises(sh.ErrorReturnCode) as exc:
            bad_cmd = sh.molecule.bake(
                'init', 'scenario',
                scenario_name='default', role_name='invalid-role-name')
            pytest.helpers.run_command(bad_cmd, log=False)
        msg = ("ERROR: The role 'invalid-role-name' not found. "
               'Please choose the proper role name.')
        assert msg in str(exc.value.stderr)
def test_command_init_scenario_as_default_without_default_scenario(temp_dir):
    """Recreating the 'default' scenario after deleting it must succeed."""
    role_directory = os.path.join(temp_dir.strpath, 'test-role')
    init_cmd = sh.molecule.bake('init', 'role', role_name='test-role')
    pytest.helpers.run_command(init_cmd)
    pytest.helpers.metadata_lint_update(role_directory)
    with change_dir_to(role_directory):
        scenario_directory = os.path.join(
            pytest.helpers.molecule_directory(), 'default')
        shutil.rmtree(scenario_directory)
        recreate_cmd = sh.molecule.bake(
            'init', 'scenario',
            scenario_name='default', role_name='test-role')
        pytest.helpers.run_command(recreate_cmd)
        assert os.path.isdir(scenario_directory)
# NOTE(retr0h): Molecule does not allow the creation of a role without
# a default scenario. This tests roles not created by a newer Molecule.
def test_command_init_scenario_without_default_scenario_raises(temp_dir):
    """A non-default scenario cannot be created while 'default' is missing."""
    role_directory = os.path.join(temp_dir.strpath, 'test-role')
    init_cmd = sh.molecule.bake('init', 'role', role_name='test-role')
    pytest.helpers.run_command(init_cmd)
    pytest.helpers.metadata_lint_update(role_directory)
    with change_dir_to(role_directory):
        scenario_directory = os.path.join(
            pytest.helpers.molecule_directory(), 'default')
        shutil.rmtree(scenario_directory)
        with pytest.raises(sh.ErrorReturnCode) as exc:
            bad_cmd = sh.molecule.bake(
                'init', 'scenario',
                scenario_name='invalid-role-name', role_name='test-role')
            pytest.helpers.run_command(bad_cmd, log=False)
        msg = ('The default scenario not found. Please create a scenario '
               "named 'default' first.")
        assert msg in str(exc.value.stderr)
def test_command_init_role_with_template(temp_dir):
    """Scaffold a role from the cookiecutter template and run `molecule test`."""
    role_name = 'test-init'
    role_directory = os.path.join(temp_dir.strpath, role_name)
    template_cmd = sh.molecule.bake(
        'init', 'template',
        url='https://github.com/ansible/molecule-cookiecutter.git',
        no_input=True, role_name=role_name)
    pytest.helpers.run_command(template_cmd)
    pytest.helpers.metadata_lint_update(role_directory)
    with change_dir_to(role_directory):
        pytest.helpers.run_command(sh.molecule.bake('test'))
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('overrride_driver', 'docker', 'default'),
    ],
    indirect=['scenario_to_test', 'driver_name', 'scenario_name'])
def test_command_test_overrides_driver(scenario_to_test, with_scenario,
                                       driver_name, scenario_name):
    """--driver-name on the CLI overrides the scenario's configured driver."""
    cmd = sh.molecule.bake('test', driver_name=driver_name, all=True)
    pytest.helpers.run_command(cmd)
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('driver/docker', 'docker', 'default'),
    ],
    indirect=['scenario_to_test', 'driver_name', 'scenario_name'])
def test_command_test_builds_local_molecule_image(
        scenario_to_test, with_scenario, scenario_name, driver_name):
    """Force a rebuild of the local molecule image by removing it first."""
    try:
        rmi_cmd = sh.docker.bake(
            'rmi', 'molecule_local/centos:latest', '--force')
        pytest.helpers.run_command(rmi_cmd)
    except sh.ErrorReturnCode:
        # Image was not present; nothing to remove.
        pass
    pytest.helpers.test(driver_name, scenario_name)
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('test_destroy_strategy', 'docker', 'default'),
    ],
    indirect=['scenario_to_test', 'driver_name', 'scenario_name'])
def test_command_test_destroy_strategy_always(scenario_to_test, with_scenario,
                                              scenario_name, driver_name):
    """--destroy=always runs the destroy play even after a lint failure."""
    with pytest.raises(sh.ErrorReturnCode) as exc:
        cmd = sh.molecule.bake('test', destroy='always')
        pytest.helpers.run_command(cmd, log=False)
    stdout = str(exc.value.stdout)
    msg = ("An error occurred during the test sequence action: 'lint'. "
           'Cleaning up.')
    assert msg in stdout
    assert 'PLAY [Destroy]' in stdout
    assert 0 != exc.value.exit_code
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('test_destroy_strategy', 'docker', 'default'),
    ],
    indirect=['scenario_to_test', 'driver_name', 'scenario_name'])
def test_command_test_destroy_strategy_never(scenario_to_test, with_scenario,
                                             scenario_name, driver_name):
    """--destroy=never skips cleanup after a lint failure."""
    with pytest.raises(sh.ErrorReturnCode) as exc:
        cmd = sh.molecule.bake('test', destroy='never')
        pytest.helpers.run_command(cmd, log=False)
    msg = ("An error occurred during the test sequence action: 'lint'. "
           'Cleaning up.')
    assert msg not in str(exc.value.stdout)
    assert 0 != exc.value.exit_code
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('host_group_vars', 'docker', 'default'),
    ],
    indirect=['scenario_to_test', 'driver_name', 'scenario_name'])
def test_host_group_vars(scenario_to_test, with_scenario, scenario_name):
    """host_vars/group_vars are honoured for every inventory group."""
    cmd = sh.molecule.bake('test', all=True)
    result = pytest.helpers.run_command(cmd, log=False)
    output = util.strip_ansi_escape(result.stdout.decode('utf-8'))
    for group in ('instance', 'example', 'example_1'):
        pattern = r'\[{}\].*?ok: \[instance\]'.format(group)
        assert re.search(pattern, output, re.DOTALL)
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('idempotence', 'docker', 'raises'),
    ],
    indirect=['scenario_to_test', 'driver_name', 'scenario_name'])
def test_idempotence_raises(scenario_to_test, with_scenario, scenario_name):
    """The raises scenario is intentionally non-idempotent: exit code 2."""
    cmd = sh.molecule.bake(
        'test', scenario_name=scenario_name, all=True, destroy='never')
    with pytest.raises(sh.ErrorReturnCode_2) as exc:
        pytest.helpers.run_command(cmd)
    assert 2 == exc.value.exit_code
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('interpolation', 'docker', 'default'),
    ],
    indirect=['scenario_to_test', 'driver_name', 'scenario_name'])
def test_interpolation(scenario_to_test, with_scenario, scenario_name):
    """Config interpolation resolves driver/instance names from env vars."""
    # Modify global environment so cleanup inherits our environment.
    env = os.environ
    env['DRIVER_NAME'] = 'docker'
    env['INSTANCE_NAME'] = 'instance'
    cmd = sh.molecule.bake('test', all=True)
    pytest.helpers.run_command(cmd, env=env)
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('verifier', 'docker', 'testinfra'),
    ],
    indirect=['scenario_to_test', 'driver_name', 'scenario_name'])
def test_command_verify_testinfra(scenario_to_test, with_scenario,
                                  scenario_name):
    """create -> converge -> verify succeeds with the testinfra verifier."""
    for action in ('create', 'converge', 'verify'):
        cmd = sh.molecule.bake(action, scenario_name=scenario_name)
        pytest.helpers.run_command(cmd)
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('verifier', 'docker', 'goss'),
    ],
    indirect=['scenario_to_test', 'driver_name', 'scenario_name'])
def test_command_verify_goss(scenario_to_test, with_scenario, scenario_name):
    """create -> converge -> verify succeeds with the goss verifier."""
    for action in ('create', 'converge', 'verify'):
        cmd = sh.molecule.bake(action, scenario_name=scenario_name)
        pytest.helpers.run_command(cmd)
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('verifier', 'docker', 'inspec'),
    ],
    indirect=['scenario_to_test', 'driver_name', 'scenario_name'])
def test_command_verify_inspec(scenario_to_test, with_scenario, scenario_name):
    """create -> converge -> verify succeeds with the inspec verifier."""
    for action in ('create', 'converge', 'verify'):
        cmd = sh.molecule.bake(action, scenario_name=scenario_name)
        pytest.helpers.run_command(cmd)
@pytest.mark.parametrize(
    'scenario_to_test, driver_name, scenario_name', [
        ('plugins', 'docker', 'default'),
    ],
    indirect=['scenario_to_test', 'driver_name', 'scenario_name'])
def test_plugins(scenario_to_test, with_scenario, scenario_name):
    """The plugins scenario runs its full test sequence cleanly."""
    cmd = sh.molecule.bake('test', scenario_name=scenario_name, all=True)
    pytest.helpers.run_command(cmd)
| 30.232381
| 79
| 0.646169
| 1,877
| 15,872
| 5.222163
| 0.129995
| 0.083248
| 0.057131
| 0.082126
| 0.773108
| 0.741073
| 0.732606
| 0.721077
| 0.709141
| 0.702612
| 0
| 0.001226
| 0.229209
| 15,872
| 524
| 80
| 30.290076
| 0.799984
| 0.080771
| 0
| 0.719048
| 0
| 0
| 0.205479
| 0.004806
| 0
| 0
| 0
| 0
| 0.033333
| 1
| 0.057143
| false
| 0.002381
| 0.016667
| 0.004762
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
71eed86eb1af58b5c4b75294f23a140dae46871b
| 73
|
py
|
Python
|
crypt/config.py
|
EODEgroup/godot-modules
|
dbbf7863e8d20d8732198ba9651093ac458fe951
|
[
"MIT"
] | 2
|
2015-12-03T17:03:06.000Z
|
2015-12-03T21:27:11.000Z
|
crypt/config.py
|
EODEgroup/godot-modules
|
dbbf7863e8d20d8732198ba9651093ac458fe951
|
[
"MIT"
] | null | null | null |
crypt/config.py
|
EODEgroup/godot-modules
|
dbbf7863e8d20d8732198ba9651093ac458fe951
|
[
"MIT"
] | 1
|
2019-01-15T08:48:27.000Z
|
2019-01-15T08:48:27.000Z
|
def can_build(platform):
    """This module builds on every platform."""
    return True
def configure(env):
    """No build-environment configuration is needed for this module."""
    return None
| 12.166667
| 25
| 0.671233
| 10
| 73
| 4.8
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.246575
| 73
| 5
| 26
| 14.6
| 0.872727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.25
| 0
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 6
|
9c37af5bde1f7c01566c5727d78224d1b7120b3e
| 5,792
|
py
|
Python
|
tests/test_encryption.py
|
matthiashuschle/aws_tools
|
c9f1fb5cc48cc531c71ba5bc912e262cb1606e53
|
[
"Apache-2.0"
] | null | null | null |
tests/test_encryption.py
|
matthiashuschle/aws_tools
|
c9f1fb5cc48cc531c71ba5bc912e262cb1606e53
|
[
"Apache-2.0"
] | null | null | null |
tests/test_encryption.py
|
matthiashuschle/aws_tools
|
c9f1fb5cc48cc531c71ba5bc912e262cb1606e53
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from hashlib import md5
from aws_glacier_manager import encryption, datatypes
from io import BytesIO
from nacl import pwhash
import os
class TestEncryptionGeneral(TestCase):
    """End-to-end tests for the stream encryption and signing helpers.

    Each test pushes a random 80 kB stream through encrypt -> sign ->
    verify -> decrypt and compares MD5 checksums of plaintext in vs. out.
    """
    def test_general(self):
        """Round-trip using the module-level functions with fresh keys."""
        # create temporary keys
        secret_key, auth_key = encryption.create_keys(None)
        # secret_box = encryption.get_secret_box_from_key(secret_key)
        # create temporary data
        data = BytesIO(os.urandom(80000))
        # prepare checksum for input data
        checksum_in = md5()
        checksum_in.update(data.read())
        # encrypt
        data.seek(0)
        data_out = BytesIO()
        for chunk in encryption.CryptoHandler(secret_key).encrypt_stream(data):
            data_out.write(chunk)
        # create signature
        data_out.seek(0)
        sig = encryption.sign_stream(encryption.get_auth_hmac_from_key(auth_key), data_out)
        # verify by signature
        data_out.seek(0)
        self.assertTrue(encryption.verify_stream(encryption.get_auth_hmac_from_key(auth_key), data_out, sig))
        # decrypt
        data_out.seek(0)
        checksum = md5()
        for chunk in encryption.CryptoHandler(secret_key).decrypt_stream(data_out):
            checksum.update(chunk)
        self.assertEqual(checksum.digest(), checksum_in.digest())
    def test_create_keys_from_password(self):
        """Password-derived keys are reproducible given the same setup."""
        password = b'supersecret'
        # Minimal argon2i cost parameters keep the test fast.
        settings = datatypes.DerivedKeySetup.create_default(enable_auth_key=True)
        settings.ops = pwhash.argon2i.OPSLIMIT_MIN
        settings.mem = pwhash.argon2i.MEMLIMIT_MIN
        keys = encryption.create_keys_from_password(password, settings)
        recreated_keys = encryption.create_keys_from_password(password, keys.setup)
        self.assertIs(keys.setup, recreated_keys.setup)
        self.assertEqual(keys.key_sig, recreated_keys.key_sig)
        self.assertEqual(keys.key_enc, recreated_keys.key_enc)
        self.assertTrue(len(keys.key_enc))
        self.assertTrue(len(keys.key_sig))
    def test_class(self):
        """Round-trip using a single CryptoHandler instance for all steps."""
        secret_key, auth_key = encryption.create_keys(None)
        handler = encryption.CryptoHandler(secret_key, auth_key)
        # create temporary data
        data = BytesIO(os.urandom(80000))
        # prepare checksum for input data
        checksum_in = md5()
        checksum_in.update(data.read())
        # encrypt
        data.seek(0)
        data_out = BytesIO()
        for chunk in handler.encrypt_stream(data):
            data_out.write(chunk)
        # create signature
        sig = handler.last_signature
        # verify by signature
        data_out.seek(0)
        self.assertTrue(handler.verify_stream(data_out, sig))
        # decrypt
        data_out.seek(0)
        checksum = md5()
        for chunk in handler.decrypt_stream(data_out):
            checksum.update(chunk)
        self.assertEqual(checksum.digest(), checksum_in.digest())
        # decrypt with verification
        data_out.seek(0)
        checksum = md5()
        for chunk in handler.decrypt_stream(data_out, signature=sig):
            checksum.update(chunk)
        self.assertEqual(checksum.digest(), checksum_in.digest())
    def test_class_partial(self):
        """Round-trip when only the first 70000 bytes are encrypted."""
        secret_key, auth_key = encryption.create_keys(None)
        handler = encryption.CryptoHandler(secret_key, auth_key)
        # create temporary data
        data = BytesIO(os.urandom(80000))
        # prepare checksum for input data
        checksum_in = md5()
        checksum_in.update(data.read(70000))
        # encrypt
        data.seek(0)
        data_out = BytesIO()
        for chunk in handler.encrypt_stream(data, read_total=70000):
            data_out.write(chunk)
        # create signature
        sig = handler.last_signature
        # verify by signature
        data_out.seek(0)
        self.assertTrue(handler.verify_stream(data_out, sig))
        # decrypt
        data_out.seek(0)
        checksum = md5()
        for chunk in handler.decrypt_stream(data_out):
            checksum.update(chunk)
        self.assertEqual(checksum.digest(), checksum_in.digest())
        # decrypt with verification
        data_out.seek(0)
        checksum = md5()
        for chunk in handler.decrypt_stream(data_out, signature=sig):
            checksum.update(chunk)
        self.assertEqual(checksum.digest(), checksum_in.digest())
    def test_class_partial_wo_signature(self):
        """Without an auth key, verification must raise; plain decrypt works."""
        secret_key, _ = encryption.create_keys(None)
        handler = encryption.CryptoHandler(secret_key)
        # create temporary data
        data = BytesIO(os.urandom(80000))
        # prepare checksum for input data
        checksum_in = md5()
        checksum_in.update(data.read(70000))
        # encrypt
        data.seek(0)
        data_out = BytesIO()
        for chunk in handler.encrypt_stream(data, read_total=70000):
            data_out.write(chunk)
        # create signature
        self.assertIsNone(handler.last_signature)
        # verify by signature should fail
        data_out.seek(0)
        with self.assertRaises(RuntimeError):
            handler.verify_stream(data_out, 'abcd')
        # decrypt
        data_out.seek(0)
        checksum = md5()
        for chunk in handler.decrypt_stream(data_out):
            checksum.update(chunk)
        self.assertEqual(checksum.digest(), checksum_in.digest())
        # decrypt with verification should fail
        data_out.seek(0)
        with self.assertRaises(RuntimeError):
            for _ in handler.decrypt_stream(data_out, signature='abcd'):
                pass
        # decrypt without full verification
        data_out.seek(0)
        checksum = md5()
        for chunk in handler.decrypt_stream(data_out):
            checksum.update(chunk)
        self.assertEqual(checksum.digest(), checksum_in.digest())
| 38.872483
| 109
| 0.651934
| 686
| 5,792
| 5.297376
| 0.138484
| 0.065493
| 0.039351
| 0.042928
| 0.810952
| 0.800495
| 0.790314
| 0.73803
| 0.710237
| 0.68492
| 0
| 0.016554
| 0.259496
| 5,792
| 148
| 110
| 39.135135
| 0.83073
| 0.111361
| 0
| 0.693694
| 0
| 0
| 0.003714
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 1
| 0.045045
| false
| 0.045045
| 0.054054
| 0
| 0.108108
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
92be0bab44ab31730c3dcca363005db0c3923b88
| 10,762
|
py
|
Python
|
musco/tf/compressor/rank_selection/vbmf.py
|
benjybarnett/musco-tf
|
a84624fdff92abdd7a34e8374f28b23ce2d28b53
|
[
"Apache-2.0"
] | 8
|
2019-10-02T19:17:16.000Z
|
2021-11-16T21:48:29.000Z
|
musco/tf/compressor/rank_selection/vbmf.py
|
benjybarnett/musco-tf
|
a84624fdff92abdd7a34e8374f28b23ce2d28b53
|
[
"Apache-2.0"
] | 1
|
2019-12-11T11:32:33.000Z
|
2019-12-11T11:33:10.000Z
|
musco/tf/compressor/rank_selection/vbmf.py
|
benjybarnett/musco-tf
|
a84624fdff92abdd7a34e8374f28b23ce2d28b53
|
[
"Apache-2.0"
] | 3
|
2020-08-04T03:54:48.000Z
|
2020-11-26T16:08:31.000Z
|
from __future__ import division
from scipy.optimize import minimize_scalar
import numpy as np
def vbmf(Y, cacb=1024, sigma2=None, H=None):
    """Implementation of the analytical solution to Variational Bayes Matrix Factorization.
    This function can be used to calculate the analytical solution to VBMF.
    This is based on the paper and MatLab code by Nakajima et al.:
    "Global analytic solution of fully-observed variational Bayesian matrix factorization."
    Notes
    -----
    If sigma2 is unspecified, it is estimated by minimizing the free energy.
    If H is unspecified, it is set to the smallest of the sides of the input Y.
    To estimate cacb, use the function EVBMF().
    Attributes
    ----------
    Y : numpy-array
        Input matrix that is to be factorized. Y has shape (L,M), where L<=M.
    cacb : int
        Product of the prior variances of the matrices that factorize the input.
    sigma2 : int or None (default=None)
        Variance of the noise on Y.
    H : int or None (default = None)
        Maximum rank of the factorized matrices.
    Returns
    -------
    U : numpy-array
        Left-singular vectors.
    S : numpy-array
        Diagonal matrix of singular values.
    V : numpy-array
        Right-singular vectors.
    post : dictionary
        Dictionary containing the computed posterior values.
    References
    ----------
    .. [1] Nakajima, Shinichi, et al. "Global analytic solution of fully-observed variational Bayesian matrix factorization." Journal of Machine Learning Research 14.Jan (2013): 1-37.
    .. [2] Nakajima, Shinichi, et al. "Perfect dimensionality recovery by variational Bayesian PCA." Advances in Neural Information Processing Systems. 2012.
    """
    L, M = Y.shape  # Has to be L <= M.
    if H is None:
        H = L
    # SVD of the input matrix, max rank of H.
    U, s, V = np.linalg.svd(Y)
    U = U[:, :H]
    s = s[:H]
    V = V[:H].T
    # Calculate residual.
    residual = 0.
    if H < L:
        residual = np.sum(np.sum(Y ** 2) - np.sum(s ** 2))
    # Estimation of the variance when sigma2 is unspecified.
    if sigma2 is None:
        # Minimize the free energy vb_sigma2 over the bracket
        # [lower_bound, upper_bound] via bounded scalar optimization.
        upper_bound = (np.sum(s ** 2) + residual) / (L + M)
        if L == H:
            lower_bound = s[-1] ** 2 / M
        else:
            lower_bound = residual / ((L - H) * M)
        sigma2_opt = minimize_scalar(vb_sigma2, args=(L, M, cacb, s, residual), bounds=[lower_bound, upper_bound],
                                     method="Bounded")
        sigma2 = sigma2_opt.x
        # print("Estimated: ", sigma2)
    # Threshold gamma term.
    # Formula above (21) from [1].
    thresh_term = (L + M + sigma2 / cacb ** 2) / 2
    threshold = np.sqrt(sigma2 * (thresh_term + np.sqrt(thresh_term ** 2 - L * M)))
    # Number of singular values where gamma > threshold.
    pos = np.sum(s > threshold)
    # Formula (10) from [2].
    d = np.multiply(s[:pos],
                    1 - np.multiply(sigma2 / (2 * s[:pos] ** 2),
                                    L + M + np.sqrt((M - L) ** 2 + 4 * s[:pos] ** 2 / cacb ** 2)))
    # Computation of the posterior.
    # Entries beyond `pos` (sub-threshold singular values) stay at their
    # zero-mean prior-like defaults set below.
    post = {}
    zeta = sigma2 / (2 * L * M) * (L + M + sigma2 / cacb ** 2 - np.sqrt((L + M + sigma2 / cacb ** 2) ** 2 - 4 * L * M))
    post["ma"] = np.zeros(H)
    post["mb"] = np.zeros(H)
    post["sa2"] = cacb * (1 - L * zeta / sigma2) * np.ones(H)
    post["sb2"] = cacb * (1 - M * zeta / sigma2) * np.ones(H)
    delta = cacb / sigma2 * (s[:pos] - d - L * sigma2 / s[:pos])
    post["ma"][:pos] = np.sqrt(np.multiply(d, delta))
    post["mb"][:pos] = np.sqrt(np.divide(d, delta))
    post["sa2"][:pos] = np.divide(sigma2 * delta, s[:pos])
    post["sb2"][:pos] = np.divide(sigma2, np.multiply(delta, s[:pos]))
    post["sigma2"] = sigma2
    # Free energy of the solution; mirrors the objective in vb_sigma2.
    post["F"] = 0.5 * (L * M * np.log(2 * np.pi * sigma2) + (residual + np.sum(s ** 2)) / sigma2 - (L + M) * H
                       + np.sum(M * np.log(cacb / post["sa2"]) + L * np.log(cacb / post["sb2"])
                                + (post["ma"] ** 2 + M * post["sa2"]) / cacb + (
                                    post["mb"] ** 2 + L * post["sb2"]) / cacb
                                + (-2 * np.multiply(np.multiply(post["ma"], post["mb"]), s)
                                   + np.multiply(post["ma"] ** 2 + M * post["sa2"],
                                                 post["mb"] ** 2 + L * post["sb2"])) / sigma2))
    return U[:, :pos], np.diag(d), V[:, :pos], post
def vb_sigma2(sigma2, L, M, cacb, s, residual):
    """Free energy of the VB solution as a function of the noise variance.

    Objective minimized by vbmf() when sigma2 is unspecified. Recomputes
    the thresholded singular values and posterior parameters for the
    candidate sigma2 (same formulas as in vbmf()) and returns the scalar
    free energy F.
    """
    H = len(s)
    thresh_term = (L + M + sigma2 / cacb ** 2) / 2
    threshold = np.sqrt(sigma2 * (thresh_term + np.sqrt(thresh_term ** 2 - L * M)))
    # Number of singular values above the detection threshold.
    pos = np.sum(s > threshold)
    d = np.multiply(s[:pos],
                    1 - np.multiply(sigma2 / (2 * s[:pos] ** 2),
                                    L + M + np.sqrt((M - L) ** 2 + 4 * s[:pos] ** 2 / cacb ** 2)))
    zeta = sigma2 / (2 * L * M) * (L + M + sigma2 / cacb ** 2 - np.sqrt((L + M + sigma2 / cacb ** 2) ** 2 - 4 * L * M))
    post_ma = np.zeros(H)
    post_mb = np.zeros(H)
    post_sa2 = cacb * (1 - L * zeta / sigma2) * np.ones(H)
    post_sb2 = cacb * (1 - M * zeta / sigma2) * np.ones(H)
    delta = cacb / sigma2 * (s[:pos] - d - L * sigma2 / s[:pos])
    post_ma[:pos] = np.sqrt(np.multiply(d, delta))
    post_mb[:pos] = np.sqrt(np.divide(d, delta))
    post_sa2[:pos] = np.divide(sigma2 * delta, s[:pos])
    post_sb2[:pos] = np.divide(sigma2, np.multiply(delta, s[:pos]))
    F = 0.5 * (L * M * np.log(2 * np.pi * sigma2) + (residual + np.sum(s ** 2)) / sigma2 - (L + M) * H
               + np.sum(M * np.log(cacb / post_sa2) + L * np.log(cacb / post_sb2)
                        + (post_ma ** 2 + M * post_sa2) / cacb + (post_mb ** 2 + L * post_sb2) / cacb
                        + (-2 * np.multiply(np.multiply(post_ma, post_mb), s)
                           + np.multiply(post_ma ** 2 + M * post_sa2, post_mb ** 2 + L * post_sb2)) / sigma2))
    return F
def evbmf(Y, sigma2=None, H=None):
    """Implementation of the analytical solution to Empirical Variational Bayes Matrix Factorization.

    This function can be used to calculate the analytical solution to empirical VBMF.
    This is based on the paper and MatLab code by Nakajima et al.:
    "Global analytic solution of fully-observed variational Bayesian matrix factorization."

    Notes
    -----
    If sigma2 is unspecified, it is estimated by minimizing the free energy.
    If H is unspecified, it is set to the smallest of the sides of the input Y.

    Parameters
    ----------
    Y : numpy-array
        Input matrix that is to be factorized. Y has shape (L,M), where L<=M.
    sigma2 : int or None (default=None)
        Variance of the noise on Y.
    H : int or None (default = None)
        Maximum rank of the factorized matrices.

    Returns
    -------
    U : numpy-array
        Left-singular vectors (truncated to the estimated rank).
    S : numpy-array
        Diagonal matrix of the shrunken singular values.
    V : numpy-array
        Right-singular vectors (truncated to the estimated rank).
    post : dictionary
        Dictionary containing the computed posterior values.

    References
    ----------
    .. [1] Nakajima, Shinichi, et al. "Global analytic solution of fully-observed variational Bayesian matrix factorization." Journal of Machine Learning Research 14.Jan (2013): 1-37.
    .. [2] Nakajima, Shinichi, et al. "Perfect dimensionality recovery by variational Bayesian PCA." Advances in Neural Information Processing Systems. 2012.
    """
    L, M = Y.shape  # has to be L<=M
    if H is None:
        H = L
    alpha = L / M
    # Constant from [1] used to build the noise-variance search bounds.
    tauubar = 2.5129 * np.sqrt(alpha)
    # SVD of the input matrix, max rank of H.
    U, s, V = np.linalg.svd(Y)
    U = U[:, :H]
    s = s[:H]
    V = V[:H].T
    # Calculate residual: energy of Y not captured by the rank-H SVD.
    residual = 0.
    if H < L:
        residual = np.sum(np.sum(Y ** 2) - np.sum(s ** 2))
    # Estimation of the variance when sigma2 is unspecified.
    if sigma2 is None:
        xubar = (1 + tauubar) * (1 + alpha / tauubar)
        eH_ub = int(np.min([np.ceil(L / (1 + alpha)) - 1, H])) - 1
        upper_bound = (np.sum(s ** 2) + residual) / (L * M)
        lower_bound = np.max([s[eH_ub + 1] ** 2 / (M * xubar), np.mean(s[eH_ub + 1:] ** 2) / M])
        scale = 1.  # /lower_bound
        s = s * np.sqrt(scale)
        residual = residual * scale
        lower_bound = lower_bound * scale
        upper_bound = upper_bound * scale
        # Minimize the free energy over sigma2 within [lower_bound, upper_bound].
        sigma2_opt = minimize_scalar(evb_sigma2, args=(L, M, s, residual, xubar), bounds=[lower_bound, upper_bound],
                                     method="Bounded")
        sigma2 = sigma2_opt.x
        # print("Estimated: ", sigma2)
    # Threshold gamma term: singular values below it are truncated to zero.
    threshold = np.sqrt(M * sigma2 * (1 + tauubar) * (1 + alpha / tauubar))
    pos = np.sum(s > threshold)
    # Formula (15) from [2]: shrinkage of the retained singular values.
    d = np.multiply(s[:pos] / 2, 1 - np.divide((L + M) * sigma2, s[:pos] ** 2) + np.sqrt(
        (1 - np.divide((L + M) * sigma2, s[:pos] ** 2)) ** 2 - 4 * L * M * sigma2 ** 2 / s[:pos] ** 4))
    # Computation of the posterior.
    post = {"ma": np.zeros(H), "mb": np.zeros(H), "sa2": np.zeros(H), "sb2": np.zeros(H), "cacb": np.zeros(H)}
    # NOTE: this local `tau` shadows the module-level tau() helper.
    tau = np.multiply(d, s[:pos]) / (M * sigma2)
    delta = np.multiply(np.sqrt(np.divide(M * d, L * s[:pos])), 1 + alpha / tau)
    post["ma"][:pos] = np.sqrt(np.multiply(d, delta))
    post["mb"][:pos] = np.sqrt(np.divide(d, delta))
    post["sa2"][:pos] = np.divide(sigma2 * delta, s[:pos])
    post["sb2"][:pos] = np.divide(sigma2, np.multiply(delta, s[:pos]))
    post["cacb"][:pos] = np.sqrt(np.multiply(d, s[:pos]) / (L * M))
    post["sigma2"] = sigma2
    # Free energy of the final solution.
    post["F"] = 0.5 * (L * M * np.log(2 * np.pi * sigma2) + (residual + np.sum(s ** 2)) / sigma2
                       + np.sum(M * np.log(tau + 1) + L * np.log(tau / alpha + 1) - M * tau))
    return U[:, :pos], np.diag(d), V[:, :pos], post
def evb_sigma2(sigma2, L, M, s, residual, xubar):
    """Objective minimized over the noise variance ``sigma2`` in empirical VBMF.

    s: singular values of the input matrix; residual: energy outside the
    truncated SVD; xubar: threshold separating retained from discarded
    components (in normalized units).
    """
    rank = len(s)
    alpha = L / M
    # Squared singular values normalized by the candidate noise level.
    x = s ** 2 / (M * sigma2)
    above = x[x > xubar]
    below = x[x <= xubar]
    t_above = tau(above, alpha)
    obj = (np.sum(below - np.log(below))
           + np.sum(above - t_above)
           + np.sum(np.log(np.divide(t_above + 1, above)))
           + alpha * np.sum(np.log(t_above / alpha + 1))
           + residual / (M * sigma2)
           + (L - rank) * np.log(sigma2))
    return obj
def phi0(x):
    """Per-component objective phi_0(x) = x - log(x) for a discarded component."""
    log_x = np.log(x)
    return x - log_x
def phi1(x, alpha):
    """Per-component objective for a retained component.

    Returns log(t + 1) + alpha * log(t / alpha + 1) - t with t = tau(x, alpha).
    The original evaluated tau(x, alpha) three times; it is hoisted here so the
    square root inside tau is computed once.
    """
    t = tau(x, alpha)
    return np.log(t + 1) + alpha * np.log(t / alpha + 1) - t
def tau(x, alpha):
    """Larger root t of t**2 - (x - 1 - alpha)*t + alpha = 0.

    Vectorized over x; used throughout the EVBMF free-energy expressions.
    """
    shifted = x - (1 + alpha)
    return (shifted + np.sqrt(shifted ** 2 - 4 * alpha)) / 2
| 36.982818
| 183
| 0.53819
| 1,563
| 10,762
| 3.666027
| 0.127959
| 0.013613
| 0.010471
| 0.008551
| 0.80925
| 0.762129
| 0.74712
| 0.742757
| 0.742757
| 0.725654
| 0
| 0.03528
| 0.304683
| 10,762
| 290
| 184
| 37.110345
| 0.730456
| 0.321873
| 0
| 0.440299
| 0
| 0
| 0.015276
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052239
| false
| 0
| 0.022388
| 0.022388
| 0.126866
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
92fd1fec338698e07578c11f762543a6ec74922c
| 151
|
py
|
Python
|
dcp_diag/finders/__init__.py
|
HumanCellAtlas/dcp-diag
|
85c324ed86785d8b224fca2f3cbc1097f8606dde
|
[
"MIT"
] | 2
|
2018-10-03T17:56:54.000Z
|
2018-10-08T18:10:38.000Z
|
dcp_diag/finders/__init__.py
|
HumanCellAtlas/dcp-diag
|
85c324ed86785d8b224fca2f3cbc1097f8606dde
|
[
"MIT"
] | 15
|
2018-11-20T11:06:32.000Z
|
2019-09-17T20:06:20.000Z
|
dcp_diag/finders/__init__.py
|
HumanCellAtlas/dcp-diag
|
85c324ed86785d8b224fca2f3cbc1097f8606dde
|
[
"MIT"
] | null | null | null |
from .finder import Finder
from .analysis_finder import AnalysisFinder
from .ingest_finder import IngestFinder
from .upload_finder import UploadFinder
| 30.2
| 43
| 0.86755
| 19
| 151
| 6.736842
| 0.473684
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10596
| 151
| 4
| 44
| 37.75
| 0.948148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
92fead7971e3f5b3cd5d6123a7a86a5234d1d1bc
| 43
|
py
|
Python
|
portal_gun/fabric/__init__.py
|
Coderik/portal-gun
|
081020a46b16b649497bceb6c2435b1ba135b487
|
[
"MIT"
] | 69
|
2018-05-03T18:25:43.000Z
|
2021-02-10T11:37:28.000Z
|
portal_gun/fabric/__init__.py
|
Coderik/portal-gun
|
081020a46b16b649497bceb6c2435b1ba135b487
|
[
"MIT"
] | 7
|
2018-09-19T06:39:11.000Z
|
2022-03-29T21:55:08.000Z
|
portal_gun/fabric/__init__.py
|
Coderik/portal-gun
|
081020a46b16b649497bceb6c2435b1ba135b487
|
[
"MIT"
] | 11
|
2018-07-30T18:09:12.000Z
|
2019-10-03T15:36:13.000Z
|
from portal_gun.fabric.operations import *
| 21.5
| 42
| 0.837209
| 6
| 43
| 5.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 43
| 1
| 43
| 43
| 0.897436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1304bbe030561543d44e8190b98bb75893c61122
| 4,503
|
py
|
Python
|
pyggi/tree/tree.py
|
coinse/py
|
1ecbf881646fe91bbbbbdd8010e2c70a2defab40
|
[
"MIT"
] | 26
|
2018-01-30T13:07:51.000Z
|
2021-08-01T13:41:48.000Z
|
pyggi/tree/tree.py
|
coinse/py
|
1ecbf881646fe91bbbbbdd8010e2c70a2defab40
|
[
"MIT"
] | 9
|
2018-01-10T02:22:10.000Z
|
2021-12-08T06:28:19.000Z
|
pyggi/tree/tree.py
|
coinse/py
|
1ecbf881646fe91bbbbbdd8010e2c70a2defab40
|
[
"MIT"
] | 9
|
2019-02-11T19:00:52.000Z
|
2021-12-30T07:48:52.000Z
|
import os
import ast
import astor
import random
from abc import abstractmethod
from . import AbstractTreeEngine, AstorEngine, XmlEngine
from ..base import AbstractProgram, AbstractEdit
from ..utils import get_file_extension
class TreeProgram(AbstractProgram):
    """Program whose source files are manipulated as syntax trees."""

    @classmethod
    def get_engine(cls, file_name):
        """Return the tree engine for *file_name* based on its extension.

        Raises Exception when the extension is neither .py nor .xml.
        """
        extension = get_file_extension(file_name)
        engine = {'.py': AstorEngine, '.xml': XmlEngine}.get(extension)
        if engine is None:
            raise Exception('{} file is not supported'.format(extension))
        return engine
"""
Possible Edit Operators
"""
class TreeEdit(AbstractEdit):
    """Base class for edit operators that target TreeProgram instances."""
    @property
    def domain(self):
        # All tree edits operate on TreeProgram (and its subclasses).
        return TreeProgram
class StmtReplacement(TreeEdit):
    """Replace the target statement with the ingredient statement."""

    def __init__(self, target, ingredient):
        self.target = target
        self.ingredient = ingredient

    def apply(self, program, new_contents, modification_points):
        """Delegate the replacement to the engine of the target's file."""
        target_file = self.target[0]
        return program.engines[target_file].do_replace(
            program, self, new_contents, modification_points)

    @classmethod
    def create(cls, program, target_file=None, ingr_file=None, method='random'):
        """Build a random replacement; target and ingredient must share an engine."""
        if target_file is None:
            target_file = program.random_file(AbstractTreeEngine)
        if ingr_file is None:
            ingr_file = program.random_file(engine=program.engines[target_file])
        assert program.engines[target_file] == program.engines[ingr_file]
        target = program.random_target(target_file, method)
        ingredient = program.random_target(ingr_file, 'random')
        return cls(target, ingredient)
class StmtInsertion(TreeEdit):
    """Insert the ingredient statement before or after the target statement."""

    def __init__(self, target, ingredient, direction='before'):
        assert direction in ['before', 'after']
        self.target = target
        self.ingredient = ingredient
        self.direction = direction

    def apply(self, program, new_contents, modification_points):
        """Delegate the insertion to the engine of the target's file."""
        target_file = self.target[0]
        return program.engines[target_file].do_insert(
            program, self, new_contents, modification_points)

    @classmethod
    def create(cls, program, target_file=None, ingr_file=None, direction=None, method='random'):
        """Build a random insertion; direction defaults to a coin flip."""
        if target_file is None:
            target_file = program.random_file(AbstractTreeEngine)
        if ingr_file is None:
            ingr_file = program.random_file(engine=program.engines[target_file])
        assert program.engines[target_file] == program.engines[ingr_file]
        if direction is None:
            direction = random.choice(['before', 'after'])
        target = program.random_target(target_file, method)
        ingredient = program.random_target(ingr_file, 'random')
        return cls(target, ingredient, direction)
class StmtDeletion(TreeEdit):
    """Delete the target statement."""

    def __init__(self, target):
        self.target = target

    def apply(self, program, new_contents, modification_points):
        """Delegate the deletion to the engine of the target's file."""
        target_file = self.target[0]
        return program.engines[target_file].do_delete(
            program, self, new_contents, modification_points)

    @classmethod
    def create(cls, program, target_file=None, method='random'):
        """Build a deletion of a random statement from *target_file*."""
        if target_file is None:
            target_file = program.random_file(AbstractTreeEngine)
        return cls(program.random_target(target_file, method))
class StmtMoving(TreeEdit):
    """Move the ingredient statement next to the target statement."""
    def __init__(self, target, ingredient, direction='before'):
        assert direction in ['before', 'after']
        self.target = target
        self.ingredient = ingredient
        self.direction = direction

    def apply(self, program, new_contents, modification_points):
        # A move is an insert followed by a delete of the original ingredient.
        # The first swap makes do_delete operate on the ingredient; the second
        # swap restores this edit's own state. Statement order matters here.
        engine = program.engines[self.target[0]]
        engine.do_insert(program, self, new_contents, modification_points)
        self.target, self.ingredient = self.ingredient, self.target
        return_code = engine.do_delete(program, self, new_contents, modification_points)
        self.target, self.ingredient = self.ingredient, self.target
        return return_code

    @classmethod
    def create(cls, program, target_file=None, ingr_file=None, direction=None, method='random'):
        # Pick random files/targets when unspecified; both files must share an engine.
        if target_file is None:
            target_file = program.random_file(AbstractTreeEngine)
        if ingr_file is None:
            ingr_file = program.random_file(engine=program.engines[target_file])
        assert program.engines[target_file] == program.engines[ingr_file]
        if direction is None:
            direction = random.choice(['before', 'after'])
        return cls(program.random_target(target_file, method),
                   program.random_target(ingr_file, 'random'),
                   direction)
| 39.5
| 96
| 0.682878
| 513
| 4,503
| 5.80117
| 0.14425
| 0.073925
| 0.069556
| 0.087702
| 0.801075
| 0.792675
| 0.767473
| 0.767473
| 0.752688
| 0.739247
| 0
| 0.001145
| 0.224295
| 4,503
| 113
| 97
| 39.849558
| 0.850845
| 0
| 0
| 0.610526
| 0
| 0
| 0.028846
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 1
| 0.147368
| false
| 0
| 0.084211
| 0.010526
| 0.410526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1329bceb63a64baf7876e71db63c846360a57c5a
| 17,505
|
py
|
Python
|
client/src/dolbyio_rest_apis/communications/monitor/conferences.py
|
dolbyio-samples/dolbyio-rest-apis-client-python
|
37354dc10f967c4656776f9e2651a2284a11f530
|
[
"MIT"
] | 1
|
2021-12-23T17:55:06.000Z
|
2021-12-23T17:55:06.000Z
|
client/src/dolbyio_rest_apis/communications/monitor/conferences.py
|
dolbyio-samples/dolbyio-rest-apis-client-python
|
37354dc10f967c4656776f9e2651a2284a11f530
|
[
"MIT"
] | null | null | null |
client/src/dolbyio_rest_apis/communications/monitor/conferences.py
|
dolbyio-samples/dolbyio-rest-apis-client-python
|
37354dc10f967c4656776f9e2651a2284a11f530
|
[
"MIT"
] | null | null | null |
"""
dolbyio_rest_apis.communications.monitor.conferences
~~~~~~~~~~~~~~~
This module contains the functions to work with the monitor API related to conferences.
"""
from dolbyio_rest_apis.communications.internal.http_context import CommunicationsHttpContext
from dolbyio_rest_apis.communications.internal.urls import get_monitor_url
from dolbyio_rest_apis.communications.monitor.models import GetConferencesResponse, ConferenceSummary, ConferenceStatistics, ConferenceParticipants, ConferenceParticipant
from typing import Any, Dict, List
async def get_conferences(
    access_token: str,
    tr_from: int=0,
    tr_to: int=9999999999999,
    maximum: int=100,
    start: str=None,
    filter_alias: str=None,
    active: bool=False,
    external_id: str=None,
    live_stats: bool=False,
) -> GetConferencesResponse:
    r"""
    Get one page of conferences started in a specific time range, including ongoing conferences.

    Note: only terminated conferences include a complete summary. Ongoing
    conferences expose `confId`, `alias`, `region`, `dolbyVoice`, `start`,
    `live`, `owner`.

    See: https://docs.dolby.io/communications-apis/reference/get-conferences

    Args:
        access_token: Access token to use for authentication.
        tr_from: (Optional) Beginning of the time range (milliseconds since epoch).
        tr_to: (Optional) End of the time range (milliseconds since epoch).
        maximum: (Optional) Maximum number of displayed results.
        start: (Optional) Pagination cursor: the `next` value from the previous response.
        filter_alias: (Optional) Regular expression used to match conference aliases.
        active: (Optional) Search only ongoing conferences (`true`) or all (`false`).
        external_id: (Optional) External ID of the participant who created the conference.
        live_stats: (Optional) For live conferences, include the number of `user`,
            `listener`, and `pstn` participants.

    Returns:
        A :class:`GetConferencesResponse` object.

    Raises:
        HttpRequestError: If a client error one occurred.
        HTTPError: If one occurred.
    """
    url = f'{get_monitor_url()}/conferences'
    params = {
        'from': tr_from,
        'to': tr_to,
        'max': maximum,
        'active': str(active),
        'livestats': str(live_stats),
    }
    # Only send optional filters that were actually provided.
    # (idiom fix: `x is not None` instead of `not x is None`, PEP 8 / E714)
    if start is not None:
        params['start'] = start
    if filter_alias is not None:
        params['alias'] = filter_alias
    if external_id is not None:
        params['exid'] = external_id
    async with CommunicationsHttpContext() as http_context:
        json_response = await http_context.requests_get(
            access_token=access_token,
            url=url,
            params=params,
        )
    return GetConferencesResponse(json_response)
async def get_all_conferences(
    access_token: str,
    tr_from: int=0,
    tr_to: int=9999999999999,
    page_size: int=100,
    filter_alias: str=None,
    active: bool=False,
    external_id: str=None,
    live_stats: bool=False,
) -> List[ConferenceSummary]:
    r"""
    Get a list of all conferences started in a specific time range, following
    pagination automatically.

    Note: only terminated conferences include a complete summary. Ongoing
    conferences expose `confId`, `alias`, `region`, `dolbyVoice`, `start`,
    `live`, `owner`.

    See: https://docs.dolby.io/communications-apis/reference/get-conferences

    Args:
        access_token: Access token to use for authentication.
        tr_from: (Optional) Beginning of the time range (milliseconds since epoch).
        tr_to: (Optional) End of the time range (milliseconds since epoch).
        page_size: (Optional) Number of elements to return per page.
        filter_alias: (Optional) Regular expression used to match conference aliases.
        active: (Optional) Search only ongoing conferences (`true`) or all (`false`).
        external_id: (Optional) External ID of the participant who created the conference.
        live_stats: (Optional) For live conferences, include the number of `user`,
            `listener`, and `pstn` participants.

    Returns:
        A list of :class:`ConferenceSummary` objects.

    Raises:
        HttpRequestError: If a client error one occurred.
        HTTPError: If one occurred.
    """
    url = f'{get_monitor_url()}/conferences'
    params = {
        'from': tr_from,
        'to': tr_to,
        'max': page_size,
        'active': str(active),
        'livestats': str(live_stats),
    }
    # idiom fix: `x is not None` instead of `not x is None` (PEP 8 / E714)
    if filter_alias is not None:
        params['alias'] = filter_alias
    if external_id is not None:
        params['exid'] = external_id
    async with CommunicationsHttpContext() as http_context:
        elements: List[Any] = await http_context.requests_get_all(
            access_token=access_token,
            url=url,
            params=params,
            property_name='conferences',
            page_size=page_size
        )
    # Wrap each raw JSON element into its model object.
    return [ConferenceSummary(element) for element in elements]
async def get_conference(
    access_token: str,
    conference_id: str,
    live_stats: bool=False,
) -> ConferenceSummary:
    r"""
    Get a summary of a conference.

    Note: only terminated conferences include a complete summary. Ongoing
    conferences expose `confId`, `alias`, `region`, `dolbyVoice`, `start`,
    `live`, `owner`.

    See: https://docs.dolby.io/communications-apis/reference/get-conference-summary

    Args:
        access_token: Access token to use for authentication.
        conference_id: Identifier of the conference.
        live_stats: (Optional) For live conferences, include the number of `user`,
            `listener`, and `pstn` participants.

    Returns:
        A :class:`ConferenceSummary` object.

    Raises:
        HttpRequestError: If a client error one occurred.
        HTTPError: If one occurred.
    """
    url = f'{get_monitor_url()}/conferences/{conference_id}'
    query = {'livestats': str(live_stats)}
    async with CommunicationsHttpContext() as http_context:
        payload = await http_context.requests_get(
            access_token=access_token,
            url=url,
            params=query,
        )
    return ConferenceSummary(payload)
async def get_conference_statistics(
    access_token: str,
    conference_id: str,
) -> ConferenceStatistics:
    r"""
    Get statistics of a terminated conference.

    The statistics include the maximum number of participants present during a
    conference and the maximum number of the transmitted and received packets,
    bytes, and streams.

    Note: The statistics are available only for terminated conferences.

    See: https://docs.dolby.io/communications-apis/reference/get-conference-statistics

    Args:
        access_token: Access token to use for authentication.
        conference_id: Identifier of the conference.

    Returns:
        A :class:`ConferenceStatistics` object.

    Raises:
        HttpRequestError: If a client error one occurred.
        HTTPError: If one occurred.
    """
    # Doc fix: the original docstring documented a `live_stats` argument that
    # this function does not accept; it has been removed.
    url = f'{get_monitor_url()}/conferences/{conference_id}/statistics'
    async with CommunicationsHttpContext() as http_context:
        json_response = await http_context.requests_get(
            access_token=access_token,
            url=url,
        )
    return ConferenceStatistics(json_response)
async def get_conference_participants(
    access_token: str,
    conference_id: str,
    participant_type: str=None,
    tr_from: int=0,
    tr_to: int=9999999999999,
    maximum: int=100,
    start: str=None,
) -> ConferenceParticipants:
    r"""
    Get one page of statistics and connection details for the participants in a
    conference, optionally limited to a time range.

    See: https://docs.dolby.io/communications-apis/reference/get-info-conference-participants

    Args:
        access_token: Access token to use for authentication.
        conference_id: Identifier of the conference.
        participant_type: (Optional) One of `user`, `listener`, `pstn`, `mixer`.
        tr_from: (Optional) Beginning of the time range (milliseconds since epoch).
        tr_to: (Optional) End of the time range (milliseconds since epoch).
        maximum: (Optional) Maximum number of displayed results.
        start: (Optional) Pagination cursor: the `next` value from the previous response.

    Returns:
        A :class:`ConferenceParticipants` object.

    Raises:
        HttpRequestError: If a client error one occurred.
        HTTPError: If one occurred.
    """
    url = f'{get_monitor_url()}/conferences/{conference_id}/participants'
    params = {
        'from': tr_from,
        'to': tr_to,
        'max': maximum,
    }
    # idiom fix: `x is not None` instead of `not x is None` (PEP 8 / E714)
    if participant_type is not None:
        params['type'] = participant_type
    if start is not None:
        params['start'] = start
    async with CommunicationsHttpContext() as http_context:
        json_response = await http_context.requests_get(
            access_token=access_token,
            url=url,
            params=params,
        )
    return ConferenceParticipants(json_response)
async def get_all_conference_participants(
    access_token: str,
    conference_id: str,
    participant_type: str=None,
    tr_from: int=0,
    tr_to: int=9999999999999,
    page_size: int=100,
) -> Dict[str, ConferenceParticipant]:
    r"""
    Get statistics and connection details of all participants in a conference,
    following pagination automatically.

    See: https://docs.dolby.io/communications-apis/reference/get-info-conference-participants

    Args:
        access_token: Access token to use for authentication.
        conference_id: Identifier of the conference.
        participant_type: (Optional) One of `user`, `listener`, `pstn`, `mixer`.
        tr_from: (Optional) Beginning of the time range (milliseconds since epoch).
        tr_to: (Optional) End of the time range (milliseconds since epoch).
        page_size: (Optional) Number of elements to return per page.

    Returns:
        A dictionary mapping user IDs to :class:`ConferenceParticipant` objects.

    Raises:
        HttpRequestError: If a client error one occurred.
        HTTPError: If one occurred.
    """
    url = f'{get_monitor_url()}/conferences/{conference_id}/participants'
    params = {
        'from': tr_from,
        'to': tr_to,
        'max': page_size,
    }
    # idiom fix: `x is not None` / `'key' not in d` instead of the negated forms
    # (PEP 8 / E713, E714)
    if participant_type is not None:
        params['type'] = participant_type
    participants: Dict[str, ConferenceParticipant] = {}
    async with CommunicationsHttpContext() as http_context:
        while True:
            json_response = await http_context.requests_get(
                access_token=access_token,
                url=url,
                params=params,
            )
            if 'participants' in json_response:
                sub_result = json_response['participants']
                for participant_id in sub_result:
                    participant = ConferenceParticipant(participant_id, sub_result[participant_id])
                    participants[participant.user_id] = participant
                # A short page means we have reached the last page.
                if len(sub_result) < page_size:
                    break
            if 'next' not in json_response:
                break
            params['start'] = json_response['next']
            if params['start'] is None or params['start'] == '':
                break
    return participants
async def get_conference_participant(
    access_token: str,
    conference_id: str,
    participant_id: str,
    participant_type: str=None,
    tr_from: int=0,
    tr_to: int=9999999999999,
    maximum: int=100,
    start: str=None,
) -> ConferenceParticipant:
    r"""
    Get the statistics and connection details of one conference participant,
    optionally limited to a time range.

    See: https://docs.dolby.io/communications-apis/reference/get-info-conference-participant

    Args:
        access_token: Access token to use for authentication.
        conference_id: Identifier of the conference.
        participant_id: Identifier of the participant.
        participant_type: (Optional) One of `user`, `listener`, `pstn`, `mixer`.
        tr_from: (Optional) Beginning of the time range (milliseconds since epoch).
        tr_to: (Optional) End of the time range (milliseconds since epoch).
        maximum: (Optional) Maximum number of displayed results.
        start: (Optional) Pagination cursor: the `next` value from the previous response.

    Returns:
        A :class:`ConferenceParticipant` object.

    Raises:
        HttpRequestError: If a client error one occurred.
        HTTPError: If one occurred.
    """
    url = f'{get_monitor_url()}/conferences/{conference_id}/participants/{participant_id}'
    params = {
        'from': tr_from,
        'to': tr_to,
        'max': maximum,
    }
    # idiom fix: `x is not None` instead of `not x is None` (PEP 8 / E714)
    if participant_type is not None:
        params['type'] = participant_type
    if start is not None:
        params['start'] = start
    async with CommunicationsHttpContext() as http_context:
        json_response = await http_context.requests_get(
            access_token=access_token,
            url=url,
            params=params,
        )
    participants = ConferenceParticipants(json_response)
    return participants[participant_id]
| 39.874715
| 170
| 0.66878
| 2,097
| 17,505
| 5.483548
| 0.105389
| 0.033481
| 0.020697
| 0.026785
| 0.840421
| 0.816071
| 0.787199
| 0.78372
| 0.777894
| 0.770589
| 0
| 0.008984
| 0.256041
| 17,505
| 438
| 171
| 39.965753
| 0.873992
| 0.008969
| 0
| 0.653061
| 0
| 0
| 0.08169
| 0.053966
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.020408
| 0
| 0.056122
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
135da54d3e240141267fc4f867360f8045c18e34
| 39
|
py
|
Python
|
cookiecutters/project/{{cookiecutter.module_name}}/{{cookiecutter.module_name}}/apps/rest_api/serializers/__init__.py
|
pythrick/drf-cli
|
029c1437b21e9eabd6f30477df60bbe18471a319
|
[
"MIT"
] | 7
|
2020-05-12T01:07:59.000Z
|
2020-10-07T13:26:45.000Z
|
cookiecutters/project/{{cookiecutter.module_name}}/{{cookiecutter.module_name}}/apps/rest_api/serializers/__init__.py
|
pythrick/drf-cli
|
029c1437b21e9eabd6f30477df60bbe18471a319
|
[
"MIT"
] | 7
|
2020-05-12T16:13:09.000Z
|
2020-10-07T12:38:44.000Z
|
cookiecutters/project/{{cookiecutter.module_name}}/{{cookiecutter.module_name}}/apps/rest_api/serializers/__init__.py
|
pythrick/drf-cli
|
029c1437b21e9eabd6f30477df60bbe18471a319
|
[
"MIT"
] | null | null | null |
from .profile import ProfileSerializer
| 19.5
| 38
| 0.871795
| 4
| 39
| 8.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
13b34561a7358d93c40df4c11480bc12dd226260
| 172
|
py
|
Python
|
discode/models/__init__.py
|
TheFarGG/Discode
|
facf6cd4f82baef2288a23dbe6f2a02dfc2407e2
|
[
"MIT"
] | 3
|
2021-11-06T11:07:18.000Z
|
2022-03-18T09:04:42.000Z
|
discode/models/__init__.py
|
UnrealFar/Discode
|
facf6cd4f82baef2288a23dbe6f2a02dfc2407e2
|
[
"MIT"
] | 3
|
2021-11-06T11:22:05.000Z
|
2022-03-12T16:36:52.000Z
|
discode/models/__init__.py
|
UnrealFar/Discode
|
facf6cd4f82baef2288a23dbe6f2a02dfc2407e2
|
[
"MIT"
] | 4
|
2021-11-06T11:08:26.000Z
|
2022-03-12T14:25:57.000Z
|
from .assets import *
from .channel import *
from .guild import *
from .role import *
from .member import *
from .message import *
from .user import *
from .emoji import *
| 19.111111
| 22
| 0.72093
| 24
| 172
| 5.166667
| 0.416667
| 0.564516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186047
| 172
| 8
| 23
| 21.5
| 0.885714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
13c0dfb62c63f0f5ab51133c698909a2f6c6101f
| 3,041
|
py
|
Python
|
pypy/tool/pytest/test/test_conftest1.py
|
Qointum/pypy
|
c0ed88efbc135a75a535f4534ca1f3baf0bf39d8
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
pypy/tool/pytest/test/test_conftest1.py
|
Qointum/pypy
|
c0ed88efbc135a75a535f4534ca1f3baf0bf39d8
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
pypy/tool/pytest/test/test_conftest1.py
|
Qointum/pypy
|
c0ed88efbc135a75a535f4534ca1f3baf0bf39d8
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
import py
import sys
# Path to the inner test file that the tests below run through pytest.
innertest = py.path.local(__file__).dirpath('conftest1_innertest.py')
# Enable pytest's "pytester" plugin, which provides the `testdir` fixture.
pytest_plugins = "pytester"
class TestPyPyTests:
    """Run the inner conftest test file and check which interp-level and
    app-level tests are selected and how they end.

    Fix: the original used the Python-2-only `print passed` statement form
    (a SyntaxError under Python 3); `print(passed)` is equivalent for a single
    argument on both Python 2 and 3.
    """

    def test_selection_by_keyword_interp(self, testdir):
        # Only the two interp-level tests match the "interplevel" marker.
        sorter = testdir.inline_run("-m", "interplevel", innertest, )
        passed, skipped, failed = sorter.listoutcomes()
        assert len(passed) == 2, len(passed)
        assert not skipped and not failed
        assert "test_something" in passed[0].nodeid
        assert "test_method" in passed[1].nodeid

    def test_selection_by_keyword_app(self, testdir):
        sorter = testdir.inline_run("-m", "applevel -docstring", innertest)
        passed, skipped, failed = sorter.listoutcomes()
        assert len(passed) == 4
        assert not skipped
        assert len(failed) == 2
        assert "app_test_something" in passed[0].nodeid
        assert "test_method_app" in passed[1].nodeid

    def test_runappdirect(self, testdir):
        sorter = testdir.inline_run(innertest, '-m', 'applevel -docstring',
                                    '--runappdirect')
        passed, skipped, failed = sorter.listoutcomes()
        assert len(passed) == 4
        print(passed)
        assert "app_test_something" in passed[0].nodeid
        assert "test_method_app" in passed[1].nodeid

    def test_docstring_in_methods(self, testdir):
        sorter = testdir.inline_run("-k", "AppTestSomething and test_code_in_docstring",
                                    innertest)
        passed, skipped, failed = sorter.listoutcomes()
        assert len(passed) == 1
        assert len(failed) == 1
        assert skipped == []
        assert "test_code_in_docstring_ignored" in passed[0].nodeid
        assert "test_code_in_docstring_failing" in failed[0].nodeid

    def test_docstring_in_functions(self, testdir):
        sorter = testdir.inline_run("-k", "app_test_code_in_docstring", innertest)
        passed, skipped, failed = sorter.listoutcomes()
        assert passed == []
        assert len(failed) == 1
        assert skipped == []
        assert "app_test_code_in_docstring_failing" in failed[0].nodeid

    def test_docstring_runappdirect(self, testdir):
        sorter = testdir.inline_run(innertest,
                                    '-k', 'test_code_in_docstring',
                                    '--runappdirect')
        passed, skipped, failed = sorter.listoutcomes()
        assert len(passed) == 1
        assert len(failed) == 2
        assert "test_code_in_docstring_ignored" in passed[0].nodeid
        assert "app_test_code_in_docstring_failing" in failed[0].nodeid
        assert "test_code_in_docstring_failing" in failed[1].nodeid

    def test_raises_inside_closure(self, testdir):
        sorter = testdir.inline_run(innertest, '-k', 'app_test_raise_in_a_closure',
                                    '--runappdirect')
        passed, skipped, failed = sorter.listoutcomes()
        assert len(passed) == 1
        print(passed)
        assert "app_test_raise_in_a_closure" in passed[0].nodeid
| 42.830986
| 88
| 0.635646
| 352
| 3,041
| 5.238636
| 0.170455
| 0.048807
| 0.048807
| 0.092733
| 0.835683
| 0.779284
| 0.743492
| 0.646421
| 0.563991
| 0.505965
| 0
| 0.010748
| 0.265702
| 3,041
| 70
| 89
| 43.442857
| 0.815047
| 0
| 0
| 0.508197
| 0
| 0
| 0.183882
| 0.109868
| 0
| 0
| 0
| 0
| 0.459016
| 0
| null | null | 0.409836
| 0.032787
| null | null | 0.032787
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
13d8f79586e0bb1ef36183a727e26549e5e516e7
| 156
|
py
|
Python
|
cometcurve/tests/test_models.py
|
mrtommyb/cometcurve
|
36d4d28d6dd28ebe85554ea09d77a22222e13dce
|
[
"MIT"
] | 3
|
2020-04-06T02:15:50.000Z
|
2020-09-29T16:19:07.000Z
|
cometcurve/tests/test_models.py
|
mrtommyb/cometcurve
|
36d4d28d6dd28ebe85554ea09d77a22222e13dce
|
[
"MIT"
] | 1
|
2020-04-08T17:22:12.000Z
|
2020-04-08T17:22:12.000Z
|
cometcurve/tests/test_models.py
|
mrtommyb/cometcurve
|
36d4d28d6dd28ebe85554ea09d77a22222e13dce
|
[
"MIT"
] | 2
|
2020-04-06T23:46:54.000Z
|
2020-04-08T16:50:44.000Z
|
from ..models import comet_magnitude_power_law
def test_power_law():
    """With h=10 and all other parameters at 1, the magnitude equals h."""
    expected = 10.
    observed = comet_magnitude_power_law(h=10., n=1., delta=1., r=1.)
    assert observed == expected
| 31.2
| 64
| 0.705128
| 27
| 156
| 3.777778
| 0.62963
| 0.235294
| 0.372549
| 0.431373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05303
| 0.153846
| 156
| 5
| 65
| 31.2
| 0.719697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
13ef0c66fafcc59b64c4243eaf97a69ba30ac764
| 28,433
|
py
|
Python
|
pirates/leveleditor/worldData/tutorial_interior_withPit.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 81
|
2018-04-08T18:14:24.000Z
|
2022-01-11T07:22:15.000Z
|
pirates/leveleditor/worldData/tutorial_interior_withPit.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 4
|
2018-09-13T20:41:22.000Z
|
2022-01-08T06:57:00.000Z
|
pirates/leveleditor/worldData/tutorial_interior_withPit.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 26
|
2018-05-26T12:49:27.000Z
|
2021-09-11T09:11:59.000Z
|
from pandac.PandaModules import Point3, VBase3, Vec4, Vec3
objectStruct = {'Objects': {'1159905354.84jubutler': {'Type': 'Building Interior','Name': '','Instanced': True,'Objects': {'1152830677.95jubutler': {'Type': 'Townsperson','Category': 'Cast','AnimSet': 'wt_sword','AuraFX': 'None','Boss': False,'CustomModel': 'models/char/wt_2000','DNA': '1152830677.95jubutler','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','Hpr': VBase3(130.429, 0.0, 0.0),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Patrol Radius': 12,'Pos': Point3(-22.707, 1.821, -0.015),'PoseAnim': '','PoseFrame': '','Private Status': 'All','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Start State': 'Idle','StartFrame': '0','Team': 'Villager','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','Zombie': False,'spawnTimeAlt': '','spawnTimeBegin': 0.0,'spawnTimeEnd': 0.0},'1159918729.95jubutler': {'Type': 'Barrel','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-1.504, -22.449, -6.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/barrel'}},'1159918893.65jubutler': {'Type': 'Crate','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Objects': {},'Pos': Point3(-2.176, 21.64, -6.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.4000000059604645, 0.4000000059604645, 0.4000000059604645, 1.0),'Model': 'models/props/crate'}},'1159918930.42jubutler': {'Type': 'Crate','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Objects': {},'Pos': Point3(-2.186, 16.699, -6.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0),'Model': 'models/props/crate'}},'1159918935.67jubutler': {'Type': 'Crate','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Objects': {},'Pos': Point3(-2.914, 13.461, -6.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 
1.0),'Model': 'models/props/crate'}},'1159924057.48jubutler': {'Type': 'Crate','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, 0.0),'Objects': {'1159924154.7jubutler': {'Type': 'Bucket','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(0.004, 0.185, 6.192),'Scale': VBase3(0.851, 0.851, 0.851),'Visual': {'Model': 'models/props/bucket'}}},'Pos': Point3(50.652, -7.089, -6.0),'Scale': VBase3(1.176, 1.176, 1.176),'Visual': {'Model': 'models/props/crates_group_2'}},'1159924096.73jubutler': {'Type': 'Crate','DisableCollision': False,'Hpr': VBase3(-6.51, 0.0, 0.0),'Pos': Point3(50.383, -15.353, -6.0),'Scale': VBase3(0.854, 0.854, 0.854),'Visual': {'Color': (1.0, 0.800000011920929, 0.6000000238418579, 1.0),'Model': 'models/props/crates_group_1'}},'1159924766.73jubutler': {'Type': 'Sack','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(57.838, 5.595, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0),'Model': 'models/props/Sack'}},'1159924775.59jubutler': {'Type': 'Sack','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(57.759, 2.133, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.5, 0.5, 0.5, 1.0),'Model': 'models/props/Sack'}},'1159924787.12jubutler': {'Type': 'Sack','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(58.418, 3.914, 1.032),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/Sack'}},'1159924828.45jubutler': {'Type': 'Sack','DisableCollision': False,'Hpr': VBase3(0.0, 28.245, -1.443),'Pos': Point3(57.693, 0.617, 0.699),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/Sack'}},'1159924880.81jubutler': {'Type': 'Rope','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(57.852, 4.166, 2.33),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.5, 0.5, 0.5, 1.0),'Model': 
'models/props/rope_pile'}},'1159925037.63jubutler': {'Type': 'Interactive Prop','Hpr': VBase3(-27.961, 0.0, 0.0),'Pos': Point3(33.821, 13.594, -6.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/dummy_zero'},'interactAble': 'player','interactType': 'hit'},'1159925055.62jubutler': {'Type': 'Interactive Prop','Hpr': VBase3(27.507, 0.0, 0.0),'Pos': Point3(15.688, 14.335, -6.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/dummy_zero'},'interactAble': 'player','interactType': 'hit'},'1159925061.38jubutler': {'Type': 'Interactive Prop','Hpr': VBase3(-118.811, 0.0, 0.0),'Pos': Point3(37.192, -7.817, -6.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/dummy_zero'},'interactAble': 'player','interactType': 'hit'},'1159925071.65jubutler': {'Type': 'Interactive Prop','Hpr': VBase3(-152.47, 0.0, 0.0),'Pos': Point3(30.682, -14.427, -6.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/dummy_zero'},'interactAble': 'player','interactType': 'hit'},'1159925600.84jubutler': {'Type': 'Baskets','DisableCollision': False,'Hpr': VBase3(21.178, 0.012, 0.396),'Pos': Point3(-263.954, -3652.912, 19.55),'Scale': VBase3(2.018, 2.018, 2.018),'Visual': {'Model': 'models/props/basket'}},'1159925605.49jubutler': {'Type': 'Baskets','DisableCollision': False,'Hpr': VBase3(42.355, 0.166, 0.761),'Pos': Point3(-264.163, -3656.143, 19.574),'Scale': VBase3(2.018, 2.018, 2.018),'Visual': {'Model': 'models/props/basket'}},'1159925613.32jubutler': {'Type': 'Baskets','DisableCollision': False,'Hpr': VBase3(63.532, 0.442, 1.045),'Pos': Point3(-256.015, -3649.975, 19.436),'Scale': VBase3(2.018, 2.018, 2.018),'Visual': {'Model': 'models/props/basket'}},'1159925616.7jubutler': {'Type': 'Baskets','DisableCollision': False,'Hpr': VBase3(21.178, 0.012, 0.396),'Pos': Point3(-257.856, -3649.215, 19.453),'Scale': VBase3(2.018, 2.018, 2.018),'Visual': {'Model': 'models/props/basket'}},'1159925659.09jubutler': {'Type': 
'Baskets','DisableCollision': False,'Hpr': VBase3(21.178, 0.012, 0.396),'Pos': Point3(-255.926, -3651.044, 19.442),'Scale': VBase3(2.284, 2.284, 2.284),'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0),'Model': 'models/props/basket'}},'1159925780.04jubutler': {'Type': 'Cart','DisableCollision': False,'Hpr': VBase3(76.866, 0.0, 0.0),'Pos': Point3(-53.645, 30.909, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/cart_flat'}},'1170980361.73dzlu': {'Type': 'Light - Dynamic','Attenuation': '0.00005','ConeAngle': '60.0000','DropOff': '0.0000','FlickRate': 0.5,'Flickering': False,'Hpr': VBase3(-129.016, -26.232, -108.469),'Intensity': '0.3030','LightType': 'AMBIENT','Pos': Point3(-41.861, -0.155, 8.914),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (1, 1, 1, 1),'Model': 'models/props/light_tool_bulb'}},'1170980523.23dzlu': {'Type': 'Light - Dynamic','Attenuation': '0.00005','ConeAngle': '60.0000','DropOff': '12.2727','FlickRate': 0.5,'Flickering': False,'Hpr': VBase3(-67.358, -28.05, -93.578),'Intensity': '0.9394','LightType': 'SPOT','Pos': Point3(-65.69, -4.01, 17.472),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/light_tool_bulb'}},'1170980694.88dzlu': {'Type': 'Light - Dynamic','Attenuation': '0.02','ConeAngle': '60.0000','DropOff': '83.1818','FlickRate': 0.5,'Flickering': False,'Hpr': VBase3(54.652, -12.703, 15.542),'Intensity': '0.5758','LightType': 'SPOT','Pos': Point3(10.174, -38.786, 12.998),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/light_tool_bulb'}},'1171097747.0MAsaduzz': {'Type': 'Light - Dynamic','Attenuation': '0.005','ConeAngle': '60.0000','DropOff': '62.7273','FlickRate': 0.5,'Flickering': False,'Hpr': VBase3(120.385, -12.537, -15.866),'Instanced': False,'Intensity': '0.7727','LightType': 'SPOT','Pos': Point3(10.513, 37.976, 15.369),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (1.0, 1.0, 1.0, 1.0),'Model': 'models/props/light_tool_bulb'}},'1175046030.53dzlu': 
{'Type': 'Light - Dynamic','Attenuation': '0.00005','ConeAngle': '32.5000','DropOff': '6.8182','FlickRate': 0.5,'Flickering': False,'Hpr': VBase3(-116.324, -24.39, -124.869),'Intensity': '0.9091','LightType': 'SPOT','Pos': Point3(-64.371, -0.41, 28.642),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/light_tool_bulb'}},'1175820082.68dzlu': {'Type': 'Light - Dynamic','Attenuation': '0.005','ConeAngle': '54.7727','DropOff': '20.4545','FlickRate': 0.5,'Flickering': True,'Hpr': Point3(0.0, 0.0, 0.0),'Intensity': '0.2727','LightType': 'POINT','Pos': Point3(23.715, 3.96, -1.451),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.8700000047683716, 1.0, 1.0, 1.0),'Model': 'models/props/light_tool_bulb'}},'1177982558.47kmuller': {'Type': 'Prop_Groups','DisableCollision': True,'Hpr': VBase3(-27.924, 0.0, 0.0),'Pos': Point3(-53.434, 43.908, 0.0),'Scale': VBase3(0.947, 0.947, 0.947),'Visual': {'Model': 'models/props/prop_group03'}},'1177982584.22kmuller': {'Type': 'Prop_Groups','DisableCollision': True,'Hpr': VBase3(-178.616, 0.0, 0.0),'Pos': Point3(-27.875, 45.944, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.7200000286102295, 0.699999988079071, 0.5899999737739563, 1.0),'Model': 'models/props/prop_group_G'}},'1177982656.34kmuller': {'Type': 'Prop_Groups','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-11.556, 43.735, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.7099999785423279, 0.6700000166893005, 0.6000000238418579, 1.0),'Model': 'models/props/prop_group_E'}},'1177982763.45kmuller': {'Type': 'Prop_Groups','DisableCollision': True,'Hpr': VBase3(6.771, 0.0, 0.0),'Pos': Point3(53.508, 43.24, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.7099999785423279, 0.6700000166893005, 0.6000000238418579, 1.0),'Model': 'models/props/prop_group_C'}},'1177982906.03kmuller': {'Type': 'Crate','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Objects': {},'Pos': Point3(56.704, -35.212, 0.0),'Scale': 
VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/crates_group_2'}},'1177982920.28kmuller': {'Type': 'Prop_Groups','DisableCollision': True,'Hpr': VBase3(-24.802, 0.0, 0.0),'Pos': Point3(42.232, -49.24, 0.0),'Scale': VBase3(0.887, 0.887, 0.887),'Visual': {'Color': (0.47999998927116394, 0.4399999976158142, 0.3700000047683716, 1.0),'Model': 'models/props/prop_group_A'}},'1177982957.33kmuller': {'Type': 'Barrel','DisableCollision': True,'Hpr': VBase3(-58.926, 0.0, 0.0),'Pos': Point3(50.928, -45.439, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.4300000071525574, 0.3499999940395355, 0.3499999940395355, 1.0),'Model': 'models/props/barrel_group_2'}},'1177982993.03kmuller': {'Type': 'Prop_Groups','DisableCollision': True,'Hpr': VBase3(89.937, 0.0, 0.0),'Pos': Point3(22.154, -40.394, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/prop_group_B'}},'1177983026.17kmuller': {'Type': 'Prop_Groups','DisableCollision': True,'Hpr': VBase3(140.559, 0.0, 0.0),'Pos': Point3(48.385, -37.906, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6, 0.72, 0.71, 1.0),'Model': 'models/props/prop_group02'}},'1177983102.81kmuller': {'Type': 'Barrel','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(37.696, -42.193, 0.0),'Scale': VBase3(0.626, 0.626, 0.626),'Visual': {'Color': (0.49000000953674316, 0.47999998927116394, 0.4000000059604645, 1.0),'Model': 'models/props/barrel_grey'}},'1177983164.17kmuller': {'Type': 'Crate','DisableCollision': True,'Hpr': VBase3(90.231, 0.0, 0.0),'Pos': Point3(36.466, -46.174, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.5799999833106995, 0.47999998927116394, 0.4000000059604645, 1.0),'Model': 'models/props/crates_group_1'}},'1177983219.08kmuller': {'Type': 'Trunks','DisableCollision': True,'Hpr': VBase3(-179.35, 0.0, 0.0),'Pos': Point3(27.782, -38.691, 0.0),'Scale': VBase3(1.747, 1.747, 1.747),'Visual': {'Model': 'models/props/Trunk_square'}},'1177983306.06kmuller': {'Type': 
'Prop_Groups','DisableCollision': False,'Hpr': VBase3(130.346, 0.0, 0.0),'Pos': Point3(12.907, -33.953, 0.0),'Scale': VBase3(0.86, 0.86, 0.86),'Visual': {'Model': 'models/props/prop_group_D'}},'1177983368.98kmuller': {'Type': 'Crate','DisableCollision': True,'Hpr': VBase3(-10.938, 0.0, 0.0),'Pos': Point3(15.997, -40.845, 0.0),'Scale': VBase3(1.429, 1.429, 1.429),'Visual': {'Model': 'models/props/crates_group_2'}},'1177983477.67kmuller': {'Type': 'Prop_Groups','DisableCollision': True,'Hpr': VBase3(-172.15, 0.0, 0.0),'Pos': Point3(0.345, -43.915, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.30000001192092896, 0.30000001192092896, 0.30000001192092896, 1.0),'Model': 'models/props/prop_group_C'}},'1177983618.61kmuller': {'Type': 'Crate','DisableCollision': True,'Hpr': VBase3(-29.624, 0.0, 0.0),'Pos': Point3(-7.628, -44.735, 0.0),'Scale': VBase3(1.401, 1.401, 1.401),'Visual': {'Color': (0.5686274766921997, 0.5529412031173706, 0.4941176474094391, 1.0),'Model': 'models/props/crates_group_1'}},'1177983728.36kmuller': {'Type': 'Trunks','DisableCollision': False,'Hpr': VBase3(-162.8, 0.0, 0.0),'Pos': Point3(-1.365, -39.375, 0.0),'Scale': VBase3(1.338, 1.338, 1.338),'Visual': {'Color': (0.47999998927116394, 0.4399999976158142, 0.3700000047683716, 1.0),'Model': 'models/props/Trunk_rounded_2'}},'1177983806.69kmuller': {'Type': 'Cart','DisableCollision': True,'Hpr': VBase3(-14.723, 0.0, 0.0),'Pos': Point3(-48.433, -45.038, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.7490196228027344, 0.7137255072593689, 0.6000000238418579, 1.0),'Model': 'models/props/cart_broken'}},'1177984119.81kmuller': {'Type': 'Prop_Groups','DisableCollision': False,'Hpr': VBase3(-76.987, 0.0, 0.0),'Pos': Point3(-55.548, -29.467, -0.219),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.97, 0.95, 0.88, 1.0),'Model': 'models/props/prop_group_G'}},'1187049656.11akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(97.067, 0.0, 0.0),'Pos': Point3(-42.324, 
-44.263, -0.163),'Scale': VBase3(0.994, 1.0, 1.704),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1187049812.36akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(47.704, 0.0, 0.0),'Pos': Point3(-49.981, 40.956, -0.137),'Scale': VBase3(0.961, 1.0, 1.183),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1187049934.06akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-22.663, 44.486, -0.153),'Scale': VBase3(5.47, 2.343, 1.896),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1187050266.14akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(147.397, 0.0, 0.0),'Pos': Point3(-47.056, -36.757, -0.183),'Scale': VBase3(0.994, 1.0, 1.704),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1187050742.97akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-10.079, 46.003, -0.431),'Scale': VBase3(2.068, 1.442, 2.682),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1187050855.61akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(43.536, 46.395, -0.122),'Scale': VBase3(2.406, 1.414, 1.683),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1187050916.44akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(-91.119, 0.0, 0.0),'Pos': Point3(49.37, 39.467, -0.242),'Scale': VBase3(1.413, 1.651, 1.421),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1187051589.0akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(173.147, 0.0, 0.0),'Pos': Point3(51.288, -34.249, 0.0),'Scale': VBase3(1.0, 1.0, 2.059),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1187051619.05akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(223.667, 0.0, 
0.0),'Pos': Point3(42.715, -37.27, -0.002),'Scale': VBase3(1.082, 1.0, 2.059),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1187051675.14akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-175.231, 0.0, 0.0),'Pos': Point3(38.645, -40.537, 0.0),'Scale': VBase3(0.482, 1.0, 2.059),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1187052553.77akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(128.024, 0.0, 0.0),'Pos': Point3(23.218, -35.156, -0.261),'Scale': VBase3(0.472, 1.0, 1.067),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1187052662.28akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(128.024, 0.0, 0.0),'Pos': Point3(22.596, -39.782, 0.0),'Scale': VBase3(0.472, 1.0, 1.029),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1187052663.47akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-178.914, 0.0, 0.0),'Pos': Point3(22.229, -37.215, -0.159),'Scale': VBase3(1.707, 2.04, 1.848),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1187053153.59akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(3.061, 0.0, 0.0),'Pos': Point3(-0.568, -44.293, -0.097),'Scale': VBase3(4.659, 3.104, 2.188),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1187053246.66akelts': {'Type': 'Crate','DisableCollision': True,'Hpr': VBase3(-15.636, 0.0, 0.0),'Pos': Point3(-5.263, -38.664, 0.0),'Scale': VBase3(0.885, 0.885, 0.885),'Visual': {'Model': 'models/props/crates_group_2'}},'1187053434.09akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(143.38, 0.0, 0.0),'Pos': Point3(-49.351, 46.425, -0.147),'Scale': VBase3(0.666, 1.0, 1.183),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1187053464.33akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(88.218, 0.0, 0.0),'Pos': 
Point3(-50.839, 48.643, -0.168),'Scale': VBase3(0.325, 1.0, 1.183),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1187112270.22akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-1.824, -22.481, -6.203),'Scale': VBase3(1.069, 0.879, 1.364),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1200537088.0jubutler0': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(-13.755, -13.602, 0.267),'Scale': VBase3(1.0, 1.0, 1.0)},'1200537088.0jubutler1': {'Type': 'Door Locator Node','Name': 'door_locator_2','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-20.796, -49.698, -0.234),'Scale': VBase3(1.0, 1.0, 1.0),'TargetUIDs': ['1190757402.45joswilso']},'1212699106.33kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(58.036, -35.92, 0.271),'Scale': VBase3(0.862, 0.862, 0.862),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_tube'}},'1279045770.91caoconno': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(-44.734, -0.0, 0.0),'Pos': Point3(43.229, 41.048, 0.0),'Scale': VBase3(1.0, 1.0, 1.9),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1279045830.36caoconno': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(-21.719, -0.0, 0.0),'Pos': Point3(56.055, 34.225, -0.986),'Scale': VBase3(1.0, 1.0, 1.711),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}},'Visual': {'Model': 'models/buildings/interior_storage_tutorial'}}},'Layers': {'Collisions': ['1184008208.59kmuller', '1184016064.62kmuller', '1184013852.84kmuller', '1185822696.06kmuller', '1184006140.32kmuller', '1184002350.98kmuller', '1184007573.29kmuller', '1184021176.59kmuller', '1184005963.59kmuller', '1188324241.31akelts', '1184006537.34kmuller', '1184006605.81kmuller', '1187139568.33kmuller', 
'1188324186.98akelts', '1184006730.66kmuller', '1184007538.51kmuller', '1184006188.41kmuller', '1184021084.27kmuller', '1185824396.94kmuller', '1185824250.16kmuller', '1185823630.52kmuller', '1185823760.23kmuller', '1185824497.83kmuller', '1185824751.45kmuller', '1187739103.34akelts', '1188323993.34akelts', '1184016538.29kmuller', '1185822200.97kmuller', '1184016225.99kmuller', '1195241421.34akelts', '1195242796.08akelts', '1184020642.13kmuller', '1195237994.63akelts', '1184020756.88kmuller', '1184020833.4kmuller', '1185820992.97kmuller', '1185821053.83kmuller', '1184015068.54kmuller', '1184014935.82kmuller', '1185821432.88kmuller', '1185821701.86kmuller', '1195240137.55akelts', '1195241539.38akelts', '1195238422.3akelts', '1195238473.22akelts', '1185821453.17kmuller', '1184021269.96kmuller', '1185821310.89kmuller', '1185821165.59kmuller', '1185821199.36kmuller', '1185822035.98kmuller', '1184015806.59kmuller', '1185822059.48kmuller', '1185920461.76kmuller', '1194984449.66akelts', '1185824206.22kmuller', '1184003446.23kmuller', '1184003254.85kmuller', '1184003218.74kmuller', '1184002700.44kmuller', '1186705073.11kmuller', '1187658531.86akelts', '1186705214.3kmuller', '1185824927.28kmuller', '1184014204.54kmuller', '1184014152.84kmuller']},'ObjectIds': {'1152830677.95jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1152830677.95jubutler"]','1159905354.84jubutler': '["Objects"]["1159905354.84jubutler"]','1159918729.95jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159918729.95jubutler"]','1159918893.65jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159918893.65jubutler"]','1159918930.42jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159918930.42jubutler"]','1159918935.67jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159918935.67jubutler"]','1159924057.48jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159924057.48jubutler"]','1159924096.73jubutler': 
'["Objects"]["1159905354.84jubutler"]["Objects"]["1159924096.73jubutler"]','1159924154.7jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159924057.48jubutler"]["Objects"]["1159924154.7jubutler"]','1159924766.73jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159924766.73jubutler"]','1159924775.59jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159924775.59jubutler"]','1159924787.12jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159924787.12jubutler"]','1159924828.45jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159924828.45jubutler"]','1159924880.81jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159924880.81jubutler"]','1159925037.63jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159925037.63jubutler"]','1159925055.62jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159925055.62jubutler"]','1159925061.38jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159925061.38jubutler"]','1159925071.65jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159925071.65jubutler"]','1159925600.84jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159925600.84jubutler"]','1159925605.49jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159925605.49jubutler"]','1159925613.32jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159925613.32jubutler"]','1159925616.7jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159925616.7jubutler"]','1159925659.09jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159925659.09jubutler"]','1159925780.04jubutler': '["Objects"]["1159905354.84jubutler"]["Objects"]["1159925780.04jubutler"]','1170980361.73dzlu': '["Objects"]["1159905354.84jubutler"]["Objects"]["1170980361.73dzlu"]','1170980523.23dzlu': '["Objects"]["1159905354.84jubutler"]["Objects"]["1170980523.23dzlu"]','1170980694.88dzlu': 
'["Objects"]["1159905354.84jubutler"]["Objects"]["1170980694.88dzlu"]','1171097747.0MAsaduzz': '["Objects"]["1159905354.84jubutler"]["Objects"]["1171097747.0MAsaduzz"]','1175046030.53dzlu': '["Objects"]["1159905354.84jubutler"]["Objects"]["1175046030.53dzlu"]','1175820082.68dzlu': '["Objects"]["1159905354.84jubutler"]["Objects"]["1175820082.68dzlu"]','1177982558.47kmuller': '["Objects"]["1159905354.84jubutler"]["Objects"]["1177982558.47kmuller"]','1177982584.22kmuller': '["Objects"]["1159905354.84jubutler"]["Objects"]["1177982584.22kmuller"]','1177982656.34kmuller': '["Objects"]["1159905354.84jubutler"]["Objects"]["1177982656.34kmuller"]','1177982763.45kmuller': '["Objects"]["1159905354.84jubutler"]["Objects"]["1177982763.45kmuller"]','1177982906.03kmuller': '["Objects"]["1159905354.84jubutler"]["Objects"]["1177982906.03kmuller"]','1177982920.28kmuller': '["Objects"]["1159905354.84jubutler"]["Objects"]["1177982920.28kmuller"]','1177982957.33kmuller': '["Objects"]["1159905354.84jubutler"]["Objects"]["1177982957.33kmuller"]','1177982993.03kmuller': '["Objects"]["1159905354.84jubutler"]["Objects"]["1177982993.03kmuller"]','1177983026.17kmuller': '["Objects"]["1159905354.84jubutler"]["Objects"]["1177983026.17kmuller"]','1177983102.81kmuller': '["Objects"]["1159905354.84jubutler"]["Objects"]["1177983102.81kmuller"]','1177983164.17kmuller': '["Objects"]["1159905354.84jubutler"]["Objects"]["1177983164.17kmuller"]','1177983219.08kmuller': '["Objects"]["1159905354.84jubutler"]["Objects"]["1177983219.08kmuller"]','1177983306.06kmuller': '["Objects"]["1159905354.84jubutler"]["Objects"]["1177983306.06kmuller"]','1177983368.98kmuller': '["Objects"]["1159905354.84jubutler"]["Objects"]["1177983368.98kmuller"]','1177983477.67kmuller': '["Objects"]["1159905354.84jubutler"]["Objects"]["1177983477.67kmuller"]','1177983618.61kmuller': '["Objects"]["1159905354.84jubutler"]["Objects"]["1177983618.61kmuller"]','1177983728.36kmuller': 
'["Objects"]["1159905354.84jubutler"]["Objects"]["1177983728.36kmuller"]','1177983806.69kmuller': '["Objects"]["1159905354.84jubutler"]["Objects"]["1177983806.69kmuller"]','1177984119.81kmuller': '["Objects"]["1159905354.84jubutler"]["Objects"]["1177984119.81kmuller"]','1187049656.11akelts': '["Objects"]["1159905354.84jubutler"]["Objects"]["1187049656.11akelts"]','1187049812.36akelts': '["Objects"]["1159905354.84jubutler"]["Objects"]["1187049812.36akelts"]','1187049934.06akelts': '["Objects"]["1159905354.84jubutler"]["Objects"]["1187049934.06akelts"]','1187050266.14akelts': '["Objects"]["1159905354.84jubutler"]["Objects"]["1187050266.14akelts"]','1187050742.97akelts': '["Objects"]["1159905354.84jubutler"]["Objects"]["1187050742.97akelts"]','1187050855.61akelts': '["Objects"]["1159905354.84jubutler"]["Objects"]["1187050855.61akelts"]','1187050916.44akelts': '["Objects"]["1159905354.84jubutler"]["Objects"]["1187050916.44akelts"]','1187051589.0akelts': '["Objects"]["1159905354.84jubutler"]["Objects"]["1187051589.0akelts"]','1187051619.05akelts': '["Objects"]["1159905354.84jubutler"]["Objects"]["1187051619.05akelts"]','1187051675.14akelts': '["Objects"]["1159905354.84jubutler"]["Objects"]["1187051675.14akelts"]','1187052553.77akelts': '["Objects"]["1159905354.84jubutler"]["Objects"]["1187052553.77akelts"]','1187052662.28akelts': '["Objects"]["1159905354.84jubutler"]["Objects"]["1187052662.28akelts"]','1187052663.47akelts': '["Objects"]["1159905354.84jubutler"]["Objects"]["1187052663.47akelts"]','1187053153.59akelts': '["Objects"]["1159905354.84jubutler"]["Objects"]["1187053153.59akelts"]','1187053246.66akelts': '["Objects"]["1159905354.84jubutler"]["Objects"]["1187053246.66akelts"]','1187053434.09akelts': '["Objects"]["1159905354.84jubutler"]["Objects"]["1187053434.09akelts"]','1187053464.33akelts': '["Objects"]["1159905354.84jubutler"]["Objects"]["1187053464.33akelts"]','1187112270.22akelts': 
'["Objects"]["1159905354.84jubutler"]["Objects"]["1187112270.22akelts"]','1200537088.0jubutler0': '["Objects"]["1159905354.84jubutler"]["Objects"]["1200537088.0jubutler0"]','1200537088.0jubutler1': '["Objects"]["1159905354.84jubutler"]["Objects"]["1200537088.0jubutler1"]','1212699106.33kmuller': '["Objects"]["1159905354.84jubutler"]["Objects"]["1212699106.33kmuller"]','1279045770.91caoconno': '["Objects"]["1159905354.84jubutler"]["Objects"]["1279045770.91caoconno"]','1279045830.36caoconno': '["Objects"]["1159905354.84jubutler"]["Objects"]["1279045830.36caoconno"]'}}
extraInfo = {'camPos': Point3(-91.5411, 13.8317, 51.4597),'camHpr': VBase3(99.802, -30.7888, -1.98769e-06),'focalLength': 0.819180488586,'skyState': -1,'fog': 0}
| 9,477.666667
| 28,212
| 0.68621
| 3,855
| 28,433
| 5.016083
| 0.198703
| 0.025961
| 0.024978
| 0.020893
| 0.475979
| 0.411388
| 0.370947
| 0.274241
| 0.247867
| 0.238351
| 0
| 0.290474
| 0.048922
| 28,433
| 3
| 28,213
| 9,477.666667
| 0.424599
| 0
| 0
| 0
| 0
| 0
| 0.574242
| 0.280791
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
b91d346d9df2ab3d7dfb768bc70b89cedfd869a8
| 49
|
py
|
Python
|
indra/sparser/__init__.py
|
jmuhlich/indra
|
feab2c08541ea73f328579faa6a21b08082cb026
|
[
"BSD-2-Clause"
] | null | null | null |
indra/sparser/__init__.py
|
jmuhlich/indra
|
feab2c08541ea73f328579faa6a21b08082cb026
|
[
"BSD-2-Clause"
] | null | null | null |
indra/sparser/__init__.py
|
jmuhlich/indra
|
feab2c08541ea73f328579faa6a21b08082cb026
|
[
"BSD-2-Clause"
] | null | null | null |
from indra.sparser.sparser_api import process_xml
| 49
| 49
| 0.897959
| 8
| 49
| 5.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061224
| 49
| 1
| 49
| 49
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b91f0823c80db62b28c4cc540538ef76699f20de
| 17,632
|
py
|
Python
|
test/test_unit_split_statement.py
|
Snagajob/snowflake-connector-python
|
be43eebae138b0040fb973ba470b7575bae97a0f
|
[
"Apache-2.0"
] | 1
|
2020-08-04T08:30:42.000Z
|
2020-08-04T08:30:42.000Z
|
test/test_unit_split_statement.py
|
Snagajob/snowflake-connector-python
|
be43eebae138b0040fb973ba470b7575bae97a0f
|
[
"Apache-2.0"
] | null | null | null |
test/test_unit_split_statement.py
|
Snagajob/snowflake-connector-python
|
be43eebae138b0040fb973ba470b7575bae97a0f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2019 Snowflake Computing Inc. All right reserved.
#
from io import StringIO
import pytest
from snowflake.connector.util_text import split_statements
def test_simple_sql():
with StringIO("show tables") as f:
itr = split_statements(f)
assert next(itr) == ('show tables', False)
with pytest.raises(StopIteration):
next(itr)
with StringIO("show tables;") as f:
itr = split_statements(f)
assert next(itr) == ('show tables;', False)
with pytest.raises(StopIteration):
next(itr)
with StringIO("select 1;select 2") as f:
itr = split_statements(f)
assert next(itr) == ('select 1;', False)
assert next(itr) == ('select 2', False)
with pytest.raises(StopIteration):
next(itr)
with StringIO("select 1;select 2;") as f:
itr = split_statements(f)
assert next(itr) == ('select 1;', False)
assert next(itr) == ('select 2;', False)
with pytest.raises(StopIteration):
next(itr)
s = "select 1; -- test"
with StringIO(s) as f:
itr = split_statements(f)
assert next(itr) == ('select 1; -- test', False)
with pytest.raises(StopIteration):
next(itr)
with StringIO(s) as f:
itr = split_statements(f, remove_comments=True)
assert next(itr) == ('select 1;', False)
with pytest.raises(StopIteration):
next(itr)
s = "select /* test */ 1; -- test comment select 1;"
with StringIO(s) as f:
itr = split_statements(f)
assert next(itr) == (
'select /* test */ 1; -- test comment select 1;', False)
with pytest.raises(StopIteration):
next(itr)
with StringIO(s) as f:
itr = split_statements(f, remove_comments=True)
assert next(itr) == ('select 1;', False)
with pytest.raises(StopIteration):
next(itr)
def test_multiple_line_sql():
    """Statements spanning several lines split correctly; comments are
    preserved by default and stripped with remove_comments=True."""
    s = """select /* test */ 1; -- test comment
select 23;"""
    with StringIO(s) as f:
        itr = split_statements(f)
        assert next(itr) == (
            ('select /* test */ 1; -- test comment', False))
        assert next(itr) == ('select 23;', False)
        with pytest.raises(StopIteration):
            next(itr)
    with StringIO(s) as f:
        itr = split_statements(f, remove_comments=True)
        assert next(itr) == ('select 1;', False)
        assert next(itr) == ('select 23;', False)
        with pytest.raises(StopIteration):
            next(itr)

    # line comment also trails the second statement
    s = """select /* test */ 1; -- test comment
select 23; -- test comment 2"""
    with StringIO(s) as f:
        itr = split_statements(f)
        assert next(itr) == (
            'select /* test */ 1; -- test comment', False)
        assert next(itr) == ('select 23; -- test comment 2', False)
        with pytest.raises(StopIteration):
            next(itr)
    with StringIO(s) as f:
        itr = split_statements(f, remove_comments=True)
        assert next(itr) == ('select 1;', False)
        assert next(itr) == ('select 23;', False)
        with pytest.raises(StopIteration):
            next(itr)

    # a block comment before a statement attaches to that statement
    s = """select /* test */ 1; -- test comment
select 23; /* test comment 2 */ select 3"""
    with StringIO(s) as f:
        itr = split_statements(f)
        assert next(itr) == (
            'select /* test */ 1; -- test comment', False)
        assert next(itr) == ('select 23;', False)
        assert next(itr) == ('/* test comment 2 */ select 3', False)
        with pytest.raises(StopIteration):
            next(itr)
    with StringIO(s) as f:
        itr = split_statements(f, remove_comments=True)
        assert next(itr) == ('select 1;', False)
        assert next(itr) == ('select 23;', False)
        assert next(itr) == ('select 3', False)
        with pytest.raises(StopIteration):
            next(itr)

    # a block comment that spans a line break
    s = """select /* test */ 1; -- test comment
select 23; /* test comment 2
*/ select 3;"""
    with StringIO(s) as f:
        itr = split_statements(f)
        assert next(itr) == (
            "select /* test */ 1; -- test comment", False)
        assert next(itr) == ("select 23;", False)
        assert next(itr) == ("/* test comment 2\n*/ select 3;", False)
        with pytest.raises(StopIteration):
            next(itr)
    with StringIO(s) as f:
        itr = split_statements(f, remove_comments=True)
        assert next(itr) == ("select 1;", False)
        assert next(itr) == ("select 23;", False)
        assert next(itr) == ("select 3;", False)
        with pytest.raises(StopIteration):
            next(itr)

    # a multi-line block comment inside a statement
    s = """select /* test
 continued comments 1
 continued comments 2
 */ 1; -- test comment
select 23; /* test comment 2
*/ select 3;"""
    with StringIO(s) as f:
        itr = split_statements(f)
        assert next(itr) == ("select /* test\n"
                             " continued comments 1\n"
                             " continued comments 2\n"
                             " */ 1; -- test comment", False)
        assert next(itr) == ("select 23;", False)
        assert next(itr) == ("/* test comment 2\n*/ select 3;", False)
        with pytest.raises(StopIteration):
            next(itr)
    with StringIO(s) as f:
        itr = split_statements(f, remove_comments=True)
        assert next(itr) == ("select 1;", False)
        assert next(itr) == ("select 23;", False)
        assert next(itr) == ("select 3;", False)
        with pytest.raises(StopIteration):
            next(itr)
def test_quotes():
    """Quoted literals — including unterminated ones, embedded double
    quotes, newlines and doubled single quotes — must not break
    statement splitting."""
    # unterminated single-quoted literal at end of input
    s = """select 'hello', 1; -- test comment
select 23,'hello"""
    with StringIO(s) as f:
        itr = split_statements(f)
        assert next(itr) == (
            "select 'hello', 1; -- test comment", False)
        assert next(itr) == ("select 23,'hello", False)
        with pytest.raises(StopIteration):
            next(itr)
    with StringIO(s) as f:
        itr = split_statements(f, remove_comments=True)
        assert next(itr) == ("select 'hello', 1;", False)
        assert next(itr) == ("select 23,'hello", False)
        with pytest.raises(StopIteration):
            next(itr)

    # a double quote inside a single-quoted literal, and vice versa
    s = """select 'he"llo', 1; -- test comment
select "23,'hello" """
    with StringIO(s) as f:
        itr = split_statements(f)
        assert next(itr) == (
            "select 'he\"llo', 1; -- test comment", False)
        assert next(itr) == ("select \"23,'hello\"", False)
        with pytest.raises(StopIteration):
            next(itr)
    with StringIO(s) as f:
        itr = split_statements(f, remove_comments=True)
        assert next(itr) == ("select 'he\"llo', 1;", False)
        assert next(itr) == ("select \"23,'hello\"", False)
        with pytest.raises(StopIteration):
            next(itr)

    # a newline inside a single-quoted literal
    s = """select 'hello
', 1; -- test comment
select "23,'hello" """
    with StringIO(s) as f:
        itr = split_statements(f)
        assert next(itr) == (
            "select 'hello\n', 1; -- test comment", False)
        assert next(itr) == ("select \"23,'hello\"", False)
        with pytest.raises(StopIteration):
            next(itr)
    with StringIO(s) as f:
        itr = split_statements(f, remove_comments=True)
        assert next(itr) == ("select 'hello\n', 1;", False)
        assert next(itr) == ("select \"23,'hello\"", False)
        with pytest.raises(StopIteration):
            next(itr)

    # doubled single quotes ('' escape) inside literals
    s = """select 'hello''
', 1; -- test comment
select "23,'','hello" """
    with StringIO(s) as f:
        itr = split_statements(f)
        assert next(itr) == (
            "select 'hello''\n', 1; -- test comment", False)
        assert next(itr) == ("select \"23,'','hello\"", False)
        with pytest.raises(StopIteration):
            next(itr)
    with StringIO(s) as f:
        itr = split_statements(f, remove_comments=True)
        assert next(itr) == ("select 'hello''\n', 1;", False)
        assert next(itr) == ("select \"23,'','hello\"", False)
        with pytest.raises(StopIteration):
            next(itr)
def test_quotes_in_comments():
    """Quote characters inside line and block comments must not be
    treated as string delimiters."""
    sql = ("select 'hello'; -- test comment 'hello2' in comment\n"
           "/* comment 'quote'*/ select true\n")
    with StringIO(sql) as stream:
        statements = list(split_statements(stream))
    assert statements == [
        ("select 'hello'; -- test comment 'hello2' in comment", False),
        ("/* comment 'quote'*/ select true", False),
    ]
def test_backslash():
    """
    Test backslash in a literal.
    Note the backslash is escaped in a Python string literal. Double
    backslashes in a string literal represent a single backslash.
    """
    # the SQL seen by the splitter contains two literal backslashes
    # before the closing quote, plus a real newline from the "\n" escape
    s = """select 'hello\\\\', 1; -- test comment
select 23,'\nhello"""
    with StringIO(s) as f:
        itr = split_statements(f)
        assert next(itr) == (
            "select 'hello\\\\', 1; -- test comment", False)
        assert next(itr) == ("select 23,'\nhello", False)
        with pytest.raises(StopIteration):
            next(itr)
    with StringIO(s) as f:
        itr = split_statements(f, remove_comments=True)
        assert next(itr) == ("select 'hello\\\\', 1;", False)
        assert next(itr) == ("select 23,'\nhello", False)
        with pytest.raises(StopIteration):
            next(itr)
def test_file_with_slash_star():
    """PUT/GET file paths containing "/*" must not be mistaken for the
    start of a block comment; PUT statements report True as the second
    tuple element."""
    s = """put file:///tmp/* @%tmp;
ls @%tmp;"""
    with StringIO(s) as f:
        itr = split_statements(f)
        assert next(itr) == ("put file:///tmp/* @%tmp;", True)
        assert next(itr) == ("ls @%tmp;", False)
        with pytest.raises(StopIteration):
            next(itr)
    with StringIO(s) as f:
        itr = split_statements(f, remove_comments=True)
        assert next(itr) == ("put file:///tmp/* @%tmp;", True)
        assert next(itr) == ("ls @%tmp;", False)
        with pytest.raises(StopIteration):
            next(itr)

    # a longer staging script mixing comments, PUTs and blank lines
    s = """list @~;
-- first half
put file://$SELF_DIR/staging-test-data/*.csv.gz @~;
put file://$SELF_DIR/staging-test-data/foo.csv.gz @~;
put file://$SELF_DIR/staging-test-data/foo.csv.gz @~ overwrite=true;
-- second half
put file://$SELF_DIR/staging-test-data/foo.csv.gz @~/foo;
put file://$SELF_DIR/staging-test-data/bar.csv.gz @~/bar;
list @~;
remove @~ pattern='.*.csv.gz';
list @~;
"""
    with StringIO(s) as f:
        itr = split_statements(f)
        assert next(itr) == ("list @~;", False)
        # no comment line is returned
        assert next(itr) == (
            "-- first half\n"
            "put file://$SELF_DIR/staging-test-data/*.csv.gz @~;", True)
        assert next(itr) == (
            "put file://$SELF_DIR/staging-test-data/foo.csv.gz @~;", True)
        assert next(itr) == (
            "put file://$SELF_DIR/staging-test-data/foo.csv.gz @~ "
            "overwrite=true;", True)
        # no comment line is returned
        assert next(itr) == (
            "-- second half\n"
            "put file://$SELF_DIR/staging-test-data/foo.csv.gz @~/foo;", True)
        assert next(itr) == (
            "put file://$SELF_DIR/staging-test-data/bar.csv.gz @~/bar;", True)
        # no empty line is returned
        assert next(itr) == ("list @~;", False)
        assert next(itr) == ("remove @~ pattern='.*.csv.gz';", False)
        assert next(itr) == ("list @~;", False)
        # last raises StopIteration
        with pytest.raises(StopIteration):
            next(itr)
    with StringIO(s) as f:
        itr = split_statements(f, remove_comments=True)
        assert next(itr) == ("list @~;", False)
        # no comment line is returned
        assert next(itr) == (
            "put file://$SELF_DIR/staging-test-data/*.csv.gz @~;", True)
        assert next(itr) == (
            "put file://$SELF_DIR/staging-test-data/foo.csv.gz @~;", True)
        assert next(itr) == (
            "put file://$SELF_DIR/staging-test-data/foo.csv.gz @~ "
            "overwrite=true;", True)
        # no comment line is returned
        assert next(itr) == (
            "put file://$SELF_DIR/staging-test-data/foo.csv.gz @~/foo;", True)
        assert next(itr) == (
            "put file://$SELF_DIR/staging-test-data/bar.csv.gz @~/bar;", True)
        # no empty line is returned
        assert next(itr) == ("list @~;", False)
        assert next(itr) == ("remove @~ pattern='.*.csv.gz';", False)
        assert next(itr) == ("list @~;", False)
        # last raises StopIteration
        with pytest.raises(StopIteration):
            next(itr)
def test_sql_with_commands():
with StringIO("""create or replace view aaa
as select * from
LINEITEM limit 1000;
!spool $outfile
show views like 'AAA';
!spool off
drop view if exists aaa;
show tables""") as f:
itr = split_statements(f)
assert next(itr) == ("""create or replace view aaa
as select * from
LINEITEM limit 1000;""", False)
assert next(itr) == ("""!spool $outfile""", False)
assert next(itr) == ("show views like 'AAA';", False)
assert next(itr) == ("!spool off", False)
assert next(itr) == ("drop view if exists aaa;", False)
assert next(itr) == ("show tables", False)
with pytest.raises(StopIteration):
next(itr)
def test_sql_example1():
    """A typical load-and-query script yields one entry per statement;
    only the PUT command is flagged True."""
    sql = ("\n"
           "create or replace table a(aa int, bb string);\n"
           "truncate a;\n"
           "rm @%a;\n"
           "put file://a.txt @%a;\n"
           "copy into a;\n"
           "select * from a;\n"
           "drop table if exists a;")
    with StringIO(sql) as stream:
        statements = list(split_statements(stream))
    assert statements == [
        ("create or replace table a(aa int, bb string);", False),
        ("truncate a;", False),
        ("rm @%a;", False),
        ("put file://a.txt @%a;", True),
        ("copy into a;", False),
        ("select * from a;", False),
        ("drop table if exists a;", False),
    ]
def test_space_before_put():
    """PUT/GET are recognized even after leading comment lines; the
    comment attaches to the PUT statement."""
    sql = ("\n"
           "-- sample data uploads\n"
           "PUT file:///tmp/data.txt @%ab;\n"
           "SELECT 1; /* 134 */ select /* 567*/ 345;>\n"
           "GET @%bcd file:///tmp/aaa.txt;\n")
    with StringIO(sql) as stream:
        statements = list(split_statements(stream))
    assert statements == [
        ("-- sample data uploads\nPUT file:///tmp/data.txt @%ab;", True),
        ("SELECT 1;", False),
        ("/* 134 */ select /* 567*/ 345;>", False),
        ("GET @%bcd file:///tmp/aaa.txt;", True),
    ]
def test_empty_statement():
    """Trailing comments with no following statement are yielded with a
    None flag instead of True/False."""
    sql = ("select 1;\n"
           "-- tail comment1\n"
           "-- tail comment2\n")
    with StringIO(sql) as stream:
        statements = list(split_statements(stream))
    assert statements == [
        ("select 1;", False),
        ("-- tail comment1\n-- tail comment2", None),
    ]
def test_multiple_comments():
    """Leading line comments attach to the statement that follows them."""
    s = """--- test comment 1
select /*another test comments*/ 1; -- test comment 2
-- test comment 3
select 2;
"""
    with StringIO(s) as f:
        itr = split_statements(f, remove_comments=False)
        assert next(itr) == (
            "--- test comment 1\n"
            "select /*another test comments*/ 1; -- test comment 2", False)
        assert next(itr) == ("-- test comment 3\nselect 2;", False)
        # Fix: every other test in this module exhausts the iterator;
        # without this check a spurious trailing statement would go
        # unnoticed.
        with pytest.raises(StopIteration):
            next(itr)
def test_comments_with_semicolon():
    """A semicolon inside a line comment must not terminate the
    statement; the comment and statement come back as one unit."""
    sql = "--test ;\nselect 1;\n"
    with StringIO(sql) as stream:
        statements = list(split_statements(stream, remove_comments=False))
    assert statements == [("--test ;\nselect 1;", False)]
def test_comment_in_values():
    """
    SNOW-51297: SnowSQL -o remove_comments=True breaks the query

    Block comments embedded inside a VALUES clause must be removed
    without corrupting the surrounding tokens, regardless of the
    whitespace around the comment.
    """
    # no space before a comment
    s = """INSERT INTO foo
VALUES (/*TIMEOUT*/ 10);"""
    with StringIO(s) as f:
        itr = split_statements(f, remove_comments=True)
        assert next(itr) == (
            "INSERT INTO foo\nVALUES ( 10);", False
        )

    # no space before and after a comment
    s = """INSERT INTO foo
VALUES (/*TIMEOUT*/10);"""
    with StringIO(s) as f:
        itr = split_statements(f, remove_comments=True)
        assert next(itr) == (
            "INSERT INTO foo\nVALUES (10);", False
        )

    # workaround
    s = """INSERT INTO foo
VALUES ( /*TIMEOUT*/ 10);"""
    with StringIO(s) as f:
        itr = split_statements(f, remove_comments=True)
        assert next(itr) == (
            "INSERT INTO foo\nVALUES ( 10);", False
        )

    # a comment start from the beginning of the line
    s = """INSERT INTO foo VALUES (
/*TIMEOUT*/
10);"""
    with StringIO(s) as f:
        itr = split_statements(f, remove_comments=True)
        assert next(itr) == (
            "INSERT INTO foo VALUES (\n\n10);", False
        )
def test_multiline_double_dollar_experssion_with_removed_comments():
    """A $$-quoted function body must pass through remove_comments
    untouched.

    NOTE(review): "experssion" is a typo for "expression"; the name is
    kept so the pytest test id stays stable for external tooling.
    """
    # Build input and expected value from one literal so they cannot
    # drift apart: the whole $$-statement must come back verbatim.
    body = ("CREATE FUNCTION mean(a FLOAT, b FLOAT)\n"
            " RETURNS FLOAT LANGUAGE JAVASCRIPT AS $$\n"
            " var c = a + b;\n return(c / 2);\n $$;")
    with StringIO(body) as f:
        itr = split_statements(f, remove_comments=True)
        assert next(itr) == (body, False)
        # Fix: exhaust the iterator like the other tests in this module
        # so extra unexpected statements fail the test.
        with pytest.raises(StopIteration):
            next(itr)
def test_backslash_quote_escape():
    """A backslash-escaped quote inside a literal must not end the
    literal or the statement."""
    s = """
SELECT 1 'Snowflake\\'s 1';
SELECT 2 'Snowflake\\'s 2'
"""
    with StringIO(s) as f:
        itr = split_statements(f)
        assert next(itr) == ("SELECT 1 'Snowflake\\'s 1';", False)
        assert next(itr) == ("SELECT 2 'Snowflake\\'s 2'", False)
        # Fix: the iterator was never exhausted; the trailing newline
        # must not produce an extra (empty) statement.
        with pytest.raises(StopIteration):
            next(itr)
| 33.584762
| 80
| 0.556488
| 2,198
| 17,632
| 4.406733
| 0.080528
| 0.106236
| 0.146294
| 0.111811
| 0.869193
| 0.823044
| 0.814887
| 0.798472
| 0.778753
| 0.737972
| 0
| 0.017121
| 0.284483
| 17,632
| 524
| 81
| 33.648855
| 0.750634
| 0.037716
| 0
| 0.640732
| 0
| 0.002288
| 0.295412
| 0.039558
| 0
| 0
| 0
| 0
| 0.249428
| 1
| 0.034325
| false
| 0
| 0.006865
| 0
| 0.04119
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b923d73904d86b5b956bb697efa4986fedb14504
| 37
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_carpet.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_carpet.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_carpet.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
from plotly.graph_objs import Carpet
| 18.5
| 36
| 0.864865
| 6
| 37
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b93f1ed2b913d7d0865c992533c7d0ae1b873280
| 304
|
py
|
Python
|
configs/gdrn/ycbvPbrSO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_bop_test/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_bop_test_16_36WoodBlock.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | 33
|
2021-12-15T07:11:47.000Z
|
2022-03-29T08:58:32.000Z
|
configs/gdrn/ycbvPbrSO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_bop_test/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_bop_test_16_36WoodBlock.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | 3
|
2021-12-15T11:39:54.000Z
|
2022-03-29T07:24:23.000Z
|
configs/gdrn/ycbvPbrSO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_bop_test/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_bop_test_16_36WoodBlock.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | null | null | null |
# Derived experiment config: inherits everything from the base per-object
# config and overrides only the output path and training dataset.
# NOTE(review): presumably mmcv/detectron2-style `_base_` config
# inheritance — confirm against the framework's config loader.
_base_ = "./resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_bop_test_01_02MasterChefCan.py"
# Destination for checkpoints/logs of this object's run.
OUTPUT_DIR = (
    "output/gdrn/ycbvPbrSO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO/16_36WoodBlock"
)
# Train on the PBR-rendered split for the 036_wood_block object only.
DATASETS = dict(TRAIN=("ycbv_036_wood_block_train_pbr",))
| 50.666667
| 114
| 0.871711
| 38
| 304
| 6.263158
| 0.736842
| 0.201681
| 0.235294
| 0.294118
| 0.529412
| 0.529412
| 0.529412
| 0.529412
| 0
| 0
| 0
| 0.100346
| 0.049342
| 304
| 5
| 115
| 60.8
| 0.723183
| 0
| 0
| 0
| 0
| 0
| 0.786184
| 0.786184
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b961076f7f3975a5a2522ee4d825d5b63674fce7
| 1,510
|
py
|
Python
|
LeavesAppBE/database/admin.py
|
averyscottnorris/LeavesCalculator
|
58c9c8779e7ccb2c6bb0282efe636af9017b9c74
|
[
"MIT"
] | null | null | null |
LeavesAppBE/database/admin.py
|
averyscottnorris/LeavesCalculator
|
58c9c8779e7ccb2c6bb0282efe636af9017b9c74
|
[
"MIT"
] | 87
|
2019-09-03T22:06:40.000Z
|
2021-06-10T19:04:35.000Z
|
LeavesAppBE/database/admin.py
|
averyscottnorris/LeavesCalculator
|
58c9c8779e7ccb2c6bb0282efe636af9017b9c74
|
[
"MIT"
] | 1
|
2020-11-02T19:27:23.000Z
|
2020-11-02T19:27:23.000Z
|
from import_export.admin import ImportExportModelAdmin
from django.contrib import admin
from .models import gobeacc, goremal, gorsdav, nbrjobs, nbrbjob, spriden, pebempl, phraccr, perleav, perefml, perfmla, perbfml, perjtot, ptrearn, pdrdedn
# Register every model with the Django admin via django-import-export's
# ImportExportModelAdmin, which adds file import/export actions to each
# model's admin change list. All admin classes are intentionally empty.
@admin.register(gobeacc)
class gobeaccAdmin(ImportExportModelAdmin):
    pass


@admin.register(goremal)
class goremalAdmin(ImportExportModelAdmin):
    pass


@admin.register(gorsdav)
class gorsdavAdmin(ImportExportModelAdmin):
    pass


@admin.register(nbrbjob)
class nbrbjobAdmin(ImportExportModelAdmin):
    pass


@admin.register(nbrjobs)
class nbrjobsAdmin(ImportExportModelAdmin):
    pass


@admin.register(spriden)
class spridenAdmin(ImportExportModelAdmin):
    pass


@admin.register(pebempl)
class pebemplAdmin(ImportExportModelAdmin):
    pass


@admin.register(phraccr)
class phraccrAdmin(ImportExportModelAdmin):
    pass


@admin.register(perleav)
class perleavAdmin(ImportExportModelAdmin):
    pass


@admin.register(perefml)
class perefmlAdmin(ImportExportModelAdmin):
    pass


# NOTE(review): class name "perefmlaAdmin" does not follow the
# "<model>Admin" pattern for model perfmla; harmless but inconsistent.
@admin.register(perfmla)
class perefmlaAdmin(ImportExportModelAdmin):
    pass


# NOTE(review): class name "perebfmlAdmin" does not match model perbfml.
@admin.register(perbfml)
class perebfmlAdmin(ImportExportModelAdmin):
    pass


@admin.register(perjtot)
class perjtotAdmin(ImportExportModelAdmin):
    pass


@admin.register(ptrearn)
class ptrearnAdmin(ImportExportModelAdmin):
    pass


# NOTE(review): class name "pdedednAdmin" does not match model pdrdedn.
@admin.register(pdrdedn)
class pdedednAdmin(ImportExportModelAdmin):
    pass
| 20.684932
| 154
| 0.75894
| 134
| 1,510
| 8.544776
| 0.30597
| 0.170306
| 0.379039
| 0.476856
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.160265
| 1,510
| 72
| 155
| 20.972222
| 0.902997
| 0
| 0
| 0.3125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.3125
| 0.375
| 0
| 0.6875
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
b990abdec6064a2fd691af53bd92974e13988d8d
| 116
|
py
|
Python
|
resttest/service/__init__.py
|
CzarSimon/resttest
|
be58ce2a565713bb413e814e5d5790cb46446c0b
|
[
"MIT"
] | null | null | null |
resttest/service/__init__.py
|
CzarSimon/resttest
|
be58ce2a565713bb413e814e5d5790cb46446c0b
|
[
"MIT"
] | null | null | null |
resttest/service/__init__.py
|
CzarSimon/resttest
|
be58ce2a565713bb413e814e5d5790cb46446c0b
|
[
"MIT"
] | null | null | null |
from .random_port import get_random_port
from .reader import Reader, FileReader
from .test_service import run_tests
| 29
| 40
| 0.853448
| 18
| 116
| 5.222222
| 0.611111
| 0.212766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112069
| 116
| 3
| 41
| 38.666667
| 0.912621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b9a325c03e18860390c917165329f0c5189d84da
| 67,027
|
py
|
Python
|
flair/datasets/treebanks.py
|
lukasgarbas/flair
|
bba5b5ca2c5ae4e77c4abce2fa429aa29a4bd85b
|
[
"MIT"
] | 1
|
2022-01-25T03:43:15.000Z
|
2022-01-25T03:43:15.000Z
|
flair/datasets/treebanks.py
|
AshishMahendra/flair-1
|
041c85cf3d45940dccd453fc350767c1c85aad49
|
[
"MIT"
] | 1
|
2022-01-12T07:50:13.000Z
|
2022-01-12T10:08:44.000Z
|
flair/datasets/treebanks.py
|
AshishMahendra/flair-1
|
041c85cf3d45940dccd453fc350767c1c85aad49
|
[
"MIT"
] | 1
|
2019-08-11T12:08:31.000Z
|
2019-08-11T12:08:31.000Z
|
import logging
import re
from pathlib import Path
from typing import List, Union
import flair
from flair.data import Corpus, FlairDataset, Sentence, Token
from flair.datasets.base import find_train_dev_test_files
from flair.file_utils import cached_path
log = logging.getLogger("flair")
class UniversalDependenciesCorpus(Corpus):
    def __init__(
        self,
        data_folder: Union[str, Path],
        train_file=None,
        test_file=None,
        dev_file=None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """
        Instantiates a Corpus from CoNLL-U column-formatted task data such as the UD corpora
        :param data_folder: base folder with the task data
        :param train_file: the name of the train file
        :param test_file: the name of the test file
        :param dev_file: the name of the dev file, if None, dev data is sampled from train
        :param in_memory: If set to True, keeps full dataset in memory, otherwise does disk reads
        :param split_multiwords: If set to True, multiwords are split (default), otherwise kept as single tokens
        :return: a Corpus with annotated train, dev and test data
        """
        # locate the split files, auto-discovering any not given explicitly
        dev_file, test_file, train_file = find_train_dev_test_files(data_folder, dev_file, test_file, train_file)

        def _load(conll_file):
            # small factory so all three splits share identical options
            return UniversalDependenciesDataset(conll_file, in_memory=in_memory, split_multiwords=split_multiwords)

        # train is mandatory; test and dev are optional
        train = _load(train_file)

        test = None
        if test_file is not None:
            test = _load(test_file)

        dev = None
        if dev_file is not None:
            dev = _load(dev_file)

        super(UniversalDependenciesCorpus, self).__init__(train, dev, test, name=str(data_folder))
class UniversalDependenciesDataset(FlairDataset):
    # Dataset over one CoNLL-U file; parses eagerly into memory or lazily
    # via per-sentence byte offsets, depending on `in_memory`.
    def __init__(
        self,
        path_to_conll_file: Union[str, Path],
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """
        Instantiates a column dataset in CoNLL-U format.
        :param path_to_conll_file: Path to the CoNLL-U formatted file
        :param in_memory: If set to True, keeps full dataset in memory, otherwise does disk reads
        :param split_multiwords: If set to True, multiwords are split (default), otherwise kept as single tokens
        """
        path_to_conll_file = Path(path_to_conll_file)
        assert path_to_conll_file.exists()

        self.in_memory: bool = in_memory
        self.split_multiwords: bool = split_multiwords

        self.path_to_conll_file = path_to_conll_file
        self.total_sentence_count: int = 0

        with open(str(self.path_to_conll_file), encoding="utf-8") as file:

            # option 1: read only sentence boundaries as offset positions
            if not self.in_memory:
                self.indices: List[int] = []

                line = file.readline()
                position = 0
                while line:
                    line = line.strip()
                    if line == "":
                        # a blank line ends a sentence; record where the
                        # next one starts so __getitem__ can seek() to it
                        self.indices.append(position)
                        position = file.tell()
                    line = file.readline()

                self.total_sentence_count = len(self.indices)

            # option 2: keep everything in memory
            if self.in_memory:
                self.sentences: List[Sentence] = []

                while True:
                    sentence = self._read_next_sentence(file)
                    if not sentence:
                        break
                    self.sentences.append(sentence)

                self.total_sentence_count = len(self.sentences)

    def is_in_memory(self) -> bool:
        # True when all sentences were parsed at construction time.
        return self.in_memory

    def __len__(self):
        # number of sentences, counted once in __init__
        return self.total_sentence_count

    def __getitem__(self, index: int = 0) -> Sentence:

        # if in memory, retrieve parsed sentence
        if self.in_memory:
            sentence = self.sentences[index]

        # else skip to position in file where sentence begins
        else:
            with open(str(self.path_to_conll_file), encoding="utf-8") as file:
                file.seek(self.indices[index])
                sentence = self._read_next_sentence(file)

        return sentence

    def _read_next_sentence(self, file):
        # Parse one sentence (lines up to the next blank line) from *file*
        # into a Sentence, skipping comment ("#") and ellipsis ("N.M")
        # lines and handling the CoNLL-U multiword ("N-M") format.
        line = file.readline()
        sentence: Sentence = Sentence()

        # current token ID
        token_idx = 0

        # handling for the awful UD multiword format
        current_multiword_text = ""
        current_multiword_sequence = ""
        current_multiword_first_token = 0
        current_multiword_last_token = 0

        while line:
            line = line.strip()
            fields: List[str] = re.split("\t+", line)

            # end of sentence
            if line == "":
                if len(sentence) > 0:
                    break

            # comments
            elif line.startswith("#"):
                line = file.readline()
                continue

            # ellipsis
            elif "." in fields[0]:
                line = file.readline()
                continue

            # if token is a multi-word
            elif "-" in fields[0]:
                line = file.readline()

                current_multiword_first_token = int(fields[0].split("-")[0])
                current_multiword_last_token = int(fields[0].split("-")[1])
                current_multiword_text = fields[1]
                current_multiword_sequence = ""

                if self.split_multiwords:
                    continue
                else:
                    # keep the surface multiword as one token
                    token = Token(fields[1])
                    token.add_label("lemma", str(fields[2]))
                    if len(fields) > 9 and "SpaceAfter=No" in fields[9]:
                        token.whitespace_after = False
                    sentence.add_token(token)
                    token_idx += 1

            # normal single-word tokens
            else:

                # if we don't split multiwords, skip over component words
                if not self.split_multiwords and token_idx < current_multiword_last_token:
                    token_idx += 1
                    line = file.readline()
                    continue

                # add token
                token = Token(fields[1], head_id=int(fields[6]))
                token.add_label("lemma", str(fields[2]))
                token.add_label("upos", str(fields[3]))
                token.add_label("pos", str(fields[4]))
                token.add_label("dependency", str(fields[7]))

                if len(fields) > 9 and "SpaceAfter=No" in fields[9]:
                    token.whitespace_after = False

                # add morphological tags
                for morph in str(fields[5]).split("|"):
                    if "=" not in morph:
                        continue
                    token.add_label(morph.split("=")[0].lower(), morph.split("=")[1])

                if len(fields) > 10 and str(fields[10]) == "Y":
                    token.add_label("frame", str(fields[11]))

                token_idx += 1

                # derive whitespace logic for multiwords
                if token_idx <= current_multiword_last_token:
                    current_multiword_sequence += token.text

                # print(token)
                # print(current_multiword_last_token)
                # print(current_multiword_first_token)

                # if multi-word equals component tokens, there should be no whitespace
                if token_idx == current_multiword_last_token and current_multiword_sequence == current_multiword_text:
                    # go through all tokens in subword and set whitespace_after information
                    for i in range(current_multiword_last_token - current_multiword_first_token):
                        # print(i)
                        sentence[-(i + 1)].whitespace_after = False

                sentence.add_token(token)

            line = file.readline()
        return sentence
class UD_ENGLISH(UniversalDependenciesCorpus):
    """English Web Treebank (EWT) in Universal Dependencies format."""

    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        # resolve the local cache folder for this corpus
        if base_path:
            base_path = Path(base_path)
        else:
            base_path = Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch the three CoNLL-U splits if not cached yet
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_English-EWT/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/en_ewt-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_GALICIAN(UniversalDependenciesCorpus):
    """Galician TreeGal corpus in Universal Dependencies format."""

    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        # resolve the local cache folder for this corpus
        if base_path:
            base_path = Path(base_path)
        else:
            base_path = Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch the CoNLL-U splits if not cached (TreeGal has no dev split)
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Galician-TreeGal/master"
        for split in ("test", "train"):
            cached_path(f"{web_path}/gl_treegal-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_ANCIENT_GREEK(UniversalDependenciesCorpus):
    """Ancient Greek PROIEL corpus in Universal Dependencies format."""

    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        # resolve the local cache folder for this corpus
        if base_path:
            base_path = Path(base_path)
        else:
            base_path = Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch the three CoNLL-U splits if not cached yet
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Ancient_Greek-PROIEL/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/grc_proiel-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_KAZAKH(UniversalDependenciesCorpus):
    """Kazakh KTB corpus in Universal Dependencies format."""

    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        # resolve the local cache folder for this corpus
        if base_path:
            base_path = Path(base_path)
        else:
            base_path = Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch the CoNLL-U splits if not cached (KTB has no dev split)
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Kazakh-KTB/master"
        for split in ("test", "train"):
            cached_path(f"{web_path}/kk_ktb-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_OLD_CHURCH_SLAVONIC(UniversalDependenciesCorpus):
    """Old Church Slavonic PROIEL corpus in Universal Dependencies format."""

    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        # resolve the local cache folder for this corpus
        if base_path:
            base_path = Path(base_path)
        else:
            base_path = Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch the three CoNLL-U splits if not cached yet
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Old_Church_Slavonic-PROIEL/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/cu_proiel-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_ARMENIAN(UniversalDependenciesCorpus):
    """Armenian ArmTDP corpus in Universal Dependencies format."""

    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        if not base_path:
            base_path = Path(flair.cache_root) / "datasets"
        else:
            base_path = Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        # Fix: the base URL previously ended with "/" while the f-strings
        # below add their own "/", producing "master//hy_..." URLs; drop
        # the trailing slash to match every sibling UD_* class.
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Armenian-ArmTDP/master"
        cached_path(f"{web_path}/hy_armtdp-ud-dev.conllu", Path("datasets") / dataset_name)
        cached_path(f"{web_path}/hy_armtdp-ud-test.conllu", Path("datasets") / dataset_name)
        cached_path(f"{web_path}/hy_armtdp-ud-train.conllu", Path("datasets") / dataset_name)

        super(UD_ARMENIAN, self).__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_ESTONIAN(UniversalDependenciesCorpus):
    """Estonian EDT corpus in Universal Dependencies format."""

    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        # resolve the local cache folder for this corpus
        if base_path:
            base_path = Path(base_path)
        else:
            base_path = Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch the three CoNLL-U splits if not cached yet
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Estonian-EDT/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/et_edt-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_GERMAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """German UD corpus (UD_German-GSD); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_German-GSD/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/de_gsd-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_GERMAN_HDT(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = False,
        split_multiwords: bool = True,
    ):
        """German HDT UD corpus (UD_German-HDT). The training data ships in four
        parts, which are downloaded into an ``original`` subfolder and merged
        locally into a single train file on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # NOTE(review): this pulls from the "dev" branch of the UD repo, unlike the
        # other corpora which use "master" — confirm that is intentional
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_German-HDT/dev"
        cached_path(f"{repo_url}/de_hdt-ud-dev.conllu", Path("datasets") / dataset_name)
        cached_path(f"{repo_url}/de_hdt-ud-test.conllu", Path("datasets") / dataset_name)
        # the four train parts land in an "original" subfolder before being merged
        train_parts = [f"de_hdt-ud-train-{part}.conllu" for part in ("a-1", "a-2", "b-1", "b-2")]
        for part in train_parts:
            cached_path(f"{repo_url}/{part}", Path("datasets") / dataset_name / "original")
        data_path = flair.cache_root / "datasets" / dataset_name
        merged_train: Path = data_path / "de_hdt-ud-train-all.conllu"
        if not merged_train.is_file():
            # concatenate the parts once; later runs reuse the merged file
            with open(merged_train, "wt") as f_out:
                for part in train_parts:
                    with open(data_path / "original" / part, "rt") as f_in:
                        f_out.write(f_in.read())
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_DUTCH(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Dutch UD corpus (UD_Dutch-Alpino); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Dutch-Alpino/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/nl_alpino-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_FAROESE(UniversalDependenciesCorpus):
    """Faroese UD corpus (UD_Faroese-FarPaHC), from
    https://github.com/UniversalDependencies/UD_Faroese-FarPaHC/tree/master

    Faroese is a small Western Scandinavian language with 60,000-100,000 speakers,
    related to Icelandic and Old Norse."""

    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Faroese-FarPaHC/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/fo_farpahc-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_FRENCH(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """French UD corpus (UD_French-GSD); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_French-GSD/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/fr_gsd-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_ITALIAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Italian UD corpus (UD_Italian-ISDT); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Italian-ISDT/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/it_isdt-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_LATIN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Latin UD corpus (UD_Latin-LLCT); downloads the CoNLL-U splits on first use.

        :param base_path: optional root folder for datasets; defaults to the flair cache root.
        :param in_memory: if True, keep the parsed corpus in memory.
        :param split_multiwords: if True, split multi-word tokens into individual tokens.
        """
        if not base_path:
            base_path = Path(flair.cache_root) / "datasets"
        else:
            base_path = Path(base_path)
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # download data if necessary
        # fix: drop the trailing slash — the f-strings below add their own separator,
        # so the old value produced ".../master//la_llct-..." double-slash URLs
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Latin-LLCT/master"
        cached_path(f"{web_path}/la_llct-ud-dev.conllu", Path("datasets") / dataset_name)
        cached_path(f"{web_path}/la_llct-ud-test.conllu", Path("datasets") / dataset_name)
        cached_path(f"{web_path}/la_llct-ud-train.conllu", Path("datasets") / dataset_name)
        super(UD_LATIN, self).__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_SPANISH(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Spanish UD corpus (UD_Spanish-GSD); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Spanish-GSD/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/es_gsd-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_PORTUGUESE(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Portuguese UD corpus (UD_Portuguese-Bosque); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Portuguese-Bosque/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/pt_bosque-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_ROMANIAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Romanian UD corpus (UD_Romanian-RRT); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Romanian-RRT/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/ro_rrt-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_CATALAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Catalan UD corpus (UD_Catalan-AnCora); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Catalan-AnCora/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/ca_ancora-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_POLISH(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Polish UD corpus (UD_Polish-LFG); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Polish-LFG/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/pl_lfg-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_CZECH(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = False,
        split_multiwords: bool = True,
    ):
        """Czech UD corpus (UD_Czech-PDT). The training data ships in four parts,
        which are downloaded into an ``original`` subfolder and merged locally
        into a single train file on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch dev/test from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Czech-PDT/master"
        cached_path(f"{repo_url}/cs_pdt-ud-dev.conllu", Path("datasets") / dataset_name)
        cached_path(f"{repo_url}/cs_pdt-ud-test.conllu", Path("datasets") / dataset_name)
        # the four train parts land in an "original" subfolder before being merged
        train_parts = [f"cs_pdt-ud-train-{suffix}.conllu" for suffix in ("c", "l", "m", "v")]
        for part in train_parts:
            cached_path(f"{repo_url}/{part}", Path("datasets") / dataset_name / "original")
        data_path = flair.cache_root / "datasets" / dataset_name
        merged_train: Path = data_path / "cs_pdt-ud-train-all.conllu"
        if not merged_train.is_file():
            # concatenate the parts once; later runs reuse the merged file
            with open(merged_train, "wt") as f_out:
                for part in train_parts:
                    with open(data_path / "original" / part, "rt") as f_in:
                        f_out.write(f_in.read())
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_SLOVAK(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Slovak UD corpus (UD_Slovak-SNK); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Slovak-SNK/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/sk_snk-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_SWEDISH(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Swedish UD corpus (UD_Swedish-Talbanken); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Swedish-Talbanken/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/sv_talbanken-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_DANISH(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Danish UD corpus (UD_Danish-DDT); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Danish-DDT/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/da_ddt-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_NORWEGIAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Norwegian UD corpus (UD_Norwegian-Bokmaal); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Norwegian-Bokmaal/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/no_bokmaal-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_FINNISH(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Finnish UD corpus (UD_Finnish-TDT); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Finnish-TDT/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/fi_tdt-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_SLOVENIAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Slovenian UD corpus (UD_Slovenian-SSJ); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Slovenian-SSJ/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/sl_ssj-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_CROATIAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Croatian UD corpus (UD_Croatian-SET); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Croatian-SET/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/hr_set-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_SERBIAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Serbian UD corpus (UD_Serbian-SET); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Serbian-SET/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/sr_set-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_BULGARIAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Bulgarian UD corpus (UD_Bulgarian-BTB); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Bulgarian-BTB/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/bg_btb-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_ARABIC(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Arabic UD corpus (UD_Arabic-PADT); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Arabic-PADT/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/ar_padt-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_HEBREW(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Hebrew UD corpus (UD_Hebrew-HTB); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Hebrew-HTB/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/he_htb-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_TURKISH(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Turkish UD corpus (UD_Turkish-IMST); downloads the CoNLL-U splits on first use.

        :param base_path: optional root folder for datasets; defaults to the flair cache root.
        :param in_memory: if True, keep the parsed corpus in memory.
        :param split_multiwords: if True, split multi-word tokens into individual tokens.
        """
        if not base_path:
            base_path = Path(flair.cache_root) / "datasets"
        else:
            base_path = Path(base_path)
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        # fix: removed a second dead "if not base_path" re-check — base_path is
        # always truthy at this point, so that branch could never execute
        data_folder = base_path / dataset_name
        # download data if necessary
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Turkish-IMST/master"
        cached_path(f"{ud_path}/tr_imst-ud-dev.conllu", Path("datasets") / dataset_name)
        cached_path(f"{ud_path}/tr_imst-ud-test.conllu", Path("datasets") / dataset_name)
        cached_path(f"{ud_path}/tr_imst-ud-train.conllu", Path("datasets") / dataset_name)
        super(UD_TURKISH, self).__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_PERSIAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Persian UD corpus (UD_Persian-Seraji); downloads the CoNLL-U splits on first use.

        :param base_path: optional root folder for datasets; defaults to the flair cache root.
        :param in_memory: if True, keep the parsed corpus in memory.
        :param split_multiwords: if True, split multi-word tokens into individual tokens.
        """
        if not base_path:
            base_path = Path(flair.cache_root) / "datasets"
        else:
            base_path = Path(base_path)
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        # fix: removed a second dead "if not base_path" re-check — base_path is
        # always truthy at this point, so that branch could never execute
        data_folder = base_path / dataset_name
        # download data if necessary
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Persian-Seraji/master"
        cached_path(f"{ud_path}/fa_seraji-ud-dev.conllu", Path("datasets") / dataset_name)
        cached_path(f"{ud_path}/fa_seraji-ud-test.conllu", Path("datasets") / dataset_name)
        cached_path(f"{ud_path}/fa_seraji-ud-train.conllu", Path("datasets") / dataset_name)
        super(UD_PERSIAN, self).__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_RUSSIAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Russian UD corpus (UD_Russian-SynTagRus); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Russian-SynTagRus/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/ru_syntagrus-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_HINDI(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Hindi UD corpus (UD_Hindi-HDTB); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Hindi-HDTB/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/hi_hdtb-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_INDONESIAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Indonesian UD corpus (UD_Indonesian-GSD); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Indonesian-GSD/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/id_gsd-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_JAPANESE(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Japanese UD corpus (UD_Japanese-GSD); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Japanese-GSD/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/ja_gsd-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_CHINESE(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Chinese UD corpus (UD_Chinese-GSD); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Chinese-GSD/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/zh_gsd-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_KOREAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Korean UD corpus (UD_Korean-Kaist); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Korean-Kaist/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/ko_kaist-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_BASQUE(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Basque UD corpus (UD_Basque-BDT); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Basque-BDT/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/eu_bdt-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_CHINESE_KYOTO(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Classical Chinese UD corpus (UD_Classical_Chinese-Kyoto); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Classical_Chinese-Kyoto/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/lzh_kyoto-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_GREEK(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Greek UD corpus (UD_Greek-GDT); downloads the CoNLL-U splits on first use."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each split from the official UD repository (cached after first download)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Greek-GDT/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{repo_url}/el_gdt-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_NAIJA(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Naija (Nigerian Pidgin) UD corpus (UD_Naija-NSC); downloads the CoNLL-U splits on first use.

        :param base_path: optional root folder for datasets; defaults to the flair cache root.
        :param in_memory: if True, keep the parsed corpus in memory.
        :param split_multiwords: if True, split multi-word tokens into individual tokens.
        """
        if not base_path:
            base_path = Path(flair.cache_root) / "datasets"
        else:
            base_path = Path(base_path)
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # download data if necessary
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Naija-NSC/master"
        # fix: the download URLs used "//" between repo path and filename
        cached_path(f"{web_path}/pcm_nsc-ud-dev.conllu", Path("datasets") / dataset_name)
        cached_path(f"{web_path}/pcm_nsc-ud-test.conllu", Path("datasets") / dataset_name)
        cached_path(f"{web_path}/pcm_nsc-ud-train.conllu", Path("datasets") / dataset_name)
        super(UD_NAIJA, self).__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_LIVVI(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Union[str, Path] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Livvi UD corpus (UD_Livvi-KKPP); downloads the CoNLL-U splits on first use.
        This treebank provides only test and train splits (no dev split)."""
        # resolve the folder that holds the downloaded files
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch each available split from the official UD repository
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Livvi-KKPP/master"
        for split in ("test", "train"):
            cached_path(f"{repo_url}/olo_kkpp-ud-{split}.conllu", Path("datasets") / dataset_name)
        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_BURYAT(UniversalDependenciesCorpus):
    """Universal Dependencies corpus for Buryat (UD_Buryat-BDT treebank)."""

    def __init__(
        self,
        base_path: Union[str, Path, None] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Instantiate the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: folder to cache datasets under; defaults to flair's cache root
        :param in_memory: if True, keep the parsed corpus in memory
        :param split_multiwords: if True, split multiword tokens into separate tokens
        """
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary (this treebank ships only test and train splits)
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Buryat-BDT/master"
        for split in ("test", "train"):
            cached_path(f"{web_path}/bxr_bdt-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_NORTH_SAMI(UniversalDependenciesCorpus):
    """Universal Dependencies corpus for North Sámi (UD_North_Sami-Giella treebank)."""

    def __init__(
        self,
        base_path: Union[str, Path, None] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Instantiate the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: folder to cache datasets under; defaults to flair's cache root
        :param in_memory: if True, keep the parsed corpus in memory
        :param split_multiwords: if True, split multiword tokens into separate tokens
        """
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary (this treebank ships only test and train splits)
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_North_Sami-Giella/master"
        for split in ("test", "train"):
            cached_path(f"{web_path}/sme_giella-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_MARATHI(UniversalDependenciesCorpus):
    """Universal Dependencies corpus for Marathi (UD_Marathi-UFAL treebank)."""

    def __init__(
        self,
        base_path: Union[str, Path, None] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Instantiate the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: folder to cache datasets under; defaults to flair's cache root
        :param in_memory: if True, keep the parsed corpus in memory
        :param split_multiwords: if True, split multiword tokens into separate tokens
        """
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Marathi-UFAL/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/mr_ufal-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_MALTESE(UniversalDependenciesCorpus):
    """Universal Dependencies corpus for Maltese (UD_Maltese-MUDT treebank)."""

    def __init__(
        self,
        base_path: Union[str, Path, None] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Instantiate the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: folder to cache datasets under; defaults to flair's cache root
        :param in_memory: if True, keep the parsed corpus in memory
        :param split_multiwords: if True, split multiword tokens into separate tokens
        """
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Maltese-MUDT/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/mt_mudt-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_AFRIKAANS(UniversalDependenciesCorpus):
    """Universal Dependencies corpus for Afrikaans (UD_Afrikaans-AfriBooms treebank)."""

    def __init__(
        self,
        base_path: Union[str, Path, None] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Instantiate the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: folder to cache datasets under; defaults to flair's cache root
        :param in_memory: if True, keep the parsed corpus in memory
        :param split_multiwords: if True, split multiword tokens into separate tokens
        """
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Afrikaans-AfriBooms/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/af_afribooms-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_GOTHIC(UniversalDependenciesCorpus):
    """Universal Dependencies corpus for Gothic (UD_Gothic-PROIEL treebank)."""

    def __init__(
        self,
        base_path: Union[str, Path, None] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Instantiate the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: folder to cache datasets under; defaults to flair's cache root
        :param in_memory: if True, keep the parsed corpus in memory
        :param split_multiwords: if True, split multiword tokens into separate tokens
        """
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Gothic-PROIEL/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/got_proiel-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_OLD_FRENCH(UniversalDependenciesCorpus):
    """Universal Dependencies corpus for Old French (UD_Old_French-SRCMF treebank)."""

    def __init__(
        self,
        base_path: Union[str, Path, None] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Instantiate the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: folder to cache datasets under; defaults to flair's cache root
        :param in_memory: if True, keep the parsed corpus in memory
        :param split_multiwords: if True, split multiword tokens into separate tokens
        """
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Old_French-SRCMF/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/fro_srcmf-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_WOLOF(UniversalDependenciesCorpus):
    """Universal Dependencies corpus for Wolof (UD_Wolof-WTB treebank)."""

    def __init__(
        self,
        base_path: Union[str, Path, None] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Instantiate the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: folder to cache datasets under; defaults to flair's cache root
        :param in_memory: if True, keep the parsed corpus in memory
        :param split_multiwords: if True, split multiword tokens into separate tokens
        """
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Wolof-WTB/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/wo_wtb-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_BELARUSIAN(UniversalDependenciesCorpus):
    """Universal Dependencies corpus for Belarusian (UD_Belarusian-HSE treebank)."""

    def __init__(
        self,
        base_path: Union[str, Path, None] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Instantiate the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: folder to cache datasets under; defaults to flair's cache root
        :param in_memory: if True, keep the parsed corpus in memory
        :param split_multiwords: if True, split multiword tokens into separate tokens
        """
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Belarusian-HSE/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/be_hse-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_COPTIC(UniversalDependenciesCorpus):
    """Universal Dependencies corpus for Coptic (UD_Coptic-Scriptorium treebank)."""

    def __init__(
        self,
        base_path: Union[str, Path, None] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Instantiate the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: folder to cache datasets under; defaults to flair's cache root
        :param in_memory: if True, keep the parsed corpus in memory
        :param split_multiwords: if True, split multiword tokens into separate tokens
        """
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Coptic-Scriptorium/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/cop_scriptorium-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_IRISH(UniversalDependenciesCorpus):
    """Universal Dependencies corpus for Irish (UD_Irish-IDT treebank)."""

    def __init__(
        self,
        base_path: Union[str, Path, None] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Instantiate the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: folder to cache datasets under; defaults to flair's cache root
        :param in_memory: if True, keep the parsed corpus in memory
        :param split_multiwords: if True, split multiword tokens into separate tokens
        """
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Irish-IDT/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/ga_idt-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_LATVIAN(UniversalDependenciesCorpus):
    """Universal Dependencies corpus for Latvian (UD_Latvian-LVTB treebank)."""

    def __init__(
        self,
        base_path: Union[str, Path, None] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Instantiate the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: folder to cache datasets under; defaults to flair's cache root
        :param in_memory: if True, keep the parsed corpus in memory
        :param split_multiwords: if True, split multiword tokens into separate tokens
        """
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Latvian-LVTB/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/lv_lvtb-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_LITHUANIAN(UniversalDependenciesCorpus):
    """Universal Dependencies corpus for Lithuanian (UD_Lithuanian-ALKSNIS treebank)."""

    def __init__(
        self,
        base_path: Union[str, Path, None] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ):
        """Instantiate the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: folder to cache datasets under; defaults to flair's cache root
        :param in_memory: if True, keep the parsed corpus in memory
        :param split_multiwords: if True, split multiword tokens into separate tokens
        """
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        # default dataset folder is the cache root
        data_folder = base_path / dataset_name

        # download data if necessary
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Lithuanian-ALKSNIS/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/lt_alksnis-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
| 36.868537
| 118
| 0.647247
| 8,296
| 67,027
| 4.860776
| 0.047372
| 0.068642
| 0.08057
| 0.096392
| 0.888481
| 0.877991
| 0.856863
| 0.847266
| 0.813391
| 0.77106
| 0
| 0.001148
| 0.246169
| 67,027
| 1,817
| 119
| 36.888828
| 0.79694
| 0.065824
| 0
| 0.635091
| 0
| 0
| 0.197787
| 0.093688
| 0
| 0
| 0
| 0
| 0.000824
| 1
| 0.051895
| false
| 0
| 0.00659
| 0.001647
| 0.110379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b9aa84c45c8e93a2554138f9e2a4b77270cab93f
| 111
|
py
|
Python
|
GameOfLife/tests/test_patterns.py
|
JnyJny/GameOfLife
|
5df27fa0b288466b779a9a89590b3e9b022583d2
|
[
"MIT"
] | 21
|
2016-06-29T20:50:01.000Z
|
2022-03-08T03:38:30.000Z
|
GameOfLife/tests/test_patterns.py
|
JnyJny/GameOfLife
|
5df27fa0b288466b779a9a89590b3e9b022583d2
|
[
"MIT"
] | null | null | null |
GameOfLife/tests/test_patterns.py
|
JnyJny/GameOfLife
|
5df27fa0b288466b779a9a89590b3e9b022583d2
|
[
"MIT"
] | 5
|
2016-06-29T20:50:16.000Z
|
2021-06-02T10:10:12.000Z
|
from unittest import TestCase
from GameOfLife.patterns import *
class PatternsTestCase(TestCase):
    # Placeholder suite: contains no assertions yet. Running it still
    # verifies that the GameOfLife.patterns module imports without errors.
    pass
| 12.333333
| 33
| 0.783784
| 12
| 111
| 7.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171171
| 111
| 8
| 34
| 13.875
| 0.945652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
b9c8715cc66e4e6d309d76ed09cb501d37bd4a89
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/9d/85/4a/b09b5787a8095ef767d625b2ae1c6f930a50acaf9e2a8311cee8b090a9
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.40625
| 0
| 96
| 1
| 96
| 96
| 0.489583
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b9d3b177026c7fdab65483b53d281820dd73e9a8
| 201
|
py
|
Python
|
modules_1/__init__.py
|
softmatter-design/nw_setup
|
4099b4f07639f7797e5ec0ed4521abed077bd7eb
|
[
"MIT"
] | null | null | null |
modules_1/__init__.py
|
softmatter-design/nw_setup
|
4099b4f07639f7797e5ec0ed4521abed077bd7eb
|
[
"MIT"
] | null | null | null |
modules_1/__init__.py
|
softmatter-design/nw_setup
|
4099b4f07639f7797e5ec0ed4521abed077bd7eb
|
[
"MIT"
] | null | null | null |
import modules_1.ReadNWConditions
import modules_1.SetupInitUDF
import modules_1.NWSetup
# import modules_1.RegNWSetup
# import modules_1.RndNWSetup
import modules_1.EquivCalcSetup
__version__ = '0.1'
| 25.125
| 33
| 0.855721
| 27
| 201
| 6
| 0.407407
| 0.481481
| 0.518519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 0.084577
| 201
| 8
| 34
| 25.125
| 0.836957
| 0.273632
| 0
| 0
| 0
| 0
| 0.020833
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b9d64119d4f682b18a168c0e8414c701d892108f
| 47
|
py
|
Python
|
test.py
|
ManavR123/ProphetNet
|
fba9a5a924bb75a33a4d01845a845484321b5ea1
|
[
"MIT"
] | 2
|
2020-07-03T14:54:34.000Z
|
2022-01-22T22:45:52.000Z
|
archive/test.py
|
qingyun322/Virtual_TryOn
|
6a638c76078e33df747ecf38b507b326c90509f8
|
[
"MIT"
] | 20
|
2020-01-28T22:18:55.000Z
|
2021-09-08T01:21:52.000Z
|
test_gpu_torch.py
|
slds-lmu/paper_2019_variationalResampleDistributionShift
|
3664eea4d243eb828d13ba69112308630d80d244
|
[
"Apache-2.0",
"MIT"
] | 2
|
2019-12-14T09:17:47.000Z
|
2020-02-24T16:55:07.000Z
|
# CUDA smoke test: prints True when PyTorch can see a usable GPU, else False.
import torch

gpu_available = torch.cuda.is_available()
print(gpu_available)
| 11.75
| 32
| 0.787234
| 7
| 47
| 5.142857
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 3
| 33
| 15.666667
| 0.837209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
b9e8f69dba0e60d084b94203c7af4fb3836e0161
| 169
|
py
|
Python
|
src/interpolation_robustness/__init__.py
|
michaelaerni/interpolation_robustness
|
be18c37a55b6ae1669391fe21e4aba3584fc9882
|
[
"MIT"
] | 1
|
2022-02-16T19:24:36.000Z
|
2022-02-16T19:24:36.000Z
|
src/interpolation_robustness/__init__.py
|
michaelaerni/interpolation_robustness
|
be18c37a55b6ae1669391fe21e4aba3584fc9882
|
[
"MIT"
] | null | null | null |
src/interpolation_robustness/__init__.py
|
michaelaerni/interpolation_robustness
|
be18c37a55b6ae1669391fe21e4aba3584fc9882
|
[
"MIT"
] | null | null | null |
from . import attacks
from . import closed_form
from . import data
from . import losses
from . import mlflow
from . import models
from . import plots
from . import util
| 18.777778
| 25
| 0.763314
| 25
| 169
| 5.12
| 0.44
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189349
| 169
| 8
| 26
| 21.125
| 0.934307
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6a07a84a5b1f8c29afbdb996bffb6ef683a95983
| 102
|
py
|
Python
|
Curso_Python_3_UDEMY/pacotes/pacote1/modulo1.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
Curso_Python_3_UDEMY/pacotes/pacote1/modulo1.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
Curso_Python_3_UDEMY/pacotes/pacote1/modulo1.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
# Runs at import time: announces which module and package were just loaded
# (course demo of the __name__ / __package__ module attributes).
print(f'Importado, modulo: {__name__} do pacote: {__package__}')
def soma(x, y):
    """Return the sum of x and y (works for any types supporting +)."""
    total = x + y
    return total
| 12.75
| 64
| 0.637255
| 15
| 102
| 3.8
| 0.866667
| 0.070175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205882
| 102
| 7
| 65
| 14.571429
| 0.703704
| 0
| 0
| 0
| 0
| 0
| 0.54
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
6a0cf10cc27f30828d698c6d164cd7f6c3e793a4
| 13,097
|
py
|
Python
|
script.module.exodus/lib/resources/lib/sources/pl/szukajkatv.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | 1
|
2019-03-05T09:37:15.000Z
|
2019-03-05T09:37:15.000Z
|
script.module.exodus/lib/resources/lib/sources/pl/szukajkatv.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | null | null | null |
script.module.exodus/lib/resources/lib/sources/pl/szukajkatv.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | 1
|
2021-11-05T20:48:09.000Z
|
2021-11-05T20:48:09.000Z
|
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2017 homik
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re, urllib, urlparse, base64, json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
class source:
    """Exodus add-on scraper for szukajka.tv (a Polish file-search site).

    NOTE(review): this is Python 2 code -- it relies on `urlparse`/`urllib`,
    the statement form of `exec`, and module-level `byteify` (which uses the
    py2-only `unicode` type and `dict.iteritems`).
    """

    def __init__(self):
        # Scraper metadata consumed by the Exodus source framework.
        self.priority = 1
        self.language = ['pl']
        self.domains = ['szukajka.tv']
        self.base_link = 'http://szukajka.tv'
        self.search_link = '?q=%s&s=5&h=0&v=0&a='

    def clean_serach(self, serach_str):
        # Normalize a title for searching: clean via cleantitle, collapse runs
        # of spaces, strip ends. (Method name "serach" is a historic typo;
        # kept because the framework/other code calls it by this name.)
        result = cleantitle.getsearch(serach_str);
        result = re.sub(' +', ' ', result)
        return result.strip()

    def movie(self, imdb, title, localtitle, aliases, year):
        # Build the search query for a movie: cleaned title plus year.
        return self.clean_serach(title) + ' ' + year

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        # Build the base search query for a TV show (episode info added later).
        return self.clean_serach(tvshowtitle)

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        # Append a zero-padded sXXeYY marker to the show query from tvshow().
        return url + ' s' + season.zfill(2) + 'e' + episode.zfill(2)

    def contains_word(self, str_to_check, word):
        # Case-insensitive whole-word match.
        return re.search(r'\b' + word + r'\b', str_to_check, re.IGNORECASE)

    def contains_all_wors(self, str_to_check, words):
        # True only if every word appears (whole-word) in str_to_check.
        # (Name "wors" is a historic typo kept for compatibility.)
        for word in words:
            if not self.contains_word(str_to_check, word):
                return False
        return True

    def sources(self, url, hostDict, hostprDict):
        # Search szukajka.tv for `url` (the query built by movie()/episode())
        # and scrape each result into an Exodus source dict.
        try:
            words = url.split(' ')
            search_url = urlparse.urljoin(self.base_link, self.search_link) % url
            result = client.request(search_url)
            sources = []
            result = client.parseDOM(result, 'div', attrs={'class':'element'})
            for el in result :
                found_title = client.parseDOM(el, 'div', attrs={'class':'title'})[0]
                # Skip results that do not contain every query word.
                if not self.contains_all_wors(found_title, words):
                    continue
                # Infer quality from markers in the result title.
                q = 'SD'
                if self.contains_word(found_title, '1080p'):
                    q = '1080p'
                elif self.contains_word(found_title, '720p'):
                    q = 'HD'
                link = client.parseDOM(el, 'a', attrs={'class':'link'}, ret='href')[0]
                # Translation type and host are the last word of their spans.
                transl_type = client.parseDOM(el, 'span', attrs={'class':'version'})[0]
                transl_type = transl_type.split(' ')
                transl_type = transl_type[-1]
                host = client.parseDOM(el, 'span', attrs={'class':'host'})[0]
                host = host.split(' ')
                host = host[-1]
                lang, info = self.get_lang_by_type(transl_type)
                sources.append({'source': host, 'quality': q, 'language': lang, 'url': link, 'info': info, 'direct': False, 'debridonly': False})
            return sources
        except:
            # Best-effort scraper: on any failure return whatever was
            # collected so far (NameError if `sources` was never bound).
            return sources

    def get_lang_by_type(self, lang_type):
        # Map the site's Polish translation-type label to (language, info).
        if lang_type == 'Lektor' or lang_type == 'Lek':
            return 'pl', 'Lektor'
        if lang_type == 'Dubbing' or lang_type == 'Dub':
            return 'pl', 'Dubbing'
        if lang_type == 'Napisy' or lang_type == 'Nap':
            return 'pl', 'Napisy'
        if lang_type == 'Polski' or lang_type == 'Pol':
            return 'pl', None
        return 'en', None

    def resolve(self, url):
        # Resolve a szukajka result link to the hosting iframe URL: fetch the
        # confirmation page, replay its cookies (augmented with the computed
        # tmvh hash), and extract the iframe source.
        try:
            r = client.request(url, output='extended')
            url_res = client.parseDOM(r[0], 'a', attrs={'class':'submit'}, ret='href')[0]
            mycookie = self.crazy_cookie_hash(r[4])
            r = client.request(url_res, cookie=mycookie)
            return client.parseDOM(r, 'iframe', ret='src')[0]
        except:
            return

    def crazy_cookie_hash(self, mycookie):
        # The site requires a `tmvh` cookie computed by an MD5-like routine
        # ported from the site's JavaScript; that routine ships here as a
        # base64-encoded Python source blob which is exec'd in a restricted
        # namespace to obtain the hashing function `abc`.
        tmp = 'ZGVmIGFiYyhpbl9hYmMpOg0KICAgIGRlZiByaGV4KGEpOg0KICAgICAgICBoZXhfY2hyID0gJzAxMjM0NTY3ODlhYmNkZWYnDQogICAgICAgIHJldCA9ICcnDQogICAgICAgIGZvciBpIGluIHJhbmdlKDQpOg0KICAgICAgICAgICAgcmV0ICs9IGhleF9jaHJbKGEgPj4gKGkgKiA4ICsgNCkpICYgMHgwRl0gKyBoZXhfY2hyWyhhID4+IChpICogOCkpICYgMHgwRl0NCiAgICAgICAgcmV0dXJuIHJldA0KICAgIGRlZiBoZXgodGV4dCk6DQogICAgICAgIHJldCA9ICcnDQogICAgICAgIGZvciBpIGluIHJhbmdlKGxlbih0ZXh0KSk6DQogICAgICAgICAgICByZXQgKz0gcmhleCh0ZXh0W2ldKQ0KICAgICAgICByZXR1cm4gcmV0DQogICAgZGVmIGFkZDMyKGEsIGIpOg0KICAgICAgICByZXR1cm4gKGEgKyBiKSAmIDB4RkZGRkZGRkYNCiAgICBkZWYgY21uKGEsIGIsIGMsIGQsIGUsIGYpOg0KICAgICAgICBiID0gYWRkMzIoYWRkMzIoYiwgYSksIGFkZDMyKGQsIGYpKTsNCiAgICAgICAgcmV0dXJuIGFkZDMyKChiIDw8IGUpIHwgKGIgPj4gKDMyIC0gZSkpLCBjKQ0KICAgIGRlZiBmZihhLCBiLCBjLCBkLCBlLCBmLCBnKToNCiAgICAgICAgcmV0dXJuIGNtbigoYiAmIGMpIHwgKCh+YikgJiBkKSwgYSwgYiwgZSwgZiwgZykNCiAgICBkZWYgZ2coYSwgYiwgYywgZCwgZSwgZiwgZyk6DQogICAgICAgIHJldHVybiBjbW4oKGIgJiBkKSB8IChjICYgKH5kKSksIGEsIGIsIGUsIGYsIGcpDQogICAgZGVmIGhoKGEsIGIsIGMsIGQsIGUsIGYsIGcpOg0KICAgICAgICByZXR1cm4gY21uKGIgXiBjIF4gZCwgYSwgYiwgZSwgZiwgZykNCiAgICBkZWYgaWkoYSwgYiwgYywgZCwgZSwgZiwgZyk6DQogICAgICAgIHJldHVybiBjbW4oYyBeIChiIHwgKH5kKSksIGEsIGIsIGUsIGYsIGcpDQogICAgZGVmIGNyeXB0Y3ljbGUodGFiQSwgdGFiQik6DQogICAgICAgIGEgPSB0YWJBWzBdDQogICAgICAgIGIgPSB0YWJBWzFdDQogICAgICAgIGMgPSB0YWJBWzJdDQogICAgICAgIGQgPSB0YWJBWzNdDQogICAgICAgIGEgPSBmZihhLCBiLCBjLCBkLCB0YWJCWzBdLCA3LCAtNjgwODc2OTM2KTsNCiAgICAgICAgZCA9IGZmKGQsIGEsIGIsIGMsIHRhYkJbMV0sIDEyLCAtMzg5NTY0NTg2KTsNCiAgICAgICAgYyA9IGZmKGMsIGQsIGEsIGIsIHRhYkJbMl0sIDE3LCA2MDYxMDU4MTkpOw0KICAgICAgICBiID0gZmYoYiwgYywgZCwgYSwgdGFiQlszXSwgMjIsIC0xMDQ0NTI1MzMwKTsNCiAgICAgICAgYSA9IGZmKGEsIGIsIGMsIGQsIHRhYkJbNF0sIDcsIC0xNzY0MTg4OTcpOw0KICAgICAgICBkID0gZmYoZCwgYSwgYiwgYywgdGFiQls1XSwgMTIsIDEyMDAwODA0MjYpOw0KICAgICAgICBjID0gZmYoYywgZCwgYSwgYiwgdGFiQls2XSwgMTcsIC0xNDczMjMxMzQxKTsNCiAgICAgICAgYiA9IGZmKGIsIGMsIGQsIGEsIHRhYkJbN10sIDIyLCAtNDU3MDU5ODMpOw0KICAgICAgICBhID0gZmYoYSwgYiwgYywgZCwgdGFiQls4XSwgNywgMTc3MDAzNTQxNik7DQogICAgICAgIGQgPSBmZihkLCBhLCBiLCBjLCB0YWJCWzldLCAxMiwgLTE5NTg0MTQ0MTcpOw0KICAgICAgICBjID0gZmYoYywgZCwgYSwgYiwgdGFiQlsxMF0sIDE3LCAtNDIwNjMpOw0KICAgICAgICBiID0gZmYoYiwgYywgZCwgYSwgdGFiQlsxMV0sIDIyLCAtMTk5MDQwNDE2Mik7DQogICAgICAgIGEgPSBmZihhLCBiLCBjLCBkLCB0YWJCWzEyXSwgNywgMTgwNDYwMzY4Mik7DQogICAgICAgIGQgPSBmZihkLCBhLCBiLCBjLCB0YWJCWzEzXSwgMTIsIC00MDM0MTEwMSk7DQogICAgICAgIGMgPSBmZihjLCBkLCBhLCBiLCB0YWJCWzE0XSwgMTcsIC0xNTAyMDAyMjkwKTsNCiAgICAgICAgYiA9IGZmKGIsIGMsIGQsIGEsIHRhYkJbMTVdLCAyMiwgMTIzNjUzNTMyOSk7DQogICAgICAgIGEgPSBnZyhhLCBiLCBjLCBkLCB0YWJCWzFdLCA1LCAtMTY1Nzk2NTEwKTsNCiAgICAgICAgZCA9IGdnKGQsIGEsIGIsIGMsIHRhYkJbNl0sIDksIC0xMDY5NTAxNjMyKTsNCiAgICAgICAgYyA9IGdnKGMsIGQsIGEsIGIsIHRhYkJbMTFdLCAxNCwgNjQzNzE3NzEzKTsNCiAgICAgICAgYiA9IGdnKGIsIGMsIGQsIGEsIHRhYkJbMF0sIDIwLCAtMzczODk3MzAyKTsNCiAgICAgICAgYSA9IGdnKGEsIGIsIGMsIGQsIHRhYkJbNV0sIDUsIC03MDE1NTg2OTEpOw0KICAgICAgICBkID0gZ2coZCwgYSwgYiwgYywgdGFiQlsxMF0sIDksIDM4MDE2MDgzKTsNCiAgICAgICAgYyA9IGdnKGMsIGQsIGEsIGIsIHRhYkJbMTVdLCAxNCwgLTY2MDQ3ODMzNSk7DQogICAgICAgIGIgPSBnZyhiLCBjLCBkLCBhLCB0YWJCWzRdLCAyMCwgLTQwNTUzNzg0OCk7DQogICAgICAgIGEgPSBnZyhhLCBiLCBjLCBkLCB0YWJCWzldLCA1LCA1Njg0NDY0MzgpOw0KICAgICAgICBkID0gZ2coZCwgYSwgYiwgYywgdGFiQlsxNF0sIDksIC0xMDE5ODAzNjkwKTsNCiAgICAgICAgYyA9IGdnKGMsIGQsIGEsIGIsIHRhYkJbM10sIDE0LCAtMTg3MzYzOTYxKTsNCiAgICAgICAgYiA9IGdnKGIsIGMsIGQsIGEsIHRhYkJbOF0sIDIwLCAxMTYzNTMxNTAxKTsNCiAgICAgICAgYSA9IGdnKGEsIGIsIGMsIGQsIHRhYkJbMTNdLCA1LCAtMTQ0NDY4MTQ2Nyk7DQogICAgICAgIGQgPSBnZyhkLCBhLCBiLCBjLCB0YWJCWzJdLCA5LCAtNTE0MDM3ODQpOw0KICAgICAgICBjID0gZ2coYywgZCwgYSwgYiwgdGFiQls3XSwgMTQsIDE3MzUzMjg0NzMpOw0KICAgICAgICBiID0gZ2coYiwgYywgZCwgYSwgdGFiQlsxMl0sIDIwLCAtMTkyNjYwNzczNCk7DQogICAgICAgIGEgPSBoaChhLCBiLCBjLCBkLCB0YWJCWzVdLCA0LCAtMzc4NTU4KTsNCiAgICAgICAgZCA9IGhoKGQsIGEsIGIsIGMsIHRhYkJbOF0sIDExLCAtMjAyMjU3NDQ2Myk7DQogICAgICAgIGMgPSBoaChjLCBkLCBhLCBiLCB0YWJCWzExXSwgMTYsIDE4MzkwMzA1NjIpOw0KICAgICAgICBiID0gaGgoYiwgYywgZCwgYSwgdGFiQlsxNF0sIDIzLCAtMzUzMDk1NTYpOw0KICAgICAgICBhID0gaGgoYSwgYiwgYywgZCwgdGFiQlsxXSwgNCwgLTE1MzA5OTIwNjApOw0KICAgICAgICBkID0gaGgoZCwgYSwgYiwgYywgdGFiQls0XSwgMTEsIDEyNzI4OTMzNTMpOw0KICAgICAgICBjID0gaGgoYywgZCwgYSwgYiwgdGFiQls3XSwgMTYsIC0xNTU0OTc2MzIpOw0KICAgICAgICBiID0gaGgoYiwgYywgZCwgYSwgdGFiQlsxMF0sIDIzLCAtMTA5NDczMDY0MCk7DQogICAgICAgIGEgPSBoaChhLCBiLCBjLCBkLCB0YWJCWzEzXSwgNCwgNjgxMjc5MTc0KTsNCiAgICAgICAgZCA9IGhoKGQsIGEsIGIsIGMsIHRhYkJbMF0sIDExLCAtMzU4NTM3MjIyKTsNCiAgICAgICAgYyA9IGhoKGMsIGQsIGEsIGIsIHRhYkJbM10sIDE2LCAtNzIyNTIxOTc5KTsNCiAgICAgICAgYiA9IGhoKGIsIGMsIGQsIGEsIHRhYkJbNl0sIDIzLCA3NjAyOTE4OSk7DQogICAgICAgIGEgPSBoaChhLCBiLCBjLCBkLCB0YWJCWzldLCA0LCAtNjQwMzY0NDg3KTsNCiAgICAgICAgZCA9IGhoKGQsIGEsIGIsIGMsIHRhYkJbMTJdLCAxMSwgLTQyMTgxNTgzNSk7DQogICAgICAgIGMgPSBoaChjLCBkLCBhLCBiLCB0YWJCWzE1XSwgMTYsIDUzMDc0MjUyMCk7DQogICAgICAgIGIgPSBoaChiLCBjLCBkLCBhLCB0YWJCWzJdLCAyMywgLTk5NTMzODY1MSk7DQogICAgICAgIGEgPSBpaShhLCBiLCBjLCBkLCB0YWJCWzBdLCA2LCAtMTk4NjMwODQ0KTsNCiAgICAgICAgZCA9IGlpKGQsIGEsIGIsIGMsIHRhYkJbN10sIDEwLCAxMTI2ODkxNDE1KTsNCiAgICAgICAgYyA9IGlpKGMsIGQsIGEsIGIsIHRhYkJbMTRdLCAxNSwgLTE0MTYzNTQ5MDUpOw0KICAgICAgICBiID0gaWkoYiwgYywgZCwgYSwgdGFiQls1XSwgMjEsIC01NzQzNDA1NSk7DQogICAgICAgIGEgPSBpaShhLCBiLCBjLCBkLCB0YWJCWzEyXSwgNiwgMTcwMDQ4NTU3MSk7DQogICAgICAgIGQgPSBpaShkLCBhLCBiLCBjLCB0YWJCWzNdLCAxMCwgLTE4OTQ5ODY2MDYpOw0KICAgICAgICBjID0gaWkoYywgZCwgYSwgYiwgdGFiQlsxMF0sIDE1LCAtMTA1MTUyMyk7DQogICAgICAgIGIgPSBpaShiLCBjLCBkLCBhLCB0YWJCWzFdLCAyMSwgLTIwNTQ5MjI3OTkpOw0KICAgICAgICBhID0gaWkoYSwgYiwgYywgZCwgdGFiQls4XSwgNiwgMTg3MzMxMzM1OSk7DQogICAgICAgIGQgPSBpaShkLCBhLCBiLCBjLCB0YWJCWzE1XSwgMTAsIC0zMDYxMTc0NCk7DQogICAgICAgIGMgPSBpaShjLCBkLCBhLCBiLCB0YWJCWzZdLCAxNSwgLTE1NjAxOTgzODApOw0KICAgICAgICBiID0gaWkoYiwgYywgZCwgYSwgdGFiQlsxM10sIDIxLCAxMzA5MTUxNjQ5KTsNCiAgICAgICAgYSA9IGlpKGEsIGIsIGMsIGQsIHRhYkJbNF0sIDYsIC0xNDU1MjMwNzApOw0KICAgICAgICBkID0gaWkoZCwgYSwgYiwgYywgdGFiQlsxMV0sIDEwLCAtMTEyMDIxMDM3OSk7DQogICAgICAgIGMgPSBpaShjLCBkLCBhLCBiLCB0YWJCWzJdLCAxNSwgNzE4Nzg3MjU5KTsNCiAgICAgICAgYiA9IGlpKGIsIGMsIGQsIGEsIHRhYkJbOV0sIDIxLCAtMzQzNDg1NTUxKTsNCiAgICAgICAgdGFiQVswXSA9IGFkZDMyKGEsIHRhYkFbMF0pOw0KICAgICAgICB0YWJBWzFdID0gYWRkMzIoYiwgdGFiQVsxXSk7DQogICAgICAgIHRhYkFbMl0gPSBhZGQzMihjLCB0YWJBWzJdKTsNCiAgICAgICAgdGFiQVszXSA9IGFkZDMyKGQsIHRhYkFbM10pDQogICAgZGVmIGNyeXB0YmxrKHRleHQpOg0KICAgICAgICByZXQgPSBbXQ0KICAgICAgICBmb3IgaSBpbiByYW5nZSgwLCA2NCwgNCk6DQogICAgICAgICAgICByZXQuYXBwZW5kKG9yZCh0ZXh0W2ldKSArIChvcmQodGV4dFtpKzFdKSA8PCA4KSArIChvcmQodGV4dFtpKzJdKSA8PCAxNikgKyAob3JkKHRleHRbaSszXSkgPDwgMjQpKQ0KICAgICAgICByZXR1cm4gcmV0DQogICAgZGVmIGpjc3lzKHRleHQpOg0KICAgICAgICB0eHQgPSAnJzsNCiAgICAgICAgdHh0TGVuID0gbGVuKHRleHQpDQogICAgICAgIHJldCA9IFsxNzMyNTg0MTkzLCAtMjcxNzMzODc5LCAtMTczMjU4NDE5NCwgMjcxNzMzODc4XQ0KICAgICAgICBpID0gNjQNCiAgICAgICAgd2hpbGUgaSA8PSBsZW4odGV4dCk6DQogICAgICAgICAgICBjcnlwdGN5Y2xlKHJldCwgY3J5cHRibGsodGV4dFsnc3Vic3RyaW5nJ10oaSAtIDY0LCBpKSkpDQogICAgICAgICAgICBpICs9IDY0DQogICAgICAgIHRleHQgPSB0ZXh0W2kgLSA2NDpdDQogICAgICAgIHRtcCA9IFswLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwXQ0KICAgICAgICBpID0gMA0KICAgICAgICB3aGlsZSBpIDwgbGVuKHRleHQpOg0KICAgICAgICAgICAgdG1wW2kgPj4gMl0gfD0gb3JkKHRleHRbaV0pIDw8ICgoaSAlIDQpIDw8IDMpDQogICAgICAgICAgICBpICs9IDENCiAgICAgICAgdG1wW2kgPj4gMl0gfD0gMHg4MCA8PCAoKGkgJSA0KSA8PCAzKQ0KICAgICAgICBpZiBpID4gNTU6DQogICAgICAgICAgICBjcnlwdGN5Y2xlKHJldCwgdG1wKTsNCiAgICAgICAgICAgIGZvciBpIGluIHJhbmdlKDE2KToNCiAgICAgICAgICAgICAgICB0bXBbaV0gPSAwDQogICAgICAgIHRtcFsxNF0gPSB0eHRMZW4gKiA4Ow0KICAgICAgICBjcnlwdGN5Y2xlKHJldCwgdG1wKTsNCiAgICAgICAgcmV0dXJuIHJldA0KICAgIGRlZiByZXplZG93YSh0ZXh0KToNCiAgICAgICAgcmV0dXJuIGhleChqY3N5cyh0ZXh0KSkNCiAgICByZXR1cm4gcmV6ZWRvd2EoaW5fYWJjKQ0K'
        tmp = base64.b64decode(tmp)
        _myFun = compile(tmp, '', 'exec')
        # Restricted exec namespace: no builtins beyond the four names the
        # decoded routine needs. NOTE(review): exec of an embedded blob is
        # inherently risky; left as-is since the payload is static.
        vGlobals = {"__builtins__": None, 'len': len, 'list': list, 'ord': ord, 'range': range}
        vLocals = {'abc': ''}
        exec _myFun in vGlobals, vLocals
        myFun1 = vLocals['abc']
        # Fetch the key/hash pairs the site uses, order values by key, hash
        # the concatenation, and prepend the resulting tmvh cookie.
        data = client.request(urlparse.urljoin(self.base_link, '/jsverify.php?op=tag'), cookie=mycookie)
        data = byteify(json.loads(data))
        d = {}
        for i in range(len(data['key'])):
            d[data['key'][i]] = data['hash'][i]
        tmp = ''
        for k in sorted(d.keys()):
            tmp += d[k]
        mycookie = 'tmvh=%s;%s' % (myFun1(tmp), mycookie)
        return mycookie
def byteify(input):
    # Recursively convert unicode strings inside a JSON-decoded structure to
    # UTF-8 byte strings. Python 2-only: relies on the `unicode` type and
    # `dict.iteritems`. (Parameter name shadows the `input` builtin.)
    if isinstance(input, dict):
        return dict([(byteify(key), byteify(value)) for key, value in input.iteritems()])
    elif isinstance(input, list):
        return [byteify(element) for element in input]
    elif isinstance(input, unicode):
        return input.encode('utf-8')
    else:
        return input
| 85.601307
| 7,540
| 0.810033
| 670
| 13,097
| 15.731343
| 0.356716
| 0.006831
| 0.003795
| 0.005408
| 0.038899
| 0.017078
| 0
| 0
| 0
| 0
| 0
| 0.045716
| 0.13652
| 13,097
| 152
| 7,541
| 86.164474
| 0.886285
| 0.001603
| 0
| 0.059406
| 0
| 0
| 0.639039
| 0.608393
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0.029703
| null | null | 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6a2b58076423184adbc4b6a857d0b70e03315654
| 2,504
|
py
|
Python
|
benderopt/tests/validation/test_uniform.py
|
tchar/benderopt
|
83a6bbb9c5732f6232c1a1bbc971f8022a975a28
|
[
"MIT"
] | 66
|
2019-01-08T14:34:21.000Z
|
2020-10-22T00:54:58.000Z
|
benderopt/tests/validation/test_uniform.py
|
tchar/benderopt
|
83a6bbb9c5732f6232c1a1bbc971f8022a975a28
|
[
"MIT"
] | 4
|
2019-03-03T19:17:26.000Z
|
2020-10-22T18:37:30.000Z
|
benderopt/tests/validation/test_uniform.py
|
tchar/benderopt
|
83a6bbb9c5732f6232c1a1bbc971f8022a975a28
|
[
"MIT"
] | 5
|
2019-04-29T03:28:54.000Z
|
2020-10-22T19:45:40.000Z
|
import pytest
from benderopt.validation.uniform import validate_uniform, validate_uniform_value
from benderopt.validation.utils import ValidationError
def test_uniform_search_space_ok():
search_space = {
"low": -5,
"high": 5,
"step": 0.1,
}
search_space = validate_uniform(search_space)
def test_uniform_search_space_not_dict():
search_space = ([-5, 5],)
with pytest.raises(ValidationError):
search_space = validate_uniform(search_space)
def test_uniform_search_space_no_high():
search_space = {
"low": -5,
}
with pytest.raises(ValidationError):
search_space = validate_uniform(search_space)
def test_uniform_search_space_no_low():
search_space = {
"high": 5,
}
with pytest.raises(ValidationError):
search_space = validate_uniform(search_space)
def test_uniform_search_space_bad_high():
search_space = {
"low": [-5],
"high": 5,
}
with pytest.raises(ValidationError):
search_space = validate_uniform(search_space)
def test_uniform_search_space_bad_low():
search_space = {
"low": -5,
"high": [5],
}
with pytest.raises(ValidationError):
search_space = validate_uniform(search_space)
def test_uniform_search_space_bad_low_high():
search_space = {
"low": 6,
"high": 5,
}
with pytest.raises(ValidationError):
search_space = validate_uniform(search_space)
def test_uniform_search_space_bad_step():
search_space = {"low": 0, "high": 5, "step": [1]}
with pytest.raises(ValidationError):
search_space = validate_uniform(search_space)
search_space = {
"low": 0,
"high": 5,
"step": 6,
}
with pytest.raises(ValidationError):
search_space = validate_uniform(search_space)
def test_uniform_search_space_no_step():
search_space = {
"low": 0,
"high": 5,
}
search_space = validate_uniform(search_space)
assert "step" in search_space.keys()
assert search_space["step"] is None
def test_uniform_value():
search_space = {
"low": 0,
"high": 5,
}
assert validate_uniform_value(0, **search_space) is True
assert validate_uniform_value(5, **search_space) is False
assert validate_uniform_value(2, **search_space) is True
assert validate_uniform_value(10, **search_space) is False
assert validate_uniform_value(-10, **search_space) is False
| 22.763636
| 81
| 0.661741
| 301
| 2,504
| 5.146179
| 0.122924
| 0.333764
| 0.220788
| 0.16785
| 0.808909
| 0.778567
| 0.728857
| 0.679793
| 0.612653
| 0.55326
| 0
| 0.016137
| 0.232827
| 2,504
| 109
| 82
| 22.972477
| 0.790213
| 0
| 0
| 0.526316
| 0
| 0
| 0.033147
| 0
| 0
| 0
| 0
| 0
| 0.092105
| 1
| 0.131579
| false
| 0
| 0.039474
| 0
| 0.171053
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6a4a973eed563382d3b40e778b750ae27cec8e7a
| 3,046
|
py
|
Python
|
problog/test/test_try_call.py
|
HEmile/problog
|
576b6fd305f72b12125111c8d4d62cf8a7bbda0f
|
[
"Apache-2.0"
] | 189
|
2019-05-27T08:20:10.000Z
|
2022-03-28T09:29:22.000Z
|
problog/test/test_try_call.py
|
HEmile/problog
|
576b6fd305f72b12125111c8d4d62cf8a7bbda0f
|
[
"Apache-2.0"
] | 60
|
2019-06-11T15:07:48.000Z
|
2022-03-25T02:31:23.000Z
|
problog/test/test_try_call.py
|
HEmile/problog
|
576b6fd305f72b12125111c8d4d62cf8a7bbda0f
|
[
"Apache-2.0"
] | 33
|
2019-07-03T13:14:24.000Z
|
2022-02-20T01:07:15.000Z
|
import unittest
from problog import get_evaluatable
from problog.program import PrologString
class TestTryCall(unittest.TestCase):
def test_try_call_existing_fact(self):
p = PrologString(
"""
a(1).
res :- try_call(a(1)).
query(res).
"""
)
res = get_evaluatable().create_from(p).evaluate()
self.assertEqual(list(res.values())[0], 1)
def test_try_call_existing_fact_non_ground(self):
p = PrologString(
"""
a(1).
res :- try_call(a(X)).
query(res).
"""
)
res = get_evaluatable().create_from(p).evaluate()
self.assertEqual(list(res.values())[0], 1)
def test_try_call_existing_fact_non_ground2(self):
p = PrologString(
"""
a(1).
b :- a(X), X > 0.
res :- try_call(b).
query(res).
"""
)
res = get_evaluatable().create_from(p).evaluate()
self.assertEqual(list(res.values())[0], 1)
def test_try_call_non_existing_fact_non_ground(self):
p = PrologString(
"""
a(1).
b :- a(X), X > 2.
res :- try_call(b).
query(res).
"""
)
res = get_evaluatable().create_from(p).evaluate()
self.assertEqual(list(res.values())[0], 0)
def test_try_call_non_existing_fact_non_ground2(self):
p = PrologString(
"""
a(1).
res :- try_call(b(X)).
query(res).
"""
)
res = get_evaluatable().create_from(p).evaluate()
self.assertEqual(list(res.values())[0], 0)
def test_try_call_wrong_comp(self):
p = PrologString(
"""
res :- try_call(1 > 2).
query(res).
"""
)
res = get_evaluatable().create_from(p).evaluate()
self.assertEqual(list(res.values())[0], 0)
def test_try_call_right_comp(self):
p = PrologString(
"""
res :- try_call(1 < 2).
query(res).
"""
)
res = get_evaluatable().create_from(p).evaluate()
self.assertEqual(list(res.values())[0], 1)
def test_try_call_non_existing_comp(self):
p = PrologString(
"""
res :- try_call(X > 2).
query(res).
"""
)
res = get_evaluatable().create_from(p).evaluate()
self.assertEqual(list(res.values())[0], 0)
def test_try_call_existing_clause(self):
p = PrologString(
"""
a:(c:-d).
res :- try_call(a:(c:-d)).
query(res).
"""
)
res = get_evaluatable().create_from(p).evaluate()
self.assertEqual(list(res.values())[0], 1)
def test_try_call_non_existing_clause(self):
p = PrologString(
"""
a:(c:-d).
res :- try_call(a:(c:-e)).
query(res).
"""
)
res = get_evaluatable().create_from(p).evaluate()
self.assertEqual(list(res.values())[0], 0)
| 24.174603
| 58
| 0.512147
| 354
| 3,046
| 4.172316
| 0.118644
| 0.094787
| 0.067705
| 0.094787
| 0.911984
| 0.911984
| 0.894381
| 0.873392
| 0.86459
| 0.747461
| 0
| 0.017284
| 0.335194
| 3,046
| 125
| 59
| 24.368
| 0.712099
| 0
| 0
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 1
| 0.185185
| false
| 0
| 0.055556
| 0
| 0.259259
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
dbe0888ab7e240f6324677dee3db5a9748e91fd6
| 202
|
py
|
Python
|
task_core/exceptions.py
|
cloudnull/task-core
|
a57ba10f553cf1829f98eb2810a40a35f0b33c19
|
[
"Apache-2.0"
] | null | null | null |
task_core/exceptions.py
|
cloudnull/task-core
|
a57ba10f553cf1829f98eb2810a40a35f0b33c19
|
[
"Apache-2.0"
] | null | null | null |
task_core/exceptions.py
|
cloudnull/task-core
|
a57ba10f553cf1829f98eb2810a40a35f0b33c19
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""exception classess"""
class InvalidRole(Exception):
"""Exception if role is not defined"""
class InvalidService(Exception):
"""Exception if service is not defined"""
| 22.444444
| 45
| 0.707921
| 24
| 202
| 5.958333
| 0.625
| 0.251748
| 0.27972
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005814
| 0.148515
| 202
| 8
| 46
| 25.25
| 0.825581
| 0.539604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
dbe99be365f8bca97ab3221edcb82b510f1feab5
| 4,386
|
py
|
Python
|
qcdb/tests/test_tu6_cp.py
|
loriab/qccddb
|
d9e156ef8b313ac0633211fc6b841f84a3ddde24
|
[
"BSD-3-Clause"
] | 8
|
2019-03-28T11:54:59.000Z
|
2022-03-19T03:31:37.000Z
|
qcdb/tests/test_tu6_cp.py
|
loriab/qccddb
|
d9e156ef8b313ac0633211fc6b841f84a3ddde24
|
[
"BSD-3-Clause"
] | 39
|
2018-10-31T23:02:18.000Z
|
2021-12-12T22:11:37.000Z
|
qcdb/tests/test_tu6_cp.py
|
loriab/qccddb
|
d9e156ef8b313ac0633211fc6b841f84a3ddde24
|
[
"BSD-3-Clause"
] | 9
|
2018-03-12T20:51:50.000Z
|
2022-02-28T15:18:34.000Z
|
"""
from https://github.com/psi4/psi4/blob/master/tests/tu6-cp-ne2/input.dat
Example potential energy surface scan and CP-correction for Ne2
"""
import pprint
import pytest
import qcengine
from qcelemental import constants
from qcengine.testing import using
import qcdb
from .utils import *
tu6_ie_scan = {2.5: 0.757717, 3.0: 0.015685, 4.0: -0.016266}
@using("psi4")
def test_tu6_cp_psi4():
dimer = qcdb.set_molecule(
"""
Ne
--
Ne 1 R
"""
)
qcdb.set_options(
{
"basis": "aug-cc-pVDZ",
"freeze_core": True,
}
)
Rvals = [2.5, 3.0, 4.0]
ecp = {}
for R in Rvals:
dimer.R = R
ecp[R], wfn = qcdb.energy("p4-ccsd(t)", bsse_type="cp", return_wfn=True)
pprint.pprint(wfn, width=200)
assert compare_values(
tu6_ie_scan[R], ecp[R] * constants.hartree2kcalmol, atol=1.0e-4, label=f"CP-CCSD(T) [{R:3.1f}]"
)
# assert compare("Psi4", wfn["provenance"]["creator"], "harness")
print("\nCP-corrected CCSD(T)/aug-cc-pVDZ interaction energies\n\n")
print(" R [Ang] E_int [kcal/mol] \n")
print("-----------------------------------------------------\n")
for R in Rvals:
e = ecp[R] * constants.hartree2kcalmol
print(f" {R:3.1f} {e:10.6f}\n")
@using("cfour")
def test_tu6_cp_cfour():
dimer = qcdb.set_molecule(
"""
Ne
--
Ne 1 R
"""
)
qcdb.set_options(
{
"basis": "aug-cc-pVDZ",
"freeze_core": True,
}
)
Rvals = [2.5, 3.0, 4.0]
ecp = {}
for R in Rvals:
dimer.R = R
ecp[R], wfn = qcdb.energy("c4-ccsd(t)", bsse_type="cp", return_wfn=True)
pprint.pprint(wfn, width=200)
assert compare_values(
tu6_ie_scan[R], ecp[R] * constants.hartree2kcalmol, atol=1.0e-4, label=f"CP-CCSD(T) [{R:3.1f}]"
)
# assert compare("CFOUR", wfn["provenance"]["creator"], "harness")
print("\nCP-corrected CCSD(T)/aug-cc-pVDZ interaction energies\n\n")
print(" R [Ang] E_int [kcal/mol] \n")
print("-----------------------------------------------------\n")
for R in Rvals:
e = ecp[R] * constants.hartree2kcalmol
print(f" {R:3.1f} {e:10.6f}\n")
@using("nwchem")
def test_tu6_cp_nwchem():
dimer = qcdb.set_molecule(
"""
Ne
--
Ne 1 R
"""
)
qcdb.set_options(
{
"basis": "aug-cc-pVDZ",
"freeze_core": True,
}
)
Rvals = [2.5, 3.0, 4.0]
ecp = {}
for R in Rvals:
dimer.R = R
ecp[R], wfn = qcdb.energy("nwc-ccsd(t)", bsse_type="cp", return_wfn=True)
pprint.pprint(wfn, width=200)
assert compare_values(
tu6_ie_scan[R], ecp[R] * constants.hartree2kcalmol, atol=1.0e-4, label=f"CP-CCSD(T) [{R:3.1f}]"
)
# assert compare("NWChem", wfn["provenance"]["creator"], "harness")
print("\nCP-corrected CCSD(T)/aug-cc-pVDZ interaction energies\n\n")
print(" R [Ang] E_int [kcal/mol] \n")
print("-----------------------------------------------------\n")
for R in Rvals:
e = ecp[R] * constants.hartree2kcalmol
print(f" {R:3.1f} {e:10.6f}\n")
@using("gamess")
def test_tu6_cp_gamess():
dimer = qcdb.set_molecule(
"""
Ne
--
Ne 1 R
"""
)
qcdb.set_options(
{
"basis": "aug-cc-pVDZ",
"freeze_core": True,
}
)
Rvals = [2.5, 3.0, 4.0]
ecp = {}
for R in Rvals:
dimer.R = R
ecp[R], wfn = qcdb.energy("gms-ccsd(t)", bsse_type="cp", return_wfn=True)
pprint.pprint(wfn, width=200)
assert compare_values(
tu6_ie_scan[R], ecp[R] * constants.hartree2kcalmol, atol=1.0e-4, label=f"CP-CCSD(T) [{R:3.1f}]"
)
# assert compare("GAMESS", wfn["provenance"]["creator"], "harness")
print("\nCP-corrected CCSD(T)/aug-cc-pVDZ interaction energies\n\n")
print(" R [Ang] E_int [kcal/mol] \n")
print("-----------------------------------------------------\n")
for R in Rvals:
e = ecp[R] * constants.hartree2kcalmol
print(f" {R:3.1f} {e:10.6f}\n")
| 26.421687
| 107
| 0.492704
| 575
| 4,386
| 3.671304
| 0.172174
| 0.022738
| 0.034107
| 0.041686
| 0.812411
| 0.812411
| 0.812411
| 0.812411
| 0.812411
| 0.812411
| 0
| 0.043677
| 0.300502
| 4,386
| 165
| 108
| 26.581818
| 0.644394
| 0.090743
| 0
| 0.62963
| 0
| 0
| 0.28695
| 0.057652
| 0
| 0
| 0
| 0
| 0.037037
| 1
| 0.037037
| false
| 0
| 0.064815
| 0
| 0.101852
| 0.194444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
dbf5a790128fdf7a49d4edb0e8ae12c97724cd14
| 75
|
py
|
Python
|
src/dx/ams/text/scraper/__init__.py
|
lmmx/dx
|
063e8f8cfc24dfdf09a12001b58b4017a75ea3e8
|
[
"MIT"
] | null | null | null |
src/dx/ams/text/scraper/__init__.py
|
lmmx/dx
|
063e8f8cfc24dfdf09a12001b58b4017a75ea3e8
|
[
"MIT"
] | 2
|
2021-01-03T16:22:11.000Z
|
2021-02-07T08:41:57.000Z
|
src/dx/ams/text/scraper/__init__.py
|
lmmx/dx
|
063e8f8cfc24dfdf09a12001b58b4017a75ea3e8
|
[
"MIT"
] | null | null | null |
from .crawler import *
from .pickle_utils import *
from .reparser import *
| 18.75
| 27
| 0.76
| 10
| 75
| 5.6
| 0.6
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 75
| 3
| 28
| 25
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.